def handle_message(user_message):
    # Analyze the user message before sending it to the LLM
    analysis_result = sonnylabs_client.analyze_text(user_message, scan_type="input")
    # Check for prompt injections
    prompt_injection = sonnylabs_client.get_prompt_injections(analysis_result)
    if prompt_injection and prompt_injection["score"] > 0.65:  # Threshold is adjustable; 0.65 or higher is recommended
        return "I detected potential prompt injection in your message. Please try again."
    # Check for PII in the input message
    input_pii_items = sonnylabs_client.get_pii(analysis_result)
    if input_pii_items:
        pii_types = [item["label"] for item in input_pii_items]
        return f"Personal information detected in the user message ({', '.join(pii_types)})."
    # If no security issues are found, process the message normally
    llm_response = call_llm(user_message)  # Placeholder: substitute your own LLM call here
    # Scan the outgoing message for PII
    output_analysis = sonnylabs_client.analyze_text(llm_response, scan_type="output")
    # Repeat the PII check on the LLM response
    output_pii_items = sonnylabs_client.get_pii(output_analysis)
    if output_pii_items:
        pii_types = [item["label"] for item in output_pii_items]
        return f"Personal information detected in the LLM response ({', '.join(pii_types)})."
    # Both checks passed: return the model's answer to the user
    return llm_response