from upsonic import Agent, Task
from upsonic.safety_engine.policies.pii_policies import PIIAnonymizePolicy
# --- PII anonymization demo -------------------------------------------------
# PIIAnonymizePolicy scrubs sensitive values (emails, phone numbers) out of
# the prompt before it is sent to the LLM provider, then restores them in the
# model's reply — so raw PII never leaves the machine, yet the final answer
# is fully usable.

agent = Agent(
    "anthropic/claude-sonnet-4-5",
    user_policy=PIIAnonymizePolicy,  # scrub PII before the prompt reaches the provider
    debug=True,  # surface each policy decision in the logs
)

# A request that deliberately contains an email address and a phone number.
task = Task(
    description=(
        "My email is john.doe@example.com and phone is 555-1234. "
        "What are my email and phone?"
    )
)

# Round trip performed automatically by the policy:
#   1. original input:  "My email is john.doe@example.com and phone is 555-1234..."
#   2. sent to LLM:     "My email is EMAIL_1 and phone is PHONE_1..."
#   3. LLM reply:       "Your email is EMAIL_1 and phone is PHONE_1"
#   4. returned to you: placeholders swapped back to the real values
result = agent.do(task)
print(result)  # "Your email is john.doe@example.com and phone is 555-1234"