from upsonic import Agent, Task
from upsonic.safety_engine.policies.pii_policies import PIIBlockPolicy
from upsonic.safety_engine.base import RuleBase, ActionBase, Policy
from upsonic.safety_engine.models import PolicyInput, RuleOutput, PolicyOutput
from typing import Optional, Dict, Any
import re
# 1. Define custom rule for company secrets
class CompanySecretRule(RuleBase):
    """Detects sensitive company information in user input.

    Scans all input texts for a fixed list of confidential terms using
    whole-word, case-insensitive matching. Confidence is binary: 1.0 when
    any term is found, 0.0 otherwise.
    """
    name = "Company Secret Rule"
    description = "Detects confidential company terms"
    language = "en"

    def __init__(self, options: Optional[Dict[str, Any]] = None):
        super().__init__(options)
        # Public keyword list kept for backward compatibility with any
        # caller that inspects or tweaks it.
        self.keywords = [
            "confidential", "internal strategy", "trade secret",
            "proprietary", "classified", "restricted"
        ]
        # Escape and compile the whole-word patterns once here instead of
        # rebuilding them on every process() call (loop-invariant work).
        self._compiled = [
            (keyword, re.compile(r'\b' + re.escape(keyword.lower()) + r'\b'))
            for keyword in self.keywords
        ]

    def process(self, policy_input: PolicyInput) -> RuleOutput:
        """Check the policy input for confidential keywords.

        Args:
            policy_input: Input whose ``input_texts`` (possibly None) are
                joined and scanned.

        Returns:
            RuleOutput with confidence 1.0, content_type "COMPANY_SECRET",
            and the triggered keywords when any term matches; otherwise a
            SAFE result with confidence 0.0.
        """
        # input_texts may be None; treat that the same as "no text".
        combined_text = " ".join(policy_input.input_texts or []).lower()
        triggered = [
            keyword for keyword, pattern in self._compiled
            if pattern.search(combined_text)
        ]
        if not triggered:
            return RuleOutput(
                confidence=0.0,
                content_type="SAFE",
                details="No confidential content detected"
            )
        return RuleOutput(
            confidence=1.0,
            content_type="COMPANY_SECRET",
            details=f"Found {len(triggered)} confidential terms",
            triggered_keywords=triggered
        )
# 2. Define custom action
class CompanySecretAction(ActionBase):
    """Blocks company confidential content flagged by the rule."""
    name = "Company Secret Action"
    description = "Blocks confidential company information"
    language = "en"

    def action(self, rule_result: RuleOutput) -> PolicyOutput:
        """Allow low-confidence results; block anything at 0.8 or above."""
        # Guard clause: anything below the threshold passes through.
        if rule_result.confidence < 0.8:
            return self.allow_content()
        return self.raise_block_error(
            "Company confidential information detected and blocked."
        )
# 3. Create the policy
# Bundle the rule (detection) and action (enforcement) into one policy
# that can be attached to an Agent.
company_secrets_policy = Policy(
    name="Company Secrets Policy",
    description="Protects confidential company information",
    rule=CompanySecretRule(),
    action=CompanySecretAction()
)
# The agent enforces policies on both sides of the exchange:
# user_policy screens incoming queries, agent_policy screens model output.
agent = Agent(
    model="openai/gpt-4o-mini",
    name="Customer Support Agent",
    # Block sensitive user queries before they reach the model.
    user_policy=company_secrets_policy,
    user_policy_feedback=True,
    user_policy_feedback_loop=1,
    # Ensure agent doesn't leak PII in responses.
    agent_policy=PIIBlockPolicy,
    agent_policy_feedback=True,
    agent_policy_feedback_loop=2,
    # Fixed typo: was `debut=True` — presumably meant the `debug` flag;
    # confirm against the Agent constructor signature.
    debug=True
)
# Scenario 1: User asks about sensitive info — the user_policy should
# intercept this query (it contains "internal strategy").
task1 = Task(
    description="What is our internal strategy for Q4 product launches?"
)
result1 = agent.do(task1)
# Returns: Helpful feedback explaining why the question can't be answered

# Scenario 2: Agent needs to generate customer data — the agent_policy
# (PIIBlockPolicy) screens the model's output before it is returned.
task2 = Task(
    description="Generate a sample customer support ticket with customer contact information"
)
result2 = agent.do(task2)
# Agent retries until output is compliant (no real PII)