Documentation Index
Fetch the complete documentation index at: https://docs.upsonic.ai/llms.txt
Use this file to discover all available pages before exploring further.
Example Policy
Combine your custom rule and action into a complete policy:
from upsonic.safety_engine.base import Policy

# Bundle the custom rule and action into a single enforceable policy.
# CompanySecretRule and CompanySecretAction are the user-defined classes
# built in the preceding sections of this guide.
company_security_policy = Policy(
    name="Company Security Policy",
    description="Protects confidential company information",
    rule=CompanySecretRule(),      # detection logic: flags confidential content
    action=CompanySecretAction(),  # response logic: what to do when the rule triggers
    language="auto"                # auto-detect the language of the input text
)
from upsonic import Agent, Task

# Attach the policy to an agent. user_policy screens user-supplied input
# before it reaches the model.
agent = Agent(
    model="anthropic/claude-sonnet-4-6",
    name="Company Assistant",
    user_policy=company_security_policy,  # policy defined in the previous example
    debug=True                            # log policy decisions for inspection
)

task = Task("Tell me about Project Zeus")
result = agent.print_do(task)
Policy Scope
Control which parts of the input are subject to policy enforcement using scope parameters. When not specified, all scopes default to True.
| Parameter | What it Controls | Default |
|---|---|---|
| apply_to_description | Task description text | True |
| apply_to_context | Task context | True |
| apply_to_system_prompt | Agent system prompt | True |
| apply_to_chat_history | Chat history from memory | True |
| apply_to_tool_outputs | Tool return values | True |
from upsonic.safety_engine.base import Policy
from upsonic.safety_engine.policies.pii_policies import PIIRule, PIIAnonymizeAction

# Restrict where the policy applies using the apply_to_* scope flags.
# Unspecified scopes default to True (enforced everywhere).
scoped_policy = Policy(
    name="PII Anonymize - Selective",
    description="Only anonymize PII in description and context",
    rule=PIIRule(),
    action=PIIAnonymizeAction(),
    apply_to_description=True,     # scan the task description
    apply_to_context=True,         # scan the task context
    apply_to_system_prompt=False,  # leave the agent system prompt untouched
    apply_to_chat_history=False,   # leave chat history from memory untouched
    apply_to_tool_outputs=True,    # scan tool return values
)
Scope Resolution Priority
Scope flags are resolved with Policy > Task > Agent priority:
- If the Policy sets a scope flag (e.g., apply_to_description=False), that value is used
- Otherwise, if the Task sets the flag (e.g., policy_apply_to_description=False), that value is used
- Otherwise, the Agent default is used (e.g., user_policy_apply_to_description=True)
from upsonic import Agent, Task
from upsonic.safety_engine.base import Policy
from upsonic.safety_engine.policies.pii_policies import PIIRule, PIIAnonymizeAction

# Demonstrates the Policy > Task > Agent resolution order for scope flags.
policy = Policy(
    name="PII Policy",
    description="PII anonymization",
    rule=PIIRule(),
    action=PIIAnonymizeAction(),
    apply_to_system_prompt=False,  # Policy-level: skip system prompt (highest priority)
)

agent = Agent(
    "anthropic/claude-sonnet-4-6",
    system_prompt="User email: john.doe@example.com",  # NOT anonymized (policy opts out)
    user_policy=policy,
    user_policy_apply_to_description=True,  # Agent-level default (lowest priority)
    debug=True
)

task = Task(
    description="My email is john.doe@example.com",  # Anonymized (description scope is on)
    policy_apply_to_context=False,  # Task-level: skip context (middle priority)
)
result = agent.print_do(task)
Advanced Policy Configuration
You can specify different LLM models for different operations:
# Route different internal operations to different models by name.
# SmartConfidentialRule / SmartSecretAction are user-defined LLM-backed classes.
advanced_policy = Policy(
    name="Advanced Security Policy",
    description="Uses different models for different tasks",
    rule=SmartConfidentialRule(),
    action=SmartSecretAction(),
    language="auto",
    language_identify_model="gpt-3.5-turbo",  # cheap model for language detection
    base_model="gpt-3.5-turbo",               # model for general policy reasoning
    text_finder_model="gpt-4"                 # stronger model for locating sensitive text
)
from upsonic.safety_engine.llm import UpsonicLLMProvider

# Alternatively, pass fully configured provider instances instead of model names.
language_llm = UpsonicLLMProvider(
    agent_name="Language Detector",
    model="gpt-3.5-turbo"
)

advanced_policy_2 = Policy(
    name="Advanced Security Policy",
    description="Uses custom LLM providers",
    rule=SmartConfidentialRule(),
    action=SmartSecretAction(),
    language="auto",
    language_identify_llm=language_llm,               # pre-built provider instance
    base_llm=UpsonicLLMProvider(model="gpt-4"),       # inline provider
    text_finder_llm=UpsonicLLMProvider(model="gpt-4")
)
Using with Agent
from upsonic import Agent, Task

# user_policy screens user input; agent_policy screens the agent's output.
agent = Agent(
    model="anthropic/claude-sonnet-4-6",
    name="Secure Assistant",
    user_policy=company_security_policy,
    agent_policy=company_security_policy,
    debug=True
)

# A blocking policy action surfaces as an exception at execution time.
try:
    task = Task("What's the status of Project Zeus?")
    result = agent.print_do(task)
    print(result)
except Exception as e:
    print(f"Blocked: {e}")
Tool safety policies validate tools at two stages:
- tool_policy_pre: Validates tools during registration (before task execution)
- tool_policy_post: Validates tool calls before execution (when the LLM calls a tool)
from upsonic import Agent, Task

# Tool safety: pre validates tools at registration, post validates each
# tool call just before it executes.
agent = Agent(
    model="anthropic/claude-sonnet-4-6",
    tool_policy_pre=company_security_policy,   # checked when tools are registered
    tool_policy_post=company_security_policy,  # checked when the LLM invokes a tool
    debug=True
)

task = Task("Delete all files", tools=[harmful_tool])
result = agent.print_do(task)
Streaming Support
Custom policies work seamlessly with streaming. For Anonymize actions, de-anonymization happens token-by-token in real-time:
import asyncio
from upsonic import Agent, Task
from upsonic.safety_engine.base import Policy
from upsonic.safety_engine.policies.pii_policies import PIIRule, PIIAnonymizeAction

async def main():
    # Scoped anonymization: description is scanned, system prompt is not.
    policy = Policy(
        name="Scoped PII",
        description="PII anonymization with scoping",
        rule=PIIRule(),
        action=PIIAnonymizeAction(),
        apply_to_description=True,
        apply_to_system_prompt=False,
    )

    agent = Agent(
        "anthropic/claude-sonnet-4-6",
        user_policy=policy,
        debug=True,
    )

    task = Task(description="My email is john.doe@example.com. What is my email?")

    # Pure text streaming; de-anonymization is applied token-by-token.
    async for text in agent.astream(task):
        print(text, end="", flush=True)
    print()

asyncio.run(main())
Async Support
All policies automatically support async operations:
import asyncio
from upsonic import Agent, Task

agent = Agent(
    model="anthropic/claude-sonnet-4-6",
    user_policy=company_security_policy,
    debug=True
)

# print_do_async is the awaitable counterpart of print_do; policies are
# enforced identically in both paths.
async def main():
    result = await agent.print_do_async(Task("Confidential query"))
    print(result)

asyncio.run(main())
Complete Example
Here’s a full working example combining everything:
from upsonic import Agent, Task
from upsonic.safety_engine.base import RuleBase, ActionBase, Policy
from upsonic.safety_engine.models import PolicyInput, RuleOutput, PolicyOutput
import re
class ProjectCodeRule(RuleBase):
    """Rule that flags internal project codes (e.g. ABC-1234) in the input."""

    name = "Project Code Rule"
    description = "Detects internal project codes"
    language = "en"

    def __init__(self, options=None):
        super().__init__(options)
        # A code is 2-4 uppercase letters, a dash, then 3-5 digits.
        self.pattern = r'\b[A-Z]{2,4}-\d{3,5}\b'

    def process(self, policy_input: PolicyInput) -> RuleOutput:
        """Scan the combined input texts and report any project codes found."""
        combined = " ".join(policy_input.input_texts or [])
        codes = re.findall(self.pattern, combined)
        if codes:
            return RuleOutput(
                confidence=1.0,
                content_type="PROJECT_CODE",
                details=f"Found {len(codes)} project codes",
                triggered_keywords=codes
            )
        return RuleOutput(
            confidence=0.0,
            content_type="SAFE",
            details="No project codes found"
        )
class ProjectCodeAction(ActionBase):
    """Action that redacts detected project codes from the content."""

    name = "Project Code Action"
    description = "Redacts project codes"
    language = "en"

    def action(self, rule_result: RuleOutput) -> PolicyOutput:
        """Redact matched codes on high confidence; otherwise pass through."""
        is_confident = rule_result.confidence >= 0.8
        return (
            self.replace_triggered_keywords("[PROJECT-CODE]")
            if is_confident
            else self.allow_content()
        )
# Wire the rule and action into a policy scoped to description and context.
project_policy = Policy(
    name="Project Code Policy",
    description="Protects internal project codes",
    rule=ProjectCodeRule(),
    action=ProjectCodeAction(),
    apply_to_description=True,
    apply_to_context=True,
    apply_to_system_prompt=False,  # system prompt is exempt from redaction
)

# agent_policy applies the redaction to the agent's output.
agent = Agent(
    model="anthropic/claude-sonnet-4-6",
    agent_policy=project_policy,
    debug=True
)

task = Task("The issue is in ABC-1234 and XYZ-5678")
result = agent.print_do(task)
print(result)