Installation
StateGraph is included in Upsonic. Make sure you have it installed:
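For example, with pip (assuming the package is published under the same name used in the imports below):

pip install upsonic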
Your First Graph
Let’s build a simple conversational AI that greets users and responds to their questions.
Step 1: Define Your State
States are typed dictionaries that flow through your graph. Use TypedDict for type safety:
from typing import Annotated, List
from typing_extensions import TypedDict
import operator
class ConversationState(TypedDict):
    # Messages append using operator.add as the reducer
    messages: Annotated[List[str], operator.add]
    # Simple fields - new values replace old
    current_step: str
    response: str
What are Reducers?
The Annotated[List[str], operator.add] syntax means “when merging state, use operator.add” - so new messages append to the list instead of replacing it.
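Because the reducer is a plain callable, you can see the behavior with operator.add directly:

import operator

operator.add(["User: Hi"], ["Hello!"])
# -> ['User: Hi', 'Hello!']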
Step 2: Create Node Functions
Nodes are functions that process state and return updates:
from upsonic.models import infer_model
from upsonic.messages import ModelRequest, UserPromptPart, SystemPromptPart
def greeting_node(state: ConversationState) -> dict:
    """Add a greeting to the conversation."""
    return {
        "messages": ["Hello! How can I help you today?"],
        "current_step": "greeted"
    }

def process_node(state: ConversationState) -> dict:
    """Process the user's message with an LLM."""
    # Get the conversation history
    all_messages = state.get("messages", [])
    # Use Upsonic's Model to generate a response
    model = infer_model("openai/gpt-4o-mini")
    request = ModelRequest(parts=[
        SystemPromptPart(content="You are a helpful assistant."),
        UserPromptPart(content=" ".join(all_messages))
    ])
    response_text = model.invoke([request])
    return {
        "response": response_text,
        "messages": [f"Assistant: {response_text}"],
        "current_step": "processed"
    }
Nodes receive the entire state but only need to return the fields they want to update. The graph handles merging automatically.
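To make the merge concrete, here is a sketch with illustrative values (not a real API call):

# Current state:
state = {"messages": ["User: Hi"], "current_step": "started", "response": ""}

# A node returns only the fields it wants to change:
update = {"messages": ["Hello!"], "current_step": "greeted"}

# The graph merges them: "messages" has the operator.add reducer, so the
# lists are concatenated; "current_step" has no reducer, so it is replaced;
# "response" was not returned, so it is left untouched:
# {"messages": ["User: Hi", "Hello!"], "current_step": "greeted", "response": ""}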
Step 3: Build the Graph
Connect your nodes with edges:
from upsonic.graphv2 import StateGraph, START, END
# Create the graph builder
builder = StateGraph(ConversationState)
# Add nodes
builder.add_node("greeting", greeting_node)
builder.add_node("process", process_node)
# Add edges
builder.add_edge(START, "greeting") # Start → greeting
builder.add_edge("greeting", "process") # greeting → process
builder.add_edge("process", END) # process → End
# Compile the graph
graph = builder.compile()
START and END are special constants representing the entry and exit points of your graph.
Step 4: Execute the Graph
Now invoke your graph with initial state:
result = graph.invoke({
    "messages": ["User: Tell me a joke about programming"],
    "current_step": "started",
    "response": ""
})
print(f"Current step: {result['current_step']}")
print(f"Final response: {result['response']}")
print(f"All messages: {result['messages']}")
Output (the model's response will vary between runs):
Current step: processed
Final response: Why do programmers prefer dark mode? Because light attracts bugs!
All messages: ['User: Tell me a joke about programming', 'Hello! How can I help you today?', 'Assistant: Why do programmers prefer dark mode? Because light attracts bugs!']
Complete Example
Here’s the full code in one place:
from typing import Annotated, List
from typing_extensions import TypedDict
import operator
from upsonic.graphv2 import StateGraph, START, END
from upsonic.models import infer_model
from upsonic.messages import ModelRequest, UserPromptPart, SystemPromptPart
# 1. Define state
class ConversationState(TypedDict):
    messages: Annotated[List[str], operator.add]
    current_step: str
    response: str

# 2. Define nodes
def greeting_node(state: ConversationState) -> dict:
    return {
        "messages": ["Hello! How can I help you today?"],
        "current_step": "greeted"
    }

def process_node(state: ConversationState) -> dict:
    all_messages = state.get("messages", [])
    model = infer_model("openai/gpt-4o-mini")
    request = ModelRequest(parts=[
        SystemPromptPart(content="You are a helpful assistant."),
        UserPromptPart(content=" ".join(all_messages))
    ])
    response_text = model.invoke([request])
    return {
        "response": response_text,
        "messages": [f"Assistant: {response_text}"],
        "current_step": "processed"
    }
# 3. Build graph
builder = StateGraph(ConversationState)
builder.add_node("greeting", greeting_node)
builder.add_node("process", process_node)
builder.add_edge(START, "greeting")
builder.add_edge("greeting", "process")
builder.add_edge("process", END)
# 4. Compile and execute
graph = builder.compile()
result = graph.invoke({
    "messages": ["User: Tell me a joke"],
    "current_step": "started",
    "response": ""
})
print(result["response"])
Adding Conditional Logic
Let’s extend our graph to route based on user intent:
from upsonic.graphv2 import Command
from typing import Literal
class ConversationState(TypedDict):
    messages: Annotated[List[str], operator.add]
    intent: str
    response: str

def classify_intent(state: ConversationState) -> Command[Literal["answer_question", "tell_joke", END]]:
    """Classify user intent and route accordingly."""
    user_message = state["messages"][-1].lower()
    if "joke" in user_message:
        return Command(
            update={"intent": "joke"},
            goto="tell_joke"
        )
    elif "?" in user_message:
        return Command(
            update={"intent": "question"},
            goto="answer_question"
        )
    else:
        return Command(
            update={"intent": "unknown", "response": "I'm not sure how to help with that."},
            goto=END
        )
def tell_joke(state: ConversationState) -> dict:
    return {"response": "Why do programmers prefer dark mode? Light attracts bugs!"}

def answer_question(state: ConversationState) -> dict:
    # Use the LLM to answer the question, mirroring process_node above
    model = infer_model("openai/gpt-4o-mini")
    request = ModelRequest(parts=[
        UserPromptPart(content=state["messages"][-1])
    ])
    response = model.invoke([request])
    return {"response": response}
# Build with routing
builder = StateGraph(ConversationState)
builder.add_node("classify", classify_intent)
builder.add_node("tell_joke", tell_joke)
builder.add_node("answer_question", answer_question)
builder.add_edge(START, "classify")
builder.add_edge("tell_joke", END)
builder.add_edge("answer_question", END)
graph = builder.compile()
# Test different intents
result1 = graph.invoke({
    "messages": ["Tell me a joke!"],
    "intent": "",
    "response": ""
})
print(f"Joke: {result1['response']}")

result2 = graph.invoke({
    "messages": ["What is Python?"],
    "intent": "",
    "response": ""
})
print(f"Answer: {result2['response']}")
Command allows nodes to both update state and specify where to go next. The type annotation Command[Literal["answer_question", "tell_joke", END]] provides type safety for routing.
What’s Next?
Now that you’ve built your first graph, explore more advanced features, starting with the common patterns below.
Common Patterns
Pattern 1: Linear Pipeline
builder.add_edge(START, "step1")
builder.add_edge("step1", "step2")
builder.add_edge("step2", "step3")
builder.add_edge("step3", END)
Pattern 2: Conditional Branching
builder.add_edge(START, "classify")
# classify node returns Command(goto=...) to route
builder.add_edge("option_a", END)
builder.add_edge("option_b", END)
Pattern 3: Loops
def check_condition(state) -> Command:
    if state["count"] < 10:
        return Command(update={"count": state["count"] + 1}, goto="process")
    return Command(goto=END)

builder.add_node("process", process_node)
builder.add_node("check", check_condition)
builder.add_edge(START, "process")
builder.add_edge("process", "check")
# "check" returns a Command to loop back to "process" or to exit
When creating loops, always set a recursion_limit in your config to prevent infinite loops:

graph.invoke(state, config={"recursion_limit": 50})
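Putting Pattern 3 together with the recursion limit, a complete sketch might look like this, using only the API shown above; here the worker node does the increment and the check node only routes, and the counting logic is purely illustrative:

from typing_extensions import TypedDict
from upsonic.graphv2 import StateGraph, START, END, Command

class CounterState(TypedDict):
    count: int

def process_node(state: CounterState) -> dict:
    # One unit of work per iteration: increment the counter
    return {"count": state["count"] + 1}

def check_condition(state: CounterState) -> Command:
    # Loop back to "process" until count reaches 10, then exit
    if state["count"] < 10:
        return Command(goto="process")
    return Command(goto=END)

builder = StateGraph(CounterState)
builder.add_node("process", process_node)
builder.add_node("check", check_condition)
builder.add_edge(START, "process")
builder.add_edge("process", "check")

graph = builder.compile()
result = graph.invoke({"count": 0}, config={"recursion_limit": 50})
print(result["count"])  # 10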