from upsonic import Agent, Task, KnowledgeBase
from upsonic.loaders import TextLoader, TextLoaderConfig
from upsonic.embeddings import OpenAIEmbedding, OpenAIEmbeddingConfig
from upsonic.text_splitter import AgenticChunker, AgenticChunkingConfig
from upsonic.vectordb import ChromaProvider, ChromaConfig, ConnectionConfig, Mode
# --- Agentic chunking demo -------------------------------------------------
# Agent that drives the cognitive (proposition-based) chunking step.
agent = Agent("openai/gpt-4o")

# Chunker: agent-driven proposition splitting with caching and retries.
splitter = AgenticChunker(
    agent,
    AgenticChunkingConfig(
        chunk_size=512,
        chunk_overlap=50,
        max_agent_retries=3,
        enable_proposition_caching=True,
    ),
)

# Ingestion pipeline: plain-text loader plus OpenAI embeddings.
loader = TextLoader(TextLoaderConfig())
embedding = OpenAIEmbedding(OpenAIEmbeddingConfig())

# In-memory Chroma collection; 1536 matches the OpenAI embedding dimension.
vectordb = ChromaProvider(
    ChromaConfig(
        collection_name="agentic_docs",
        vector_size=1536,
        connection=ConnectionConfig(mode=Mode.IN_MEMORY),
    )
)

# Knowledge base wiring the loader, splitter, embedder, and store together.
kb = KnowledgeBase(
    sources=["document.txt"],
    embedding_provider=embedding,
    vectordb=vectordb,
    loaders=[loader],
    splitters=[splitter],
)

# Query the knowledge base through a second agent.
query_agent = Agent("openai/gpt-4o")
task = Task("What are the main propositions?", context=[kb])
result = query_agent.do(task)
print(result)