Run Agent Spec configurations with AutoGen
This usage example shows how to create a simple Agent Spec agent, serialize it to JSON, and convert it into an AutoGen assistant. It also covers mapping a ServerTool to a Python callable and running the conversation.
# Create an Agent Spec agent
from pyagentspec.agent import Agent
from pyagentspec.llms.openaicompatibleconfig import OpenAiCompatibleConfig
from pyagentspec.property import FloatProperty
from pyagentspec.tools import ServerTool
subtraction_tool = ServerTool(
    name="subtraction-tool",
    description="subtract one number from another",
    inputs=[FloatProperty(title="a"), FloatProperty(title="b")],
    outputs=[FloatProperty(title="difference")],
)
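# The ServerTool only declares the tool's interface (its name and typed
# inputs/outputs); the Python function that implements it is supplied later,
# when the configuration is loaded, through the adapter's tool_registry.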
agentspec_llm_config = OpenAiCompatibleConfig(
    name="llama-3.3-70b-instruct",
    model_id="/storage/models/Llama-3.3-70B-Instruct",
    url="url.to.my.llm",
)
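# The model_id and url above are placeholders; point them at your own
# OpenAI-compatible endpoint and model.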
agentspec_agent = Agent(
    name="agentspec_tools_test",
    description="agentspec_tools_test",
    llm_config=agentspec_llm_config,
    system_prompt="Perform subtraction with the given tool.",
    tools=[subtraction_tool],
)
# Export the Agent Spec configuration
from pyagentspec.serialization import AgentSpecSerializer
agentspec_config = AgentSpecSerializer().to_json(agentspec_agent)
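# The exported configuration is JSON, so it can also be written to disk and
# shared; the file name below is only an illustration.
with open("agentspec_tools_test.json", "w") as config_file:
    config_file.write(agentspec_config)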
# Load and run the Agent Spec configuration with AutoGen
from pyagentspec.adapters.autogen import AgentSpecLoader
def subtract(a: float, b: float) -> float:
    return a - b
async def main() -> None:
    # Map the ServerTool name to the Python function that implements it
    converter = AgentSpecLoader(tool_registry={"subtraction-tool": subtract})
    component = converter.load_json(agentspec_config)
    # Simple interactive loop: type "q" to stop the conversation
    while True:
        input_cmd = input("USER >> ")
        if input_cmd == "q":
            break
        result = await component.run(task=input_cmd)
        print(f"AGENT >> {result.messages[-1].content}")
    await component._model_client.close()
# anyio.run(main)
# USER >> Compute 987654321-123456789
# AGENT >> The result of the subtraction is 864197532.