response = client.responses.create(
    model="gpt-3.5-turbo-0125",
    input=[
        {"role": "system", "content": "You are a helpful assistant designed to output JSON."},
        {"role": "user", "content": "Who won the world series in 2020? Please respond in the format {winner: ...}"}
    ],
    text={"format": {"type": "json_object"}}
)
{ "type": "function", "name": "get_weather", "description": "Retrieves current weather for the given location.", "parameters": { "type": "object", "properties": { "location": { "type": "string", "description": "City and country e.g. Bogotá, Colombia" }, "units": { "type": "string", "enum": [ "celsius", "fahrenheit" ], "description": "Units the temperature will be returned in." } }, "required": [ "location", "units" ], "additionalProperties": false }, "strict": true }
Streaming Responses
Streaming lets you start printing or processing the beginning of the model's output while the model is still generating the full response.
Set stream=True to enable it:
from openai import OpenAI

client = OpenAI()

stream = client.responses.create(
    model="gpt-4.1",
    input=[
        {
            "role": "user",
            "content": "Say 'double bubble bath' ten times fast.",
        },
    ],
    stream=True,
)

for event in stream:
    print(event)
The Responses API streams using semantic events, and each event is typed according to a predefined schema. For the full list of event types, see the streaming section of the API reference.
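Because each event is typed, you can branch on event.type rather than printing raw events. A minimal sketch, assuming a stream created as above (the event type names follow the Responses API streaming reference; verify them against your SDK version):

full_text = ""

for event in stream:
    # Incremental text arrives as output_text delta events
    if event.type == "response.output_text.delta":
        full_text += event.delta
        print(event.delta, end="", flush=True)
    # The final event signals that the response is complete
    elif event.type == "response.completed":
        print("\n\nFinal length:", len(full_text))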
Agent - Agno
Creating an Agent
A simple Agent:
from agno.agent import Agent
from agno.models.anthropic import Claude
from agno.tools.yfinance import YFinanceTools

agent = Agent(
    model=Claude(id="claude-sonnet-4-20250514"),
    tools=[YFinanceTools(stock_price=True)],
    instructions="Use tables to display data. Don't include any other text.",
    markdown=True,
)
agent.print_response("What is the stock price of Apple?", stream=True)
OpenAI-Like Models
from os import getenv

from agno.agent import Agent, RunResponse
from agno.models.openai.like import OpenAILike
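# The original snippet omits the agent construction. A minimal sketch, assuming an
# OpenAI-compatible endpoint; the model id, env var name, and base_url are placeholders.
agent = Agent(
    model=OpenAILike(
        id="your-model-id",
        api_key=getenv("EXAMPLE_API_KEY"),
        base_url="https://api.example.com/v1",
    )
)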
# Print the response in the terminal
agent.print_response("Share a 2 sentence horror story.")
Running an Agent
Starting with agno 1.6.0, run() returns an iterator of RunResponseEvent objects when called with stream=True.
from typing import Iterator

from agno.agent import Agent, RunResponseEvent
from agno.models.openai import OpenAIChat
from agno.utils.pprint import pprint_run_response
agent = Agent(model=OpenAIChat(id="gpt-4o-mini"))
# Run agent and return the response as a stream
response_stream: Iterator[RunResponseEvent] = agent.run(
    "Tell me a 5 second short story about a lion",
    stream=True
)

# Print the response stream in markdown format
pprint_run_response(response_stream, markdown=True)
Agent State

from agno.agent import Agent
from agno.models.openai import OpenAIChat
# Define a tool that adds an item to the shopping list
def add_item(agent: Agent, item: str) -> str:
    """Add an item to the shopping list."""
    agent.session_state["shopping_list"].append(item)
    return f"The shopping list is now {agent.session_state['shopping_list']}"
# Create an Agent that maintains state
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    # Initialize the session state with an empty shopping list
    session_state={"shopping_list": []},
    tools=[add_item],
    # You can use variables from the session state in the instructions
    instructions="Current state (shopping list) is: {shopping_list}",
    # Important: Add the state to the messages
    add_state_in_messages=True,
    markdown=True,
)
# Example usage
agent.print_response("Add milk, eggs, and bread to the shopping list", stream=True)
print(f"Final session state: {agent.session_state}")
Maintaining State Across Conversations
Example:
from textwrap import dedent
from agno.agent import Agent
from agno.models.openai import OpenAIChat

# Define tools to manage our shopping list
def add_item(agent: Agent, item: str) -> str:
    """Add an item to the shopping list and return confirmation."""
    # Add the item if it's not already in the list
    if item.lower() not in [i.lower() for i in agent.session_state["shopping_list"]]:
        agent.session_state["shopping_list"].append(item)
        return f"Added '{item}' to the shopping list"
    else:
        return f"'{item}' is already in the shopping list"

def remove_item(agent: Agent, item: str) -> str:
    """Remove an item from the shopping list by name."""
    # Case-insensitive search
    for i, list_item in enumerate(agent.session_state["shopping_list"]):
        if list_item.lower() == item.lower():
            agent.session_state["shopping_list"].pop(i)
            return f"Removed '{list_item}' from the shopping list"

    return f"'{item}' was not found in the shopping list"

def list_items(agent: Agent) -> str:
    """List all items in the shopping list."""
    shopping_list = agent.session_state["shopping_list"]

    if not shopping_list:
        return "The shopping list is empty."

    items_text = "\n".join([f"- {item}" for item in shopping_list])
    return f"Current shopping list:\n{items_text}"
# Create a Shopping List Manager Agent that maintains state
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    # Initialize the session state with an empty shopping list
    session_state={"shopping_list": []},
    tools=[add_item, remove_item, list_items],
    # You can use variables from the session state in the instructions
    instructions=dedent("""\
        Your job is to manage a shopping list.

        The shopping list starts empty. You can add items, remove items by name, and list all items.

        Current shopping list: {shopping_list}
    """),
    show_tool_calls=True,
    add_state_in_messages=True,
    markdown=True,
)
# Example usage
agent.print_response("Add milk, eggs, and bread to the shopping list", stream=True)
print(f"Session state: {agent.session_state}")

agent.print_response("I need apples and oranges", stream=True)
print(f"Session state: {agent.session_state}")

agent.print_response("whats on my list?", stream=True)
print(f"Session state: {agent.session_state}")

agent.print_response("Clear everything from my list and start over with just bananas and yogurt", stream=True)
print(f"Session state: {agent.session_state}")
Persisting State
This requires configuring storage.
Example:
"""Run `pip install agno openai sqlalchemy` to install dependencies."""
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.storage.sqlite import SqliteStorage

# Define a tool that adds an item to the shopping list
def add_item(agent: Agent, item: str) -> str:
    """Add an item to the shopping list."""
    if item not in agent.session_state["shopping_list"]:
        agent.session_state["shopping_list"].append(item)
    return f"The shopping list is now {agent.session_state['shopping_list']}"

agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    # Fix the session id to continue the same session across execution cycles
    session_id="fixed_id_for_demo",
    # Initialize the session state with an empty shopping list
    session_state={"shopping_list": []},
    # Add a tool that adds an item to the shopping list
    tools=[add_item],
    # Store the session state in a SQLite database
    storage=SqliteStorage(table_name="agent_sessions", db_file="tmp/data.db"),
    # Add the current shopping list from the state in the instructions
    instructions="Current shopping list is: {shopping_list}",
    # Important: Set `add_state_in_messages=True`
    # to make `{shopping_list}` available in the instructions
    add_state_in_messages=True,
    markdown=True,
)

# Example usage
agent.print_response("What's on my shopping list?", stream=True)
print(f"Session state: {agent.session_state}")
agent.print_response("Add milk, eggs, and bread", stream=True)
print(f"Session state: {agent.session_state}")
Memory
Agno provides three kinds of memory:
Session Storage (chat history and session state): stores the Agent's sessions and session_state in a database, enabling multi-turn conversations.
User Memories (user preferences): the Agent can store insights and facts about the user that it learns through conversations.
Session Summaries (chat summaries): the Agent can store condensed summaries of sessions (see the Session Summaries section below).
from agno.agent import Agent
from agno.memory.v2.db.sqlite import SqliteMemoryDb
from agno.memory.v2.memory import Memory
from agno.models.openai import OpenAIChat
from agno.storage.sqlite import SqliteStorage
from rich.pretty import pprint

# UserId for the memories
user_id = "ava"
# Database file for memory and storage
db_file = "tmp/agent.db"

# Initialize memory.v2
memory = Memory(
    # Use any model for creating memories
    model=OpenAIChat(id="gpt-4.1"),
    db=SqliteMemoryDb(table_name="user_memories", db_file=db_file),
)
# Initialize storage
storage = SqliteStorage(table_name="agent_sessions", db_file=db_file)

# Initialize Agent
memory_agent = Agent(
    model=OpenAIChat(id="gpt-4.1"),
    # Store memories in a database
    memory=memory,
    # Give the Agent the ability to update memories
    enable_agentic_memory=True,
    # OR - Run the MemoryManager after each response
    enable_user_memories=True,
    # Store the chat history in the database
    storage=storage,
    # Add the chat history to the messages
    add_history_to_messages=True,
    # Number of history runs
    num_history_runs=3,
    markdown=True,
)

memory.clear()

memory_agent.print_response(
    "My name is Ava and I like to ski.",
    user_id=user_id,
    stream=True,
    stream_intermediate_steps=True,
)
print("Memories about Ava:")
pprint(memory.get_user_memories(user_id=user_id))

memory_agent.print_response(
    "I live in san francisco, where should i move within a 4 hour drive?",
    user_id=user_id,
    stream=True,
    stream_intermediate_steps=True,
)
print("Memories about Ava:")
pprint(memory.get_user_memories(user_id=user_id))
Session Summaries
from agno.agent import Agent
from agno.memory.v2.db.sqlite import SqliteMemoryDb
from agno.memory.v2.memory import Memory
from agno.models.google.gemini import Gemini
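The original snippet stops at the imports. A minimal sketch of how the example could continue, assuming the Memory v2 API shown earlier and the enable_session_summaries flag; the model id, table name, db path, and prompt are placeholder assumptions.

memory = Memory(
    # Model used to generate the session summaries
    model=Gemini(id="gemini-2.0-flash-exp"),
    db=SqliteMemoryDb(table_name="memory", db_file="tmp/memory.db"),
)

agent = Agent(
    model=Gemini(id="gemini-2.0-flash-exp"),
    memory=memory,
    # Create a condensed summary of the session after each run
    enable_session_summaries=True,
)

agent.print_response("I'm planning a trip to Japan next spring.", stream=True)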
Tools

Add tools to an Agent by passing functions or Toolkits through the tools parameter:

agent = Agent(
    # Add functions or Toolkits
    tools=[...],
    # Show tool calls in the Agent response
    show_tool_calls=True
)
Using an Existing Toolkit
from agno.agent import Agent
from agno.tools.duckduckgo import DuckDuckGoTools

agent = Agent(tools=[DuckDuckGoTools()], show_tool_calls=True, markdown=True)
agent.print_response("Whats happening in France?", stream=True)
Custom Tools
import json

import httpx
from agno.agent import Agent
def get_top_hackernews_stories(num_stories: int = 10) -> str:
    """Use this function to get top stories from Hacker News.

    Args:
        num_stories (int): Number of stories to return. Defaults to 10.

    Returns:
        str: JSON string of top stories.
    """

    # Fetch top story IDs
    response = httpx.get('https://hacker-news.firebaseio.com/v0/topstories.json')
    story_ids = response.json()

    # Fetch story details
    stories = []
    for story_id in story_ids[:num_stories]:
        story_response = httpx.get(f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json')
        story = story_response.json()
        if "text" in story:
            story.pop("text", None)
        stories.append(story)
    return json.dumps(stories)

agent = Agent(tools=[get_top_hackernews_stories], show_tool_calls=True, markdown=True)
agent.print_response("Summarize the top 5 stories on hackernews?", stream=True)
Structured Output
from typing import List

from rich.pretty import pprint
from pydantic import BaseModel, Field

from agno.agent import Agent, RunResponse
from agno.models.openai import OpenAIChat

class MovieScript(BaseModel):
    setting: str = Field(..., description="Provide a nice setting for a blockbuster movie.")
    ending: str = Field(..., description="Ending of the movie. If not available, provide a happy ending.")
    genre: str = Field(
        ..., description="Genre of the movie. If not available, select action, thriller or romantic comedy."
    )
    name: str = Field(..., description="Give a name to this movie")
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(..., description="3 sentence storyline for the movie. Make it exciting!")

# Agent that uses JSON mode
json_mode_agent = Agent(
    model=OpenAIChat(id="gpt-4o"),
    description="You write movie scripts.",
    response_model=MovieScript,
    use_json_mode=True,
)
json_mode_agent.print_response("New York")
# Agent that uses structured outputs
structured_output_agent = Agent(
    model=OpenAIChat(id="gpt-4o"),
    description="You write movie scripts.",
    response_model=MovieScript,
)
structured_output_agent.print_response("New York")