# Minimal PydanticAI example: a one-sentence system prompt and a synchronous run.
# (Agent is imported from pydantic_ai — import shown in a later snippet.)
agent = Agent(
    'gemini-1.5-flash',
    system_prompt='Be concise, reply with one sentence.',
)

result = agent.run_sync('Where does "hello world" come from?')
print(result.data)
"""
The first known use of "hello, world" was in a 1974 textbook about the C programming language.
"""
# --- Pydantic in GenAI ---
# Structured extraction with the raw OpenAI client: the Pydantic model supplies
# the JSON schema for the tool definition, then validates the tool-call arguments.
from datetime import date

from pydantic import BaseModel
from openai import OpenAI


class User(BaseModel):
    """Definition of a user"""

    id: int
    name: str
    dob: date


response = OpenAI().chat.completions.create(
    model='gpt-4o',
    messages=[
        {'role': 'system', 'content': 'Extract information about the user'},
        {'role': 'user', 'content': 'The user with ID 123 is called Samuel, born on Jan 28th 87'},
    ],
    tools=[
        {
            'function': {
                'name': User.__name__,
                'description': User.__doc__,
                'parameters': User.model_json_schema(),
            },
            'type': 'function',
        }
    ],
)
user = User.model_validate_json(response.choices[0].message.tool_calls[0].function.arguments)
print(user)

# --- PydanticAI ---
# The same example with PydanticAI, the agent framework for production:
# result_type handles the tool definition and validation automatically.
from pydantic_ai import Agent

agent = Agent(
    'openai:gpt-4o',
    result_type=User,
    system_prompt='Extract information about the user',
)
result = agent.run_sync('The user with ID 123 is called Samuel, born on Jan 28th 87')
print(result.data)

# --- Why choose PydanticAI? ---
假设你正在制作一个应用,用户可以提交姓名、年龄和电子邮件。你希望确保
• 名称是一个字符串。
• 年龄只是一个数字。
• 电子邮件格式有效。
Pydantic 如何轻松做到这一点:
from pydantic import BaseModel, EmailStr


# Define the model
class User(BaseModel):
    name: str
    age: int
    email: EmailStr


# Example input
user_data = {
    "name": "Alice",
    "age": 25,
    "email": "alice@example.com",
}

# Validate the input
user = User(**user_data)

print(user.name)   # Alice
print(user.age)    # 25
print(user.email)  # alice@example.com
如果用户提交的数据无效(如 "年龄":"twenty-five"),Pydantic 会自动出错:
user_data = {
    "name": "Alice",
    "age": "twenty-five",  # Invalid
    "email": "alice@example.com",
}

user = User(**user_data)
# Error: value is not a valid integer
async def main():
    # MyDeps bundles an API key and an HTTP client; the client's lifetime is
    # managed here and the deps object is injected into agent.run.
    async with httpx.AsyncClient() as client:
        deps = MyDeps('foobar', client)
        result = await agent.run('Tell me a joke.', deps=deps)
        print(result.data)
        #> Did you hear about the toothpaste scandal? They called it Colgate.
MyDeps 是注入到 agent.run 方法中的依赖项
Function Tools功能工具
Function Tools 为模型提供了一种检索额外信息的机制,以帮助它们生成响应。 当把Agent可能需要的所有上下文放入system prompt不切实际或不可能时,或者当你想通过把生成响应所需的一些逻辑延迟到另一个(不一定由AI驱动的)工具来使Agent的行为更确定或更可靠时,它们就很有用。 向Agent注册工具有多种方法:
• 通过 @agent.tool 装饰器 - 用于需要访问Agent上下文的工具
• 通过 @agent.tool_plain 装饰器 - 用于不需要访问Agent上下文的工具
• 通过工具关键字参数,Agent 可以使用普通函数,也可以使用工具实例。
• @agent.tool 被认为是默认装饰器,因为在大多数情况下,工具需要访问Agent上下文。
下面是一个同时使用这两种方法的例子:
import random

from pydantic_ai import Agent, RunContext

# Dice-game agent: deps_type=str means the per-run dependency is the player's name.
agent = Agent(
    'gemini-1.5-flash',
    deps_type=str,
    system_prompt=(
        "You're a dice game, you should roll the die and see if the number "
        "you get back matches the user's guess. If so, tell them they're a winner. "
        "Use the player's name in the response."
    ),
)


@agent.tool_plain
def roll_die() -> str:
    """Roll a six-sided die and return the result."""
    return str(random.randint(1, 6))


@agent.tool
def get_player_name(ctx: RunContext[str]) -> str:
    """Get the player's name."""
    return ctx.deps


dice_result = agent.run_sync('My guess is 4', deps='Anne')
print(dice_result.data)
#> Congratulations Anne, you guessed correctly! You're a winner!
# Conversation continuity: pass result1.new_messages() as message_history so the
# second run sees the joke it is asked to explain.
agent = Agent('openai:gpt-4o', system_prompt='Be a helpful assistant.')

result1 = agent.run_sync('Tell me a joke.')
print(result1.data)
#> Did you hear about the toothpaste scandal? They called it Colgate.

result2 = agent.run_sync('Explain?', message_history=result1.new_messages())
print(result2.data)
#> This is an excellent joke invent by Samuel Colvin, it needs no explanation.

print(result2.all_messages())
"""
[
    ModelRequest(
        parts=[
            SystemPromptPart(
                content='Be a helpful assistant.', part_kind='system-prompt'
            ),
            UserPromptPart(
                content='Tell me a joke.',
                timestamp=datetime.datetime(...),
                part_kind='user-prompt',
            ),
        ],
        kind='request',
    ),
    ModelResponse(
        parts=[
            TextPart(
                content='Did you hear about the toothpaste scandal? They called it Colgate.',
                part_kind='text',
            )
        ],
        timestamp=datetime.datetime(...),
        kind='response',
    ),
    ModelRequest(
        parts=[
            UserPromptPart(
                content='Explain?',
                timestamp=datetime.datetime(...),
                part_kind='user-prompt',
            )
        ],
        kind='request',
    ),
    ModelResponse(
        parts=[
            TextPart(
                content='This is an excellent joke invent by Samuel Colvin, it needs no explanation.',
                part_kind='text',
            )
        ],
        timestamp=datetime.datetime(...),
        kind='response',
    ),
]
"""
async def run_weather_forecast(
    user_prompts: list[tuple[str, int]], conn: DatabaseConn
):
    """Run weather forecast for a list of user prompts and save."""
    async with WeatherService() as weather_service:

        async def run_forecast(prompt: str, user_id: int):
            result = await weather_agent.run(prompt, deps=weather_service)
            await conn.store_forecast(user_id, result.data)

        # run all prompts in parallel
        await asyncio.gather(
            *(run_forecast(prompt, user_id) for (prompt, user_id) in user_prompts)
        )
async def test_forecast():
    # TestModel replaces the real LLM; capture_run_messages records the exchange
    # so the full request/response sequence can be asserted below.
    conn = DatabaseConn()
    user_id = 1
    with capture_run_messages() as messages:
        with weather_agent.override(model=TestModel()):
            prompt = 'What will the weather be like in London on 2024-11-28?'
            await run_weather_forecast([(prompt, user_id)], conn)

    forecast = await conn.get_forecast(user_id)
    assert forecast == '{"weather_forecast":"Sunny with a chance of rain"}'

    assert messages == [
        ModelRequest(
            parts=[
                SystemPromptPart(
                    content='Providing a weather forecast at the locations the user provides.',
                ),
                UserPromptPart(
                    content='What will the weather be like in London on 2024-11-28?',
                    timestamp=IsNow(tz=timezone.utc),
                ),
            ]
        ),
        ModelResponse(
            parts=[
                ToolCallPart(
                    tool_name='weather_forecast',
                    args=ArgsDict(
                        args_dict={
                            'location': 'a',
                            'forecast_date': '2024-01-01',
                        }
                    ),
                    tool_call_id=None,
                )
            ],
            timestamp=IsNow(tz=timezone.utc),
        ),
        ModelRequest(
            parts=[
                ToolReturnPart(
                    tool_name='weather_forecast',
                    content='Sunny with a chance of rain',
                    tool_call_id=None,
                    timestamp=IsNow(tz=timezone.utc),
                ),
            ],
        ),
        ModelResponse(
            parts=[
                TextPart(
                    content='{"weather_forecast":"Sunny with a chance of rain"}',
                )
            ],
            timestamp=IsNow(tz=timezone.utc),
        ),
    ]
def call_weather_forecast(
    messages: list[ModelMessage], info: AgentInfo
) -> ModelResponse:
    """FunctionModel stand-in for the LLM: first call issues the tool call,
    second call turns the tool result into the final text response."""
    if len(messages) == 1:
        # first call, call the weather forecast tool
        user_prompt = messages[0].parts[-1]
        m = re.search(r'\d{4}-\d{2}-\d{2}', user_prompt.content)
        assert m is not None
        args = {'location': 'London', 'forecast_date': m.group()}
        return ModelResponse(
            parts=[ToolCallPart.from_raw_args('weather_forecast', args)]
        )
    else:
        # second call, return the forecast
        msg = messages[-1].parts[0]
        assert msg.part_kind == 'tool-return'
        return ModelResponse.from_text(f'The forecast is: {msg.content}')
async def test_forecast_future():
    # Use FunctionModel so the fake model's responses are fully scripted
    # by call_weather_forecast.
    conn = DatabaseConn()
    user_id = 1
    with weather_agent.override(model=FunctionModel(call_weather_forecast)):
        prompt = 'What will the weather be like in London on 2032-01-01?'
        await run_weather_forecast([(prompt, user_id)], conn)

    forecast = await conn.get_forecast(user_id)
    assert forecast == 'The forecast is: Rainy with a chance of sun'
from pydantic_ai import Agent
from pydantic import BaseModel


# Define the structure of the response
class CityInfo(BaseModel):
    city: str
    country: str


# Create an agent
agent = Agent(
    model='openai:gpt-4o',  # Specify your model
    result_type=CityInfo,   # Enforce the structure of the response
)

# Run the agent
if __name__ == '__main__':
    result = agent.run_sync("Tell me about Paris.")
    print(result.data)  # Outputs: {'city': 'Paris', 'country': 'France'}
from pydantic_ai import Agent, RunContext
import random

# Define the agent
agent = Agent('openai:gpt-4o')


# Add a tool to roll a die
@agent.tool
async def roll_die(ctx: RunContext, sides: int = 6) -> int:
    """Rolls a die with the specified number of sides."""
    return random.randint(1, sides)


# Run the agent
if __name__ == '__main__':
    result = agent.run_sync("Roll a 20-sided die.")
    print(result.data)  # Outputs a random number between 1 and 20
# Gemini model configured from the environment; fails fast with KeyError if
# GEMINI_API_KEY is unset.
model = GeminiModel('gemini-1.5-flash', api_key=os.environ['GEMINI_API_KEY'])

# Agent setup
search_agent = Agent(
    model=model,
    result_type=Content,
    system_prompt=(
        """you are Senior Research Analyst and your work as a leading tech think tank.
  Your expertise lies in identifying emerging trends.
  You have a knack for dissecting complex data and presenting actionable insights.given topic pydantic AI. Full analysis report in bullet points"""
    ),
    retries=2,
)

content_writer_agents = Agent(
    model=model,
    deps_type=Content,
    result_type=BlogPostBaseModel,
    system_prompt=(
        """You are a renowned Content Strategist, known for your insightful and engaging articles.use search_web for getting the list of points
  You transform complex concepts into compelling narratives.Full blog post of at least 4 paragraphs include paragrahs,headings, bullet points include html tags, please remove '\\n\\n'}"""
    ),
    retries=2,
)
# Web Search for your query
@search_agent.tool
async def search_web(
    ctx: RunContext[Deps], web_query: str
) -> str:
    """Web Search for your query."""
    # Tavily API key comes from the injected Deps; result is serialized to JSON
    # so the model receives a plain string.
    tavily_client = TavilyClient(api_key=ctx.deps.tvly_api_key)
    response = tavily_client.search(web_query)
    return json.dumps(response)
@search_agent.tool
async def content_writer_agent(
    ctx: RunContext[Deps], question: str
) -> str:
    """Use this tool to communicate with content strategist"""
    # Delegates to the content-strategist agent held in Deps and caches the
    # produced blog post on the deps object for later use.
    print(question)
    response = await ctx.deps.content_strategist_agent.run(user_prompt=question)
    ctx.deps.content = response.data
    print("contentstragist")
    return response.data