# Fail fast if any required configuration value is missing, so the user
# gets a clear message instead of an opaque downstream auth/connect error.
if not BASE_URL or not API_KEY or not MODEL_NAME:
    raise ValueError(
        "Please set EXAMPLE_BASE_URL, EXAMPLE_API_KEY, EXAMPLE_MODEL_NAME via env var or code."
    )
"""This example uses a custom provider for all requests by default. We do three things: 1. Create a custom client. 2. Set it as the default OpenAI client, and don't use it for tracing. 3. Set the default API as Chat Completions, as most LLM providers don't yet support Responses API. Note that in this example, we disable tracing under the assumption that you don't have an API key from platform.openai.com. If you do have one, you can either set the `OPENAI_API_KEY` env var or call set_tracing_export_api_key() to set a tracing specific key. """
@function_tool
def get_weather(city: str) -> str:
    """Demo tool: return a canned weather report for *city*.

    Args:
        city: Name of the city to report on.

    Returns:
        A fixed "sunny" report string; no real weather service is called.
    """
    # Debug print so the example shows when the model actually invokes the tool.
    print(f"[debug] getting weather for {city}")
    return f"The weather in {city} is sunny."
async def main() -> None:
    """Run a one-shot agent query against the custom model provider.

    Builds an agent wired to MODEL_NAME with the get_weather tool, asks a
    single question, and prints the final output.
    """
    agent = Agent(
        name="Assistant",
        instructions="You are a helpful assistant. be VERY concise.",
        model=MODEL_NAME,
        tools=[get_weather],
    )

    result = await Runner.run(agent, "What's the weather in Tokyo?")
    print(result.final_output)
guardrail_results = [] # 依次执行围栏任务,如果触发了tripwire_triggered,中文意思理解为绊线? # 就取消所有的任务 for done in asyncio.as_completed(guardrail_tasks): result = await done if result.output.tripwire_triggered: # Cancel all guardrail tasks if a tripwire is triggered. for t in guardrail_tasks: t.cancel() _error_tracing.attach_error_to_current_span( SpanError( message="Guardrail tripwire triggered", data={"guardrail": result.guardrail.get_name()}, ) ) raise InputGuardrailTripwireTriggered(result) else: guardrail_results.append(result)
"""Create a new guardrail span. The span will not be started automatically, you should either do `with guardrail_span() ...` or call `span.start()` + `span.finish()` manually. Args: name: The name of the guardrail. triggered: Whether the guardrail was triggered. span_id: The ID of the span. Optional. If not provided, we will generate an ID. We recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are correctly formatted. parent: The parent span or trace. If not provided, we will automatically use the current trace/span as the parent. disabled: If True, we will return a Span but the Span will not be recorded. """
--> if should_run_agent_start_hooks: asyncio.gather()  # in short: run the hook functions first
--> await cls._get_new_response
--> model = cls._get_model(agent, run_config)  # this goes through OpenAIProvider's get_model method, described above
--> new_response = await model.get_response
--> context_wrapper.usage.add(new_response.usage)
--> return new_response
--> return await cls._get_single_step_result_from_response  # returns a ProcessedResponse
--> RunImpl.process_model_response  # returns a SingleStepResult
--> await RunImpl.execute_tools_and_side_effects
# First, lets run the tool calls - function tools and computer actions
--> await asyncio.gather(execute_function_tool_calls, execute_computer_actions)
# Second, check if there are any handoffs
--> return await cls.execute_handoffs
# Third, we'll check if the tool use should result in a final output
# if check_tool_use.is_final_output: return await cls.execute_final_output
# check if the model also produced a final output
--> potential_final_output_text
# There are two possibilities that lead to a final output:
# 1. Structured output schema => always leads to a final output
# 2. Plain text output schema => only leads to a final output if there are no tool calls
--> return await cls.execute_final_output
# If there's no final output, we can just run again
--> return SingleStepResult  # this is the final result
The final return value is the `output` field of `turn_result.next_step = NextStepFinalOutput(output='The weather in Tokyo is sunny.')`.