Commit d88bf14

Bugfix | Fixed a bug when calling reasoning models with store=False (#920)
resolves #919
1 parent 0f21c8a commit d88bf14

3 files changed: +11 −1 lines changed

src/agents/model_settings.py (5 additions & 0 deletions)

@@ -5,6 +5,7 @@
 from typing import Any, Literal
 
 from openai._types import Body, Headers, Query
+from openai.types.responses import ResponseIncludable
 from openai.types.shared import Reasoning
 from pydantic import BaseModel
 
@@ -61,6 +62,10 @@ class ModelSettings:
     """Whether to include usage chunk.
     Defaults to True if not provided."""
 
+    response_include: list[ResponseIncludable] | None = None
+    """Additional output data to include in the model response.
+    [include parameter](https://platform.openai.com/docs/api-reference/responses/create#responses-create-include)"""
+
     extra_query: Query | None = None
     """Additional query fields to provide with the request.
     Defaults to None if not provided."""
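
The new setting is passed through ModelSettings. Below is a minimal usage sketch, not part of this commit: it assumes the SDK's public Agent, Runner, and ModelSettings entry points, uses "o4-mini" as a placeholder reasoning model, and reuses the include value exercised in the test below.

import asyncio

from openai.types.shared import Reasoning

from agents import Agent, ModelSettings, Runner

agent = Agent(
    name="Assistant",
    instructions="Answer concisely.",
    model="o4-mini",  # placeholder for any reasoning-capable model
    model_settings=ModelSettings(
        store=False,  # do not persist the response server-side
        reasoning=Reasoning(effort="medium"),
        # With store=False, ask the Responses API to return encrypted reasoning
        # content in the output instead of relying on server-side storage.
        response_include=["reasoning.encrypted_content"],
    ),
)


async def main() -> None:
    result = await Runner.run(agent, "Summarize what response_include does.")
    print(result.final_output)


if __name__ == "__main__":
    asyncio.run(main())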

src/agents/models/openai_responses.py (5 additions & 1 deletion)

@@ -240,6 +240,10 @@ async def _fetch_response(
         converted_tools = Converter.convert_tools(tools, handoffs)
         response_format = Converter.get_response_format(output_schema)
 
+        include: list[ResponseIncludable] = converted_tools.includes
+        if model_settings.response_include is not None:
+            include = list({*include, *model_settings.response_include})
+
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Calling LLM")
         else:
@@ -258,7 +262,7 @@ async def _fetch_response(
             instructions=self._non_null_or_not_given(system_instructions),
             model=self.model,
             input=list_input,
-            include=converted_tools.includes,
+            include=include,
             tools=converted_tools.tools,
             prompt=self._non_null_or_not_given(prompt),
             temperature=self._non_null_or_not_given(model_settings.temperature),
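
The merge above takes the union of the includes required by the converted tools and any user-supplied response_include, so nothing is requested twice. A standalone sketch of that dedup behavior, with illustrative include values:

# Illustrative values only; the set union drops the duplicate entry.
converted_includes = ["file_search_call.results"]
user_includes = ["reasoning.encrypted_content", "file_search_call.results"]

merged = list({*converted_includes, *user_includes})
print(sorted(merged))  # set order is arbitrary, so sort for a stable printout
# ['file_search_call.results', 'reasoning.encrypted_content']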

tests/model_settings/test_serialization.py (1 addition & 0 deletions)

@@ -44,6 +44,7 @@ def test_all_fields_serialization() -> None:
         metadata={"foo": "bar"},
         store=False,
         include_usage=False,
+        response_include=["reasoning.encrypted_content"],
         extra_query={"foo": "bar"},
         extra_body={"foo": "bar"},
         extra_headers={"foo": "bar"},