Commit 07345d0

Make shallow copies of lists instead of deep copies (#1490)
We were making deep copies, which (1) is inefficient and (2) causes some pickling errors. Instead, this PR just makes shallow copies, calling list.copy(). We do want a shallow copy so that mutations don't affect the original passed-in list.
1 parent c913454 commit 07345d0
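
For context, a minimal sketch (not part of the commit) of the shallow-copy semantics the change relies on: list.copy() creates a new outer list, so appending to the copy cannot grow the caller's list, while the element objects are shared rather than duplicated, which is what makes it cheaper than copy.deepcopy().

# Minimal sketch (not from this commit): what list.copy() guarantees.
original = [{"role": "user", "content": "hi"}]

shallow = original.copy()          # new list object, same element objects
shallow.append({"role": "assistant", "content": "hello"})

assert len(original) == 1          # the caller's list did not grow
assert shallow[0] is original[0]   # elements are shared, not duplicated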

File tree: 3 files changed, +47 -8 lines changed


src/agents/items.py

Lines changed: 1 addition & 2 deletions
@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 import abc
-import copy
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, Generic, Literal, TypeVar, Union
 
@@ -277,7 +276,7 @@ def input_to_new_input_list(
                     "role": "user",
                 }
             ]
-        return copy.deepcopy(input)
+        return input.copy()
 
     @classmethod
     def text_message_outputs(cls, items: list[RunItem]) -> str:
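
A hedged usage sketch (not part of the diff; it assumes the agents package from this repo is importable): with this change, the list branch of input_to_new_input_list returns a shallow copy, so appends to the result stay local to the new list while the item dicts are still the caller's objects.

from agents import ItemHelpers

# String input is wrapped in a fresh user-message list, as before.
assert ItemHelpers.input_to_new_input_list("hi") == [{"content": "hi", "role": "user"}]

# List input now gets a shallow copy: new outer list, same element objects.
history = [{"role": "user", "content": "hello"}]
new_input = ItemHelpers.input_to_new_input_list(history)
new_input.append({"role": "user", "content": "follow-up"})
assert len(history) == 1            # the original list is untouched
assert new_input[0] is history[0]   # elements are not duplicated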

src/agents/run.py

Lines changed: 11 additions & 6 deletions
@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 import asyncio
-import copy
 import inspect
 from dataclasses import dataclass, field
 from typing import Any, Callable, Generic, cast
@@ -387,7 +386,7 @@ async def run(
             disabled=run_config.tracing_disabled,
         ):
             current_turn = 0
-            original_input: str | list[TResponseInputItem] = copy.deepcopy(prepared_input)
+            original_input: str | list[TResponseInputItem] = _copy_str_or_list(prepared_input)
             generated_items: list[RunItem] = []
             model_responses: list[ModelResponse] = []
 
@@ -446,7 +445,7 @@ async def run(
                             starting_agent,
                             starting_agent.input_guardrails
                             + (run_config.input_guardrails or []),
-                            copy.deepcopy(prepared_input),
+                            _copy_str_or_list(prepared_input),
                             context_wrapper,
                         ),
                         self._run_single_turn(
@@ -594,7 +593,7 @@ def run_streamed(
         )
 
         streamed_result = RunResultStreaming(
-            input=copy.deepcopy(input),
+            input=_copy_str_or_list(input),
             new_items=[],
             current_agent=starting_agent,
             raw_responses=[],
@@ -647,7 +646,7 @@ async def _maybe_filter_model_input(
 
         try:
             model_input = ModelInputData(
-                input=copy.deepcopy(effective_input),
+                input=effective_input.copy(),
                 instructions=effective_instructions,
             )
             filter_payload: CallModelData[TContext] = CallModelData(
@@ -786,7 +785,7 @@ async def _start_streaming(
                 cls._run_input_guardrails_with_queue(
                     starting_agent,
                     starting_agent.input_guardrails + (run_config.input_guardrails or []),
-                    copy.deepcopy(ItemHelpers.input_to_new_input_list(prepared_input)),
+                    ItemHelpers.input_to_new_input_list(prepared_input),
                     context_wrapper,
                     streamed_result,
                     current_span,
@@ -1376,3 +1375,9 @@ async def _save_result_to_session(
 
 
 DEFAULT_AGENT_RUNNER = AgentRunner()
+
+
+def _copy_str_or_list(input: str | list[TResponseInputItem]) -> str | list[TResponseInputItem]:
+    if isinstance(input, str):
+        return input
+    return input.copy()
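
On point (2) of the commit message: copy.deepcopy falls back to the pickle protocol for objects it has no dedicated handler for, so a list element that cannot be pickled (the pydantic ValidatorIterator named in the new test is one such object, and a plain generator behaves the same way) makes the deep copy raise, while list.copy() never touches the elements at all. A small illustrative sketch, using a generator as a stand-in for any unpicklable element:

import copy

items = [{"role": "user", "stream": (c for c in "hi")}]   # generator: not deep-copyable

try:
    copy.deepcopy(items)
except TypeError as exc:
    print("deepcopy failed:", exc)    # e.g. "cannot pickle 'generator' object"

shallow = items.copy()                # fine: only the outer list is duplicated
assert shallow[0] is items[0]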

tests/test_items_helpers.py

Lines changed: 35 additions & 0 deletions
@@ -1,5 +1,7 @@
 from __future__ import annotations
 
+import json
+
 from openai.types.responses.response_computer_tool_call import (
     ActionScreenshot,
     ResponseComputerToolCall,
@@ -20,8 +22,10 @@
 from openai.types.responses.response_output_message_param import ResponseOutputMessageParam
 from openai.types.responses.response_output_refusal import ResponseOutputRefusal
 from openai.types.responses.response_output_text import ResponseOutputText
+from openai.types.responses.response_output_text_param import ResponseOutputTextParam
 from openai.types.responses.response_reasoning_item import ResponseReasoningItem, Summary
 from openai.types.responses.response_reasoning_item_param import ResponseReasoningItemParam
+from pydantic import TypeAdapter
 
 from agents import (
     Agent,
@@ -290,3 +294,34 @@ def test_to_input_items_for_reasoning() -> None:
     print(converted_dict)
     print(expected)
     assert converted_dict == expected
+
+
+def test_input_to_new_input_list_copies_the_ones_produced_by_pydantic() -> None:
+    # Given a list of message dictionaries, ensure the returned list is a deep copy.
+    original = ResponseOutputMessageParam(
+        id="a75654dc-7492-4d1c-bce0-89e8312fbdd7",
+        content=[
+            ResponseOutputTextParam(
+                type="output_text",
+                text="Hey, what's up?",
+                annotations=[],
+            )
+        ],
+        role="assistant",
+        status="completed",
+        type="message",
+    )
+    original_json = json.dumps(original)
+    output_item = TypeAdapter(ResponseOutputMessageParam).validate_json(original_json)
+    new_list = ItemHelpers.input_to_new_input_list([output_item])
+    assert len(new_list) == 1
+    assert new_list[0]["id"] == original["id"]  # type: ignore
+    size = 0
+    for i, item in enumerate(new_list[0]["content"]):  # type: ignore
+        size += 1  # pydantic_core._pydantic_core.ValidatorIterator does not support len()
+        assert item["type"] == original["content"][i]["type"]  # type: ignore
+        assert item["text"] == original["content"][i]["text"]  # type: ignore
+    assert size == 1
+    assert new_list[0]["role"] == original["role"]  # type: ignore
+    assert new_list[0]["status"] == original["status"]  # type: ignore
+    assert new_list[0]["type"] == original["type"]
