From 4e28a424e6afd60040e3bdf7c76eebb63bc0c407 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 21 Aug 2025 16:10:05 -0500
Subject: [PATCH] release: 1.101.0 (#2577)
* feat(api): adding support for /v1/conversations to the API
* chore: update github action
* feat(api): Add connectors support for MCP tool
* release: 1.101.0
---------
Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
---
.github/workflows/ci.yml | 4 +-
.release-please-manifest.json | 2 +-
.stats.yml | 8 +-
CHANGELOG.md | 14 +
api.md | 49 ++
pyproject.toml | 2 +-
src/openai/__init__.py | 1 +
src/openai/_client.py | 38 ++
src/openai/_module_client.py | 8 +
src/openai/_version.py | 2 +-
src/openai/pagination.py | 67 ++-
.../resources/conversations/__init__.py | 33 ++
.../resources/conversations/conversations.py | 474 +++++++++++++++
src/openai/resources/conversations/items.py | 553 ++++++++++++++++++
src/openai/resources/responses/input_items.py | 8 -
src/openai/resources/responses/responses.py | 60 +-
src/openai/types/conversations/__init__.py | 27 +
.../computer_screenshot_content.py | 22 +
.../container_file_citation_body.py | 27 +
.../types/conversations/conversation.py | 30 +
.../conversation_create_params.py | 26 +
.../conversation_deleted_resource.py | 15 +
.../types/conversations/conversation_item.py | 209 +++++++
.../conversations/conversation_item_list.py | 26 +
.../conversation_update_params.py | 19 +
.../types/conversations/file_citation_body.py | 21 +
.../types/conversations/input_file_content.py | 22 +
.../conversations/input_image_content.py | 28 +
.../types/conversations/input_text_content.py | 15 +
.../types/conversations/item_create_params.py | 24 +
.../types/conversations/item_list_params.py | 48 ++
.../conversations/item_retrieve_params.py | 22 +
src/openai/types/conversations/lob_prob.py | 18 +
src/openai/types/conversations/message.py | 56 ++
.../conversations/output_text_content.py | 30 +
.../types/conversations/refusal_content.py | 15 +
.../conversations/summary_text_content.py | 13 +
.../types/conversations/text_content.py | 13 +
.../types/conversations/top_log_prob.py | 15 +
.../types/conversations/url_citation_body.py | 24 +
...create_eval_completions_run_data_source.py | 26 +-
..._eval_completions_run_data_source_param.py | 24 +-
src/openai/types/responses/__init__.py | 1 +
.../types/responses/input_item_list_params.py | 3 -
src/openai/types/responses/response.py | 15 +-
.../responses/response_conversation_param.py | 12 +
.../types/responses/response_create_params.py | 14 +
src/openai/types/responses/tool.py | 84 ++-
src/openai/types/responses/tool_param.py | 82 ++-
tests/api_resources/conversations/__init__.py | 1 +
.../api_resources/conversations/test_items.py | 491 ++++++++++++++++
.../responses/test_input_items.py | 2 -
tests/api_resources/test_conversations.py | 341 +++++++++++
tests/api_resources/test_responses.py | 4 +
54 files changed, 3114 insertions(+), 74 deletions(-)
create mode 100644 src/openai/resources/conversations/__init__.py
create mode 100644 src/openai/resources/conversations/conversations.py
create mode 100644 src/openai/resources/conversations/items.py
create mode 100644 src/openai/types/conversations/__init__.py
create mode 100644 src/openai/types/conversations/computer_screenshot_content.py
create mode 100644 src/openai/types/conversations/container_file_citation_body.py
create mode 100644 src/openai/types/conversations/conversation.py
create mode 100644 src/openai/types/conversations/conversation_create_params.py
create mode 100644 src/openai/types/conversations/conversation_deleted_resource.py
create mode 100644 src/openai/types/conversations/conversation_item.py
create mode 100644 src/openai/types/conversations/conversation_item_list.py
create mode 100644 src/openai/types/conversations/conversation_update_params.py
create mode 100644 src/openai/types/conversations/file_citation_body.py
create mode 100644 src/openai/types/conversations/input_file_content.py
create mode 100644 src/openai/types/conversations/input_image_content.py
create mode 100644 src/openai/types/conversations/input_text_content.py
create mode 100644 src/openai/types/conversations/item_create_params.py
create mode 100644 src/openai/types/conversations/item_list_params.py
create mode 100644 src/openai/types/conversations/item_retrieve_params.py
create mode 100644 src/openai/types/conversations/lob_prob.py
create mode 100644 src/openai/types/conversations/message.py
create mode 100644 src/openai/types/conversations/output_text_content.py
create mode 100644 src/openai/types/conversations/refusal_content.py
create mode 100644 src/openai/types/conversations/summary_text_content.py
create mode 100644 src/openai/types/conversations/text_content.py
create mode 100644 src/openai/types/conversations/top_log_prob.py
create mode 100644 src/openai/types/conversations/url_citation_body.py
create mode 100644 src/openai/types/responses/response_conversation_param.py
create mode 100644 tests/api_resources/conversations/__init__.py
create mode 100644 tests/api_resources/conversations/test_items.py
create mode 100644 tests/api_resources/test_conversations.py
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 8067386d5f..5e56aae09a 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -36,7 +36,7 @@ jobs:
run: ./scripts/lint
build:
- if: github.repository == 'stainless-sdks/openai-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork)
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
timeout-minutes: 10
name: build
permissions:
@@ -61,12 +61,14 @@ jobs:
run: rye build
- name: Get GitHub OIDC Token
+ if: github.repository == 'stainless-sdks/openai-python'
id: github-oidc
uses: actions/github-script@v6
with:
script: core.setOutput('github_token', await core.getIDToken());
- name: Upload tarball
+ if: github.repository == 'stainless-sdks/openai-python'
env:
URL: https://pkg.stainless.com/s
AUTH: ${{ steps.github-oidc.outputs.github_token }}
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index f3cdcd790c..070375331a 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "1.100.3"
+ ".": "1.101.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index d4994342f7..f2d5304a5b 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-7ef7a457c3bf05364e66e48c9ca34f31bfef1f6c9b7c15b1812346105e0abb16.yml
-openapi_spec_hash: a2b1f5d8fbb62175c93b0ebea9f10063
-config_hash: 4870312b04f48fd717ea4151053e7fb9
+configured_endpoints: 119
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-ddbdf9343316047e8a773c54fb24e4a8d225955e202a1888fde6f9c8898ebf98.yml
+openapi_spec_hash: 9802f6dd381558466c897f6e387e06ca
+config_hash: fe0ea26680ac2075a6cd66416aefe7db
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c2f89cb09b..44b25e0a4c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,19 @@
# Changelog
+## 1.101.0 (2025-08-21)
+
+Full Changelog: [v1.100.3...v1.101.0](https://github.com/openai/openai-python/compare/v1.100.3...v1.101.0)
+
+### Features
+
+* **api:** Add connectors support for MCP tool ([a47f962](https://github.com/openai/openai-python/commit/a47f962daf579c142b8af5579be732772b688a29))
+* **api:** adding support for /v1/conversations to the API ([e30bcbc](https://github.com/openai/openai-python/commit/e30bcbc0cb7c827af779bee6971f976261abfb67))
+
+
+### Chores
+
+* update github action ([7333b28](https://github.com/openai/openai-python/commit/7333b282718a5f6977f30e1a2548207b3a089bd4))
+
## 1.100.3 (2025-08-20)
Full Changelog: [v1.100.2...v1.100.3](https://github.com/openai/openai-python/compare/v1.100.2...v1.100.3)
diff --git a/api.md b/api.md
index 92b068b134..7eb62e67f2 100644
--- a/api.md
+++ b/api.md
@@ -751,6 +751,7 @@ from openai.types.responses import (
ResponseContent,
ResponseContentPartAddedEvent,
ResponseContentPartDoneEvent,
+ ResponseConversationParam,
ResponseCreatedEvent,
ResponseCustomToolCall,
ResponseCustomToolCallInputDeltaEvent,
@@ -854,6 +855,54 @@ Methods:
- client.responses.input_items.list(response_id, \*\*params) -> SyncCursorPage[ResponseItem]
+# Conversations
+
+Types:
+
+```python
+from openai.types.conversations import (
+ ComputerScreenshotContent,
+ ContainerFileCitationBody,
+ Conversation,
+ ConversationDeleted,
+ ConversationDeletedResource,
+ FileCitationBody,
+ InputFileContent,
+ InputImageContent,
+ InputTextContent,
+ LobProb,
+ Message,
+ OutputTextContent,
+ RefusalContent,
+ SummaryTextContent,
+ TextContent,
+ TopLogProb,
+ URLCitationBody,
+)
+```
+
+Methods:
+
+- client.conversations.create(\*\*params) -> Conversation
+- client.conversations.retrieve(conversation_id) -> Conversation
+- client.conversations.update(conversation_id, \*\*params) -> Conversation
+- client.conversations.delete(conversation_id) -> ConversationDeletedResource
+
+## Items
+
+Types:
+
+```python
+from openai.types.conversations import ConversationItem, ConversationItemList
+```
+
+Methods:
+
+- client.conversations.items.create(conversation_id, \*\*params) -> ConversationItemList
+- client.conversations.items.retrieve(item_id, \*, conversation_id, \*\*params) -> ConversationItem
+- client.conversations.items.list(conversation_id, \*\*params) -> SyncConversationCursorPage[ConversationItem]
+- client.conversations.items.delete(item_id, \*, conversation_id) -> Conversation
+
# Evals
Types:
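
The api.md additions above map onto the client like so — a minimal usage sketch; the seeded message and metadata values are illustrative placeholders, not part of the patch:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Create a conversation, optionally seeding it with initial items.
conversation = client.conversations.create(
    items=[{"type": "message", "role": "user", "content": "Hello!"}],
    metadata={"topic": "demo"},
)

# Retrieve, update, and delete by conversation ID.
conversation = client.conversations.retrieve(conversation.id)
conversation = client.conversations.update(conversation.id, metadata={"topic": "updated"})
deleted = client.conversations.delete(conversation.id)
```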
diff --git a/pyproject.toml b/pyproject.toml
index 4d1055bfce..8198b178be 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
-version = "1.100.3"
+version = "1.101.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/openai/__init__.py b/src/openai/__init__.py
index 226fed9554..b944fbed5e 100644
--- a/src/openai/__init__.py
+++ b/src/openai/__init__.py
@@ -386,5 +386,6 @@ def _reset_client() -> None: # type: ignore[reportUnusedFunction]
completions as completions,
fine_tuning as fine_tuning,
moderations as moderations,
+ conversations as conversations,
vector_stores as vector_stores,
)
diff --git a/src/openai/_client.py b/src/openai/_client.py
index ed9b46f4b0..b99db786a7 100644
--- a/src/openai/_client.py
+++ b/src/openai/_client.py
@@ -51,6 +51,7 @@
completions,
fine_tuning,
moderations,
+ conversations,
vector_stores,
)
from .resources.files import Files, AsyncFiles
@@ -69,6 +70,7 @@
from .resources.responses.responses import Responses, AsyncResponses
from .resources.containers.containers import Containers, AsyncContainers
from .resources.fine_tuning.fine_tuning import FineTuning, AsyncFineTuning
+ from .resources.conversations.conversations import Conversations, AsyncConversations
from .resources.vector_stores.vector_stores import VectorStores, AsyncVectorStores
__all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "OpenAI", "AsyncOpenAI", "Client", "AsyncClient"]
@@ -254,6 +256,12 @@ def responses(self) -> Responses:
return Responses(self)
+ @cached_property
+ def conversations(self) -> Conversations:
+ from .resources.conversations import Conversations
+
+ return Conversations(self)
+
@cached_property
def evals(self) -> Evals:
from .resources.evals import Evals
@@ -573,6 +581,12 @@ def responses(self) -> AsyncResponses:
return AsyncResponses(self)
+ @cached_property
+ def conversations(self) -> AsyncConversations:
+ from .resources.conversations import AsyncConversations
+
+ return AsyncConversations(self)
+
@cached_property
def evals(self) -> AsyncEvals:
from .resources.evals import AsyncEvals
@@ -802,6 +816,12 @@ def responses(self) -> responses.ResponsesWithRawResponse:
return ResponsesWithRawResponse(self._client.responses)
+ @cached_property
+ def conversations(self) -> conversations.ConversationsWithRawResponse:
+ from .resources.conversations import ConversationsWithRawResponse
+
+ return ConversationsWithRawResponse(self._client.conversations)
+
@cached_property
def evals(self) -> evals.EvalsWithRawResponse:
from .resources.evals import EvalsWithRawResponse
@@ -905,6 +925,12 @@ def responses(self) -> responses.AsyncResponsesWithRawResponse:
return AsyncResponsesWithRawResponse(self._client.responses)
+ @cached_property
+ def conversations(self) -> conversations.AsyncConversationsWithRawResponse:
+ from .resources.conversations import AsyncConversationsWithRawResponse
+
+ return AsyncConversationsWithRawResponse(self._client.conversations)
+
@cached_property
def evals(self) -> evals.AsyncEvalsWithRawResponse:
from .resources.evals import AsyncEvalsWithRawResponse
@@ -1008,6 +1034,12 @@ def responses(self) -> responses.ResponsesWithStreamingResponse:
return ResponsesWithStreamingResponse(self._client.responses)
+ @cached_property
+ def conversations(self) -> conversations.ConversationsWithStreamingResponse:
+ from .resources.conversations import ConversationsWithStreamingResponse
+
+ return ConversationsWithStreamingResponse(self._client.conversations)
+
@cached_property
def evals(self) -> evals.EvalsWithStreamingResponse:
from .resources.evals import EvalsWithStreamingResponse
@@ -1111,6 +1143,12 @@ def responses(self) -> responses.AsyncResponsesWithStreamingResponse:
return AsyncResponsesWithStreamingResponse(self._client.responses)
+ @cached_property
+ def conversations(self) -> conversations.AsyncConversationsWithStreamingResponse:
+ from .resources.conversations import AsyncConversationsWithStreamingResponse
+
+ return AsyncConversationsWithStreamingResponse(self._client.conversations)
+
@cached_property
def evals(self) -> evals.AsyncEvalsWithStreamingResponse:
from .resources.evals import AsyncEvalsWithStreamingResponse
diff --git a/src/openai/_module_client.py b/src/openai/_module_client.py
index a80e939300..5c8df24014 100644
--- a/src/openai/_module_client.py
+++ b/src/openai/_module_client.py
@@ -22,6 +22,7 @@
from .resources.responses.responses import Responses
from .resources.containers.containers import Containers
from .resources.fine_tuning.fine_tuning import FineTuning
+ from .resources.conversations.conversations import Conversations
from .resources.vector_stores.vector_stores import VectorStores
from . import _load_client
@@ -130,6 +131,12 @@ def __load__(self) -> VectorStores:
return _load_client().vector_stores
+class ConversationsProxy(LazyProxy["Conversations"]):
+ @override
+ def __load__(self) -> Conversations:
+ return _load_client().conversations
+
+
chat: Chat = ChatProxy().__as_proxied__()
beta: Beta = BetaProxy().__as_proxied__()
files: Files = FilesProxy().__as_proxied__()
@@ -147,3 +154,4 @@ def __load__(self) -> VectorStores:
moderations: Moderations = ModerationsProxy().__as_proxied__()
fine_tuning: FineTuning = FineTuningProxy().__as_proxied__()
vector_stores: VectorStores = VectorStoresProxy().__as_proxied__()
+conversations: Conversations = ConversationsProxy().__as_proxied__()
diff --git a/src/openai/_version.py b/src/openai/_version.py
index 9881b45247..802084af5d 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "openai"
-__version__ = "1.100.3" # x-release-please-version
+__version__ = "1.101.0" # x-release-please-version
diff --git a/src/openai/pagination.py b/src/openai/pagination.py
index a59cced854..4dd3788aa3 100644
--- a/src/openai/pagination.py
+++ b/src/openai/pagination.py
@@ -5,7 +5,14 @@
from ._base_client import BasePage, PageInfo, BaseSyncPage, BaseAsyncPage
-__all__ = ["SyncPage", "AsyncPage", "SyncCursorPage", "AsyncCursorPage"]
+__all__ = [
+ "SyncPage",
+ "AsyncPage",
+ "SyncCursorPage",
+ "AsyncCursorPage",
+ "SyncConversationCursorPage",
+ "AsyncConversationCursorPage",
+]
_T = TypeVar("_T")
@@ -123,3 +130,61 @@ def next_page_info(self) -> Optional[PageInfo]:
return None
return PageInfo(params={"after": item.id})
+
+
+class SyncConversationCursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]):
+ data: List[_T]
+ has_more: Optional[bool] = None
+ last_id: Optional[str] = None
+
+ @override
+ def _get_page_items(self) -> List[_T]:
+ data = self.data
+ if not data:
+ return []
+ return data
+
+ @override
+ def has_next_page(self) -> bool:
+ has_more = self.has_more
+ if has_more is not None and has_more is False:
+ return False
+
+ return super().has_next_page()
+
+ @override
+ def next_page_info(self) -> Optional[PageInfo]:
+ last_id = self.last_id
+ if not last_id:
+ return None
+
+ return PageInfo(params={"after": last_id})
+
+
+class AsyncConversationCursorPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]):
+ data: List[_T]
+ has_more: Optional[bool] = None
+ last_id: Optional[str] = None
+
+ @override
+ def _get_page_items(self) -> List[_T]:
+ data = self.data
+ if not data:
+ return []
+ return data
+
+ @override
+ def has_next_page(self) -> bool:
+ has_more = self.has_more
+ if has_more is not None and has_more is False:
+ return False
+
+ return super().has_next_page()
+
+ @override
+ def next_page_info(self) -> Optional[PageInfo]:
+ last_id = self.last_id
+ if not last_id:
+ return None
+
+ return PageInfo(params={"after": last_id})
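
The two new page classes paginate on a `last_id` cursor and stop once `has_more` is false. A sketch of walking pages by hand, assuming a placeholder conversation ID (`.has_next_page()` and `.get_next_page()` come from the shared page base classes):

```python
from openai import OpenAI

client = OpenAI()

page = client.conversations.items.list("conv_123", limit=20)
while True:
    for item in page.data:
        print(item.type)
    if not page.has_next_page():  # False once has_more is False or last_id is unset
        break
    page = page.get_next_page()  # re-issues the request with after=page.last_id
```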
diff --git a/src/openai/resources/conversations/__init__.py b/src/openai/resources/conversations/__init__.py
new file mode 100644
index 0000000000..c6c4fd6ee4
--- /dev/null
+++ b/src/openai/resources/conversations/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .items import (
+ Items,
+ AsyncItems,
+ ItemsWithRawResponse,
+ AsyncItemsWithRawResponse,
+ ItemsWithStreamingResponse,
+ AsyncItemsWithStreamingResponse,
+)
+from .conversations import (
+ Conversations,
+ AsyncConversations,
+ ConversationsWithRawResponse,
+ AsyncConversationsWithRawResponse,
+ ConversationsWithStreamingResponse,
+ AsyncConversationsWithStreamingResponse,
+)
+
+__all__ = [
+ "Items",
+ "AsyncItems",
+ "ItemsWithRawResponse",
+ "AsyncItemsWithRawResponse",
+ "ItemsWithStreamingResponse",
+ "AsyncItemsWithStreamingResponse",
+ "Conversations",
+ "AsyncConversations",
+ "ConversationsWithRawResponse",
+ "AsyncConversationsWithRawResponse",
+ "ConversationsWithStreamingResponse",
+ "AsyncConversationsWithStreamingResponse",
+]
diff --git a/src/openai/resources/conversations/conversations.py b/src/openai/resources/conversations/conversations.py
new file mode 100644
index 0000000000..13bc1fb1ce
--- /dev/null
+++ b/src/openai/resources/conversations/conversations.py
@@ -0,0 +1,474 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Iterable, Optional
+
+import httpx
+
+from ... import _legacy_response
+from .items import (
+ Items,
+ AsyncItems,
+ ItemsWithRawResponse,
+ AsyncItemsWithRawResponse,
+ ItemsWithStreamingResponse,
+ AsyncItemsWithStreamingResponse,
+)
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ..._base_client import make_request_options
+from ...types.conversations import conversation_create_params, conversation_update_params
+from ...types.shared_params.metadata import Metadata
+from ...types.conversations.conversation import Conversation
+from ...types.responses.response_input_item_param import ResponseInputItemParam
+from ...types.conversations.conversation_deleted_resource import ConversationDeletedResource
+
+__all__ = ["Conversations", "AsyncConversations"]
+
+
+class Conversations(SyncAPIResource):
+ @cached_property
+ def items(self) -> Items:
+ return Items(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> ConversationsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return ConversationsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ConversationsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return ConversationsWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ items: Optional[Iterable[ResponseInputItemParam]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Conversation:
+ """
+        Create a conversation.
+
+ Args:
+ items: Initial items to include in the conversation context. You may add up to 20 items
+ at a time.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. Useful for storing
+ additional information about the object in a structured format.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/conversations",
+ body=maybe_transform(
+ {
+ "items": items,
+ "metadata": metadata,
+ },
+ conversation_create_params.ConversationCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Conversation,
+ )
+
+ def retrieve(
+ self,
+ conversation_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Conversation:
+ """
+ Get a conversation with the given ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ return self._get(
+ f"/conversations/{conversation_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Conversation,
+ )
+
+ def update(
+ self,
+ conversation_id: str,
+ *,
+ metadata: Dict[str, str],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Conversation:
+ """
+ Update a conversation's metadata with the given ID.
+
+ Args:
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard. Keys are strings with a maximum
+ length of 64 characters. Values are strings with a maximum length of 512
+ characters.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ return self._post(
+ f"/conversations/{conversation_id}",
+ body=maybe_transform({"metadata": metadata}, conversation_update_params.ConversationUpdateParams),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Conversation,
+ )
+
+ def delete(
+ self,
+ conversation_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ConversationDeletedResource:
+ """
+ Delete a conversation with the given ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ return self._delete(
+ f"/conversations/{conversation_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConversationDeletedResource,
+ )
+
+
+class AsyncConversations(AsyncAPIResource):
+ @cached_property
+ def items(self) -> AsyncItems:
+ return AsyncItems(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncConversationsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncConversationsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncConversationsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncConversationsWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ items: Optional[Iterable[ResponseInputItemParam]] | NotGiven = NOT_GIVEN,
+ metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Conversation:
+ """
+        Create a conversation.
+
+ Args:
+ items: Initial items to include in the conversation context. You may add up to 20 items
+ at a time.
+
+ metadata: Set of 16 key-value pairs that can be attached to an object. Useful for storing
+ additional information about the object in a structured format.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/conversations",
+ body=await async_maybe_transform(
+ {
+ "items": items,
+ "metadata": metadata,
+ },
+ conversation_create_params.ConversationCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Conversation,
+ )
+
+ async def retrieve(
+ self,
+ conversation_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Conversation:
+ """
+ Get a conversation with the given ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ return await self._get(
+ f"/conversations/{conversation_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Conversation,
+ )
+
+ async def update(
+ self,
+ conversation_id: str,
+ *,
+ metadata: Dict[str, str],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Conversation:
+ """
+ Update a conversation's metadata with the given ID.
+
+ Args:
+ metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
+ for storing additional information about the object in a structured format, and
+ querying for objects via API or the dashboard. Keys are strings with a maximum
+ length of 64 characters. Values are strings with a maximum length of 512
+ characters.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ return await self._post(
+ f"/conversations/{conversation_id}",
+ body=await async_maybe_transform(
+ {"metadata": metadata}, conversation_update_params.ConversationUpdateParams
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Conversation,
+ )
+
+ async def delete(
+ self,
+ conversation_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ConversationDeletedResource:
+ """
+ Delete a conversation with the given ID.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ return await self._delete(
+ f"/conversations/{conversation_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ConversationDeletedResource,
+ )
+
+
+class ConversationsWithRawResponse:
+ def __init__(self, conversations: Conversations) -> None:
+ self._conversations = conversations
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ conversations.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ conversations.retrieve,
+ )
+ self.update = _legacy_response.to_raw_response_wrapper(
+ conversations.update,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ conversations.delete,
+ )
+
+ @cached_property
+ def items(self) -> ItemsWithRawResponse:
+ return ItemsWithRawResponse(self._conversations.items)
+
+
+class AsyncConversationsWithRawResponse:
+ def __init__(self, conversations: AsyncConversations) -> None:
+ self._conversations = conversations
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ conversations.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ conversations.retrieve,
+ )
+ self.update = _legacy_response.async_to_raw_response_wrapper(
+ conversations.update,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ conversations.delete,
+ )
+
+ @cached_property
+ def items(self) -> AsyncItemsWithRawResponse:
+ return AsyncItemsWithRawResponse(self._conversations.items)
+
+
+class ConversationsWithStreamingResponse:
+ def __init__(self, conversations: Conversations) -> None:
+ self._conversations = conversations
+
+ self.create = to_streamed_response_wrapper(
+ conversations.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ conversations.retrieve,
+ )
+ self.update = to_streamed_response_wrapper(
+ conversations.update,
+ )
+ self.delete = to_streamed_response_wrapper(
+ conversations.delete,
+ )
+
+ @cached_property
+ def items(self) -> ItemsWithStreamingResponse:
+ return ItemsWithStreamingResponse(self._conversations.items)
+
+
+class AsyncConversationsWithStreamingResponse:
+ def __init__(self, conversations: AsyncConversations) -> None:
+ self._conversations = conversations
+
+ self.create = async_to_streamed_response_wrapper(
+ conversations.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ conversations.retrieve,
+ )
+ self.update = async_to_streamed_response_wrapper(
+ conversations.update,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ conversations.delete,
+ )
+
+ @cached_property
+ def items(self) -> AsyncItemsWithStreamingResponse:
+ return AsyncItemsWithStreamingResponse(self._conversations.items)
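
As with other resources, the wrapper classes above expose `.with_raw_response` and `.with_streaming_response` variants of each method. A short sketch, with a placeholder conversation ID:

```python
from openai import OpenAI

client = OpenAI()

# Raw response: inspect headers, then parse into the Conversation model.
raw = client.conversations.with_raw_response.retrieve("conv_123")
print(raw.headers.get("x-request-id"))
conversation = raw.parse()

# Streaming response: the body is not read until you ask for it.
with client.conversations.with_streaming_response.retrieve("conv_123") as response:
    print(response.headers.get("content-type"))
```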
diff --git a/src/openai/resources/conversations/items.py b/src/openai/resources/conversations/items.py
new file mode 100644
index 0000000000..1e696a79ed
--- /dev/null
+++ b/src/openai/resources/conversations/items.py
@@ -0,0 +1,553 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Any, List, Iterable, cast
+from typing_extensions import Literal
+
+import httpx
+
+from ... import _legacy_response
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
+from ...pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from ..._base_client import AsyncPaginator, make_request_options
+from ...types.conversations import item_list_params, item_create_params, item_retrieve_params
+from ...types.conversations.conversation import Conversation
+from ...types.responses.response_includable import ResponseIncludable
+from ...types.conversations.conversation_item import ConversationItem
+from ...types.responses.response_input_item_param import ResponseInputItemParam
+from ...types.conversations.conversation_item_list import ConversationItemList
+
+__all__ = ["Items", "AsyncItems"]
+
+
+class Items(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> ItemsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return ItemsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ItemsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return ItemsWithStreamingResponse(self)
+
+ def create(
+ self,
+ conversation_id: str,
+ *,
+ items: Iterable[ResponseInputItemParam],
+ include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ConversationItemList:
+ """
+ Create items in a conversation with the given ID.
+
+ Args:
+ items: The items to add to the conversation. You may add up to 20 items at a time.
+
+ include: Additional fields to include in the response. See the `include` parameter for
+ [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
+ for more information.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ return self._post(
+ f"/conversations/{conversation_id}/items",
+ body=maybe_transform({"items": items}, item_create_params.ItemCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"include": include}, item_create_params.ItemCreateParams),
+ ),
+ cast_to=ConversationItemList,
+ )
+
+ def retrieve(
+ self,
+ item_id: str,
+ *,
+ conversation_id: str,
+ include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ConversationItem:
+ """
+ Get a single item from a conversation with the given IDs.
+
+ Args:
+ include: Additional fields to include in the response. See the `include` parameter for
+ [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
+ for more information.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ if not item_id:
+ raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
+ return cast(
+ ConversationItem,
+ self._get(
+ f"/conversations/{conversation_id}/items/{item_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"include": include}, item_retrieve_params.ItemRetrieveParams),
+ ),
+ cast_to=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system
+ ),
+ )
+
+ def list(
+ self,
+ conversation_id: str,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> SyncConversationCursorPage[ConversationItem]:
+ """
+ List all items for a conversation with the given ID.
+
+ Args:
+ after: An item ID to list items after, used in pagination.
+
+ include: Specify additional output data to include in the model response. Currently
+ supported values are:
+
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ in code interpreter tool call items.
+ - `computer_call_output.output.image_url`: Include image urls from the computer
+ call output.
+ - `file_search_call.results`: Include the search results of the file search tool
+ call.
+ - `message.input_image.image_url`: Include image urls from the input message.
+ - `message.output_text.logprobs`: Include logprobs with assistant messages.
+ - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+ tokens in reasoning item outputs. This enables reasoning items to be used in
+ multi-turn conversations when using the Responses API statelessly (like when
+ the `store` parameter is set to `false`, or when an organization is enrolled
+ in the zero data retention program).
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: The order to return the input items in. Default is `desc`.
+
+ - `asc`: Return the input items in ascending order.
+ - `desc`: Return the input items in descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ return self._get_api_list(
+ f"/conversations/{conversation_id}/items",
+ page=SyncConversationCursorPage[ConversationItem],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "include": include,
+ "limit": limit,
+ "order": order,
+ },
+ item_list_params.ItemListParams,
+ ),
+ ),
+ model=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system
+ )
+
+ def delete(
+ self,
+ item_id: str,
+ *,
+ conversation_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Conversation:
+ """
+ Delete an item from a conversation with the given IDs.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ if not item_id:
+ raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
+ return self._delete(
+ f"/conversations/{conversation_id}/items/{item_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Conversation,
+ )
+
+
+class AsyncItems(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncItemsWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncItemsWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncItemsWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/openai/openai-python#with_streaming_response
+ """
+ return AsyncItemsWithStreamingResponse(self)
+
+ async def create(
+ self,
+ conversation_id: str,
+ *,
+ items: Iterable[ResponseInputItemParam],
+ include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ConversationItemList:
+ """
+ Create items in a conversation with the given ID.
+
+ Args:
+ items: The items to add to the conversation. You may add up to 20 items at a time.
+
+ include: Additional fields to include in the response. See the `include` parameter for
+ [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
+ for more information.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ return await self._post(
+ f"/conversations/{conversation_id}/items",
+ body=await async_maybe_transform({"items": items}, item_create_params.ItemCreateParams),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"include": include}, item_create_params.ItemCreateParams),
+ ),
+ cast_to=ConversationItemList,
+ )
+
+ async def retrieve(
+ self,
+ item_id: str,
+ *,
+ conversation_id: str,
+ include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> ConversationItem:
+ """
+ Get a single item from a conversation with the given IDs.
+
+ Args:
+ include: Additional fields to include in the response. See the `include` parameter for
+ [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
+ for more information.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ if not item_id:
+ raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
+ return cast(
+ ConversationItem,
+ await self._get(
+ f"/conversations/{conversation_id}/items/{item_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"include": include}, item_retrieve_params.ItemRetrieveParams),
+ ),
+ cast_to=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system
+ ),
+ )
+
+ def list(
+ self,
+ conversation_id: str,
+ *,
+ after: str | NotGiven = NOT_GIVEN,
+ include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
+ limit: int | NotGiven = NOT_GIVEN,
+ order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AsyncPaginator[ConversationItem, AsyncConversationCursorPage[ConversationItem]]:
+ """
+ List all items for a conversation with the given ID.
+
+ Args:
+ after: An item ID to list items after, used in pagination.
+
+ include: Specify additional output data to include in the model response. Currently
+ supported values are:
+
+ - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+ in code interpreter tool call items.
+ - `computer_call_output.output.image_url`: Include image urls from the computer
+ call output.
+ - `file_search_call.results`: Include the search results of the file search tool
+ call.
+ - `message.input_image.image_url`: Include image urls from the input message.
+ - `message.output_text.logprobs`: Include logprobs with assistant messages.
+ - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+ tokens in reasoning item outputs. This enables reasoning items to be used in
+ multi-turn conversations when using the Responses API statelessly (like when
+ the `store` parameter is set to `false`, or when an organization is enrolled
+ in the zero data retention program).
+
+ limit: A limit on the number of objects to be returned. Limit can range between 1 and
+ 100, and the default is 20.
+
+ order: The order to return the input items in. Default is `desc`.
+
+ - `asc`: Return the input items in ascending order.
+ - `desc`: Return the input items in descending order.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ return self._get_api_list(
+ f"/conversations/{conversation_id}/items",
+ page=AsyncConversationCursorPage[ConversationItem],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "include": include,
+ "limit": limit,
+ "order": order,
+ },
+ item_list_params.ItemListParams,
+ ),
+ ),
+ model=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system
+ )
+
+ async def delete(
+ self,
+ item_id: str,
+ *,
+ conversation_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> Conversation:
+ """
+ Delete an item from a conversation with the given IDs.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not conversation_id:
+ raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}")
+ if not item_id:
+ raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}")
+ return await self._delete(
+ f"/conversations/{conversation_id}/items/{item_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=Conversation,
+ )
+
+
+class ItemsWithRawResponse:
+ def __init__(self, items: Items) -> None:
+ self._items = items
+
+ self.create = _legacy_response.to_raw_response_wrapper(
+ items.create,
+ )
+ self.retrieve = _legacy_response.to_raw_response_wrapper(
+ items.retrieve,
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ items.list,
+ )
+ self.delete = _legacy_response.to_raw_response_wrapper(
+ items.delete,
+ )
+
+
+class AsyncItemsWithRawResponse:
+ def __init__(self, items: AsyncItems) -> None:
+ self._items = items
+
+ self.create = _legacy_response.async_to_raw_response_wrapper(
+ items.create,
+ )
+ self.retrieve = _legacy_response.async_to_raw_response_wrapper(
+ items.retrieve,
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ items.list,
+ )
+ self.delete = _legacy_response.async_to_raw_response_wrapper(
+ items.delete,
+ )
+
+
+class ItemsWithStreamingResponse:
+ def __init__(self, items: Items) -> None:
+ self._items = items
+
+ self.create = to_streamed_response_wrapper(
+ items.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ items.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ items.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ items.delete,
+ )
+
+
+class AsyncItemsWithStreamingResponse:
+ def __init__(self, items: AsyncItems) -> None:
+ self._items = items
+
+ self.create = async_to_streamed_response_wrapper(
+ items.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ items.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ items.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ items.delete,
+ )
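
Putting the Items sub-resource together — a hedged sketch; the IDs are placeholders, and the message item follows the Responses input-item shape:

```python
from openai import OpenAI

client = OpenAI()

created = client.conversations.items.create(
    "conv_123",
    items=[{"type": "message", "role": "user", "content": "What's next?"}],
)

# `list` returns a SyncConversationCursorPage; iterating auto-paginates.
for item in client.conversations.items.list("conv_123", order="asc"):
    print(item.id, item.type)

item = client.conversations.items.retrieve(created.data[0].id, conversation_id="conv_123")
client.conversations.items.delete(item.id, conversation_id="conv_123")
```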
diff --git a/src/openai/resources/responses/input_items.py b/src/openai/resources/responses/input_items.py
index a425a65c3e..9f3ef637ce 100644
--- a/src/openai/resources/responses/input_items.py
+++ b/src/openai/resources/responses/input_items.py
@@ -47,7 +47,6 @@ def list(
response_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
@@ -64,8 +63,6 @@ def list(
Args:
after: An item ID to list items after, used in pagination.
- before: An item ID to list items before, used in pagination.
-
include: Additional fields to include in the response. See the `include` parameter for
Response creation above for more information.
@@ -98,7 +95,6 @@ def list(
query=maybe_transform(
{
"after": after,
- "before": before,
"include": include,
"limit": limit,
"order": order,
@@ -135,7 +131,6 @@ def list(
response_id: str,
*,
after: str | NotGiven = NOT_GIVEN,
- before: str | NotGiven = NOT_GIVEN,
include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
limit: int | NotGiven = NOT_GIVEN,
order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
@@ -152,8 +147,6 @@ def list(
Args:
after: An item ID to list items after, used in pagination.
- before: An item ID to list items before, used in pagination.
-
include: Additional fields to include in the response. See the `include` parameter for
Response creation above for more information.
@@ -186,7 +179,6 @@ def list(
query=maybe_transform(
{
"after": after,
- "before": before,
"include": include,
"limit": limit,
"order": order,
diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py
index 375f8b7e71..d0862f5d76 100644
--- a/src/openai/resources/responses/responses.py
+++ b/src/openai/resources/responses/responses.py
@@ -77,6 +77,7 @@ def create(
self,
*,
background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -127,6 +128,11 @@ def create(
background: Whether to run the model response in the background.
[Learn more](https://platform.openai.com/docs/guides/background).
+ conversation: The conversation that this response belongs to. Items from this conversation are
+ prepended to `input_items` for this response request. Input items and output
+ items from this response are automatically added to this conversation after this
+ response completes.
+
include: Specify additional output data to include in the model response. Currently
supported values are:
@@ -187,6 +193,7 @@ def create(
previous_response_id: The unique ID of the previous response to the model. Use this to create
multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ Cannot be used in conjunction with `conversation`.
prompt: Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
@@ -305,6 +312,7 @@ def create(
*,
stream: Literal[True],
background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -361,6 +369,11 @@ def create(
background: Whether to run the model response in the background.
[Learn more](https://platform.openai.com/docs/guides/background).
+ conversation: The conversation that this response belongs to. Items from this conversation are
+ prepended to `input_items` for this response request. Input items and output
+ items from this response are automatically added to this conversation after this
+ response completes.
+
include: Specify additional output data to include in the model response. Currently
supported values are:
@@ -421,6 +434,7 @@ def create(
previous_response_id: The unique ID of the previous response to the model. Use this to create
multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ Cannot be used in conjunction with `conversation`.
prompt: Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
@@ -532,6 +546,7 @@ def create(
*,
stream: bool,
background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -588,6 +603,11 @@ def create(
background: Whether to run the model response in the background.
[Learn more](https://platform.openai.com/docs/guides/background).
+ conversation: The conversation that this response belongs to. Items from this conversation are
+ prepended to `input_items` for this response request. Input items and output
+ items from this response are automatically added to this conversation after this
+ response completes.
+
include: Specify additional output data to include in the model response. Currently
supported values are:
@@ -648,6 +668,7 @@ def create(
previous_response_id: The unique ID of the previous response to the model. Use this to create
multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ Cannot be used in conjunction with `conversation`.
prompt: Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
@@ -757,6 +778,7 @@ def create(
self,
*,
background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -794,6 +816,7 @@ def create(
body=maybe_transform(
{
"background": background,
+ "conversation": conversation,
"include": include,
"input": input,
"instructions": instructions,
@@ -866,7 +889,7 @@ def stream(
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -1009,6 +1032,7 @@ def parse(
*,
text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1027,7 +1051,7 @@ def parse(
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -1065,6 +1089,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
body=maybe_transform(
{
"background": background,
+ "conversation": conversation,
"include": include,
"input": input,
"instructions": instructions,
@@ -1440,6 +1465,7 @@ async def create(
self,
*,
background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1490,6 +1516,11 @@ async def create(
background: Whether to run the model response in the background.
[Learn more](https://platform.openai.com/docs/guides/background).
+ conversation: The conversation that this response belongs to. Items from this conversation are
+ prepended to `input_items` for this response request. Input items and output
+ items from this response are automatically added to this conversation after this
+ response completes.
+
include: Specify additional output data to include in the model response. Currently
supported values are:
@@ -1550,6 +1581,7 @@ async def create(
previous_response_id: The unique ID of the previous response to the model. Use this to create
multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ Cannot be used in conjunction with `conversation`.
prompt: Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
@@ -1668,6 +1700,7 @@ async def create(
*,
stream: Literal[True],
background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1724,6 +1757,11 @@ async def create(
background: Whether to run the model response in the background.
[Learn more](https://platform.openai.com/docs/guides/background).
+ conversation: The conversation that this response belongs to. Items from this conversation are
+ prepended to `input_items` for this response request. Input items and output
+ items from this response are automatically added to this conversation after this
+ response completes.
+
include: Specify additional output data to include in the model response. Currently
supported values are:
@@ -1784,6 +1822,7 @@ async def create(
previous_response_id: The unique ID of the previous response to the model. Use this to create
multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ Cannot be used in conjunction with `conversation`.
prompt: Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
@@ -1895,6 +1934,7 @@ async def create(
*,
stream: bool,
background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1951,6 +1991,11 @@ async def create(
background: Whether to run the model response in the background.
[Learn more](https://platform.openai.com/docs/guides/background).
+ conversation: The conversation that this response belongs to. Items from this conversation are
+ prepended to `input_items` for this response request. Input items and output
+ items from this response are automatically added to this conversation after this
+ response completes.
+
include: Specify additional output data to include in the model response. Currently
supported values are:
@@ -2011,6 +2056,7 @@ async def create(
previous_response_id: The unique ID of the previous response to the model. Use this to create
multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ Cannot be used in conjunction with `conversation`.
prompt: Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
@@ -2120,6 +2166,7 @@ async def create(
self,
*,
background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -2157,6 +2204,7 @@ async def create(
body=await async_maybe_transform(
{
"background": background,
+ "conversation": conversation,
"include": include,
"input": input,
"instructions": instructions,
@@ -2229,7 +2277,7 @@ def stream(
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -2261,7 +2309,7 @@ def stream(
store: Optional[bool] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
top_p: Optional[float] | NotGiven = NOT_GIVEN,
truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -2376,6 +2424,7 @@ async def parse(
*,
text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -2394,7 +2443,7 @@ async def parse(
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -2432,6 +2481,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
body=maybe_transform(
{
"background": background,
+ "conversation": conversation,
"include": include,
"input": input,
"instructions": instructions,
diff --git a/src/openai/types/conversations/__init__.py b/src/openai/types/conversations/__init__.py
new file mode 100644
index 0000000000..538966db4f
--- /dev/null
+++ b/src/openai/types/conversations/__init__.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .message import Message as Message
+from .lob_prob import LobProb as LobProb
+from .conversation import Conversation as Conversation
+from .text_content import TextContent as TextContent
+from .top_log_prob import TopLogProb as TopLogProb
+from .refusal_content import RefusalContent as RefusalContent
+from .item_list_params import ItemListParams as ItemListParams
+from .conversation_item import ConversationItem as ConversationItem
+from .url_citation_body import URLCitationBody as URLCitationBody
+from .file_citation_body import FileCitationBody as FileCitationBody
+from .input_file_content import InputFileContent as InputFileContent
+from .input_text_content import InputTextContent as InputTextContent
+from .item_create_params import ItemCreateParams as ItemCreateParams
+from .input_image_content import InputImageContent as InputImageContent
+from .output_text_content import OutputTextContent as OutputTextContent
+from .item_retrieve_params import ItemRetrieveParams as ItemRetrieveParams
+from .summary_text_content import SummaryTextContent as SummaryTextContent
+from .conversation_item_list import ConversationItemList as ConversationItemList
+from .conversation_create_params import ConversationCreateParams as ConversationCreateParams
+from .conversation_update_params import ConversationUpdateParams as ConversationUpdateParams
+from .computer_screenshot_content import ComputerScreenshotContent as ComputerScreenshotContent
+from .container_file_citation_body import ContainerFileCitationBody as ContainerFileCitationBody
+from .conversation_deleted_resource import ConversationDeletedResource as ConversationDeletedResource
diff --git a/src/openai/types/conversations/computer_screenshot_content.py b/src/openai/types/conversations/computer_screenshot_content.py
new file mode 100644
index 0000000000..897b7ada0d
--- /dev/null
+++ b/src/openai/types/conversations/computer_screenshot_content.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ComputerScreenshotContent"]
+
+
+class ComputerScreenshotContent(BaseModel):
+ file_id: Optional[str] = None
+ """The identifier of an uploaded file that contains the screenshot."""
+
+ image_url: Optional[str] = None
+ """The URL of the screenshot image."""
+
+ type: Literal["computer_screenshot"]
+ """Specifies the event type.
+
+ For a computer screenshot, this property is always set to `computer_screenshot`.
+ """
diff --git a/src/openai/types/conversations/container_file_citation_body.py b/src/openai/types/conversations/container_file_citation_body.py
new file mode 100644
index 0000000000..ea460df2e2
--- /dev/null
+++ b/src/openai/types/conversations/container_file_citation_body.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ContainerFileCitationBody"]
+
+
+class ContainerFileCitationBody(BaseModel):
+ container_id: str
+ """The ID of the container file."""
+
+ end_index: int
+ """The index of the last character of the container file citation in the message."""
+
+ file_id: str
+ """The ID of the file."""
+
+ filename: str
+ """The filename of the container file cited."""
+
+ start_index: int
+ """The index of the first character of the container file citation in the message."""
+
+ type: Literal["container_file_citation"]
+ """The type of the container file citation. Always `container_file_citation`."""
diff --git a/src/openai/types/conversations/conversation.py b/src/openai/types/conversations/conversation.py
new file mode 100644
index 0000000000..ed63d40355
--- /dev/null
+++ b/src/openai/types/conversations/conversation.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["Conversation"]
+
+
+class Conversation(BaseModel):
+ id: str
+ """The unique ID of the conversation."""
+
+ created_at: int
+ """
+ The time at which the conversation was created, measured in seconds since the
+ Unix epoch.
+ """
+
+ metadata: object
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard. Keys are
+ strings with a maximum length of 64 characters. Values are strings with a
+ maximum length of 512 characters.
+ """
+
+ object: Literal["conversation"]
+ """The object type, which is always `conversation`."""
diff --git a/src/openai/types/conversations/conversation_create_params.py b/src/openai/types/conversations/conversation_create_params.py
new file mode 100644
index 0000000000..7ad3f8ae2d
--- /dev/null
+++ b/src/openai/types/conversations/conversation_create_params.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable, Optional
+from typing_extensions import TypedDict
+
+from ..shared_params.metadata import Metadata
+from ..responses.response_input_item_param import ResponseInputItemParam
+
+__all__ = ["ConversationCreateParams"]
+
+
+class ConversationCreateParams(TypedDict, total=False):
+ items: Optional[Iterable[ResponseInputItemParam]]
+ """
+ Initial items to include in the conversation context. You may add up to 20 items
+ at a time.
+ """
+
+ metadata: Optional[Metadata]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ Useful for storing additional information about the object in a structured
+ format.
+ """
diff --git a/src/openai/types/conversations/conversation_deleted_resource.py b/src/openai/types/conversations/conversation_deleted_resource.py
new file mode 100644
index 0000000000..7abcb2448e
--- /dev/null
+++ b/src/openai/types/conversations/conversation_deleted_resource.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ConversationDeletedResource"]
+
+
+class ConversationDeletedResource(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["conversation.deleted"]
diff --git a/src/openai/types/conversations/conversation_item.py b/src/openai/types/conversations/conversation_item.py
new file mode 100644
index 0000000000..a7cd355f36
--- /dev/null
+++ b/src/openai/types/conversations/conversation_item.py
@@ -0,0 +1,209 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from .message import Message
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from ..responses.response_reasoning_item import ResponseReasoningItem
+from ..responses.response_custom_tool_call import ResponseCustomToolCall
+from ..responses.response_computer_tool_call import ResponseComputerToolCall
+from ..responses.response_function_web_search import ResponseFunctionWebSearch
+from ..responses.response_file_search_tool_call import ResponseFileSearchToolCall
+from ..responses.response_custom_tool_call_output import ResponseCustomToolCallOutput
+from ..responses.response_function_tool_call_item import ResponseFunctionToolCallItem
+from ..responses.response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall
+from ..responses.response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem
+from ..responses.response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem
+
+__all__ = [
+ "ConversationItem",
+ "ImageGenerationCall",
+ "LocalShellCall",
+ "LocalShellCallAction",
+ "LocalShellCallOutput",
+ "McpListTools",
+ "McpListToolsTool",
+ "McpApprovalRequest",
+ "McpApprovalResponse",
+ "McpCall",
+]
+
+
+class ImageGenerationCall(BaseModel):
+ id: str
+ """The unique ID of the image generation call."""
+
+ result: Optional[str] = None
+ """The generated image encoded in base64."""
+
+ status: Literal["in_progress", "completed", "generating", "failed"]
+ """The status of the image generation call."""
+
+ type: Literal["image_generation_call"]
+ """The type of the image generation call. Always `image_generation_call`."""
+
+
+class LocalShellCallAction(BaseModel):
+ command: List[str]
+ """The command to run."""
+
+ env: Dict[str, str]
+ """Environment variables to set for the command."""
+
+ type: Literal["exec"]
+ """The type of the local shell action. Always `exec`."""
+
+ timeout_ms: Optional[int] = None
+ """Optional timeout in milliseconds for the command."""
+
+ user: Optional[str] = None
+ """Optional user to run the command as."""
+
+ working_directory: Optional[str] = None
+ """Optional working directory to run the command in."""
+
+
+class LocalShellCall(BaseModel):
+ id: str
+ """The unique ID of the local shell call."""
+
+ action: LocalShellCallAction
+ """Execute a shell command on the server."""
+
+ call_id: str
+ """The unique ID of the local shell tool call generated by the model."""
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of the local shell call."""
+
+ type: Literal["local_shell_call"]
+ """The type of the local shell call. Always `local_shell_call`."""
+
+
+class LocalShellCallOutput(BaseModel):
+ id: str
+ """The unique ID of the local shell tool call generated by the model."""
+
+ output: str
+ """A JSON string of the output of the local shell tool call."""
+
+ type: Literal["local_shell_call_output"]
+ """The type of the local shell tool call output. Always `local_shell_call_output`."""
+
+ status: Optional[Literal["in_progress", "completed", "incomplete"]] = None
+ """The status of the item. One of `in_progress`, `completed`, or `incomplete`."""
+
+
+class McpListToolsTool(BaseModel):
+ input_schema: object
+ """The JSON schema describing the tool's input."""
+
+ name: str
+ """The name of the tool."""
+
+ annotations: Optional[object] = None
+ """Additional annotations about the tool."""
+
+ description: Optional[str] = None
+ """The description of the tool."""
+
+
+class McpListTools(BaseModel):
+ id: str
+ """The unique ID of the list."""
+
+ server_label: str
+ """The label of the MCP server."""
+
+ tools: List[McpListToolsTool]
+ """The tools available on the server."""
+
+ type: Literal["mcp_list_tools"]
+ """The type of the item. Always `mcp_list_tools`."""
+
+ error: Optional[str] = None
+ """Error message if the server could not list tools."""
+
+
+class McpApprovalRequest(BaseModel):
+ id: str
+ """The unique ID of the approval request."""
+
+ arguments: str
+ """A JSON string of arguments for the tool."""
+
+ name: str
+ """The name of the tool to run."""
+
+ server_label: str
+ """The label of the MCP server making the request."""
+
+ type: Literal["mcp_approval_request"]
+ """The type of the item. Always `mcp_approval_request`."""
+
+
+class McpApprovalResponse(BaseModel):
+ id: str
+ """The unique ID of the approval response"""
+
+ approval_request_id: str
+ """The ID of the approval request being answered."""
+
+ approve: bool
+ """Whether the request was approved."""
+
+ type: Literal["mcp_approval_response"]
+ """The type of the item. Always `mcp_approval_response`."""
+
+ reason: Optional[str] = None
+ """Optional reason for the decision."""
+
+
+class McpCall(BaseModel):
+ id: str
+ """The unique ID of the tool call."""
+
+ arguments: str
+ """A JSON string of the arguments passed to the tool."""
+
+ name: str
+ """The name of the tool that was run."""
+
+ server_label: str
+ """The label of the MCP server running the tool."""
+
+ type: Literal["mcp_call"]
+ """The type of the item. Always `mcp_call`."""
+
+ error: Optional[str] = None
+ """The error from the tool call, if any."""
+
+ output: Optional[str] = None
+ """The output from the tool call."""
+
+
+ConversationItem: TypeAlias = Annotated[
+ Union[
+ Message,
+ ResponseFunctionToolCallItem,
+ ResponseFunctionToolCallOutputItem,
+ ResponseFileSearchToolCall,
+ ResponseFunctionWebSearch,
+ ImageGenerationCall,
+ ResponseComputerToolCall,
+ ResponseComputerToolCallOutputItem,
+ ResponseReasoningItem,
+ ResponseCodeInterpreterToolCall,
+ LocalShellCall,
+ LocalShellCallOutput,
+ McpListTools,
+ McpApprovalRequest,
+ McpApprovalResponse,
+ McpCall,
+ ResponseCustomToolCall,
+ ResponseCustomToolCallOutput,
+ ],
+ PropertyInfo(discriminator="type"),
+]
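
`ConversationItem` is a discriminated union keyed on `type`, so items narrow cleanly at runtime. A sketch, assuming a configured client and a placeholder conversation ID:

from openai import OpenAI
from openai.types.conversations import Message

client = OpenAI()

for item in client.conversations.items.list("conv_123"):
    if isinstance(item, Message):
        print("message parts:", [part.type for part in item.content])
    elif item.type == "mcp_call":
        print("mcp call:", item.name, item.error or item.output)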
diff --git a/src/openai/types/conversations/conversation_item_list.py b/src/openai/types/conversations/conversation_item_list.py
new file mode 100644
index 0000000000..20091102cb
--- /dev/null
+++ b/src/openai/types/conversations/conversation_item_list.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .conversation_item import ConversationItem
+
+__all__ = ["ConversationItemList"]
+
+
+class ConversationItemList(BaseModel):
+ data: List[ConversationItem]
+ """A list of conversation items."""
+
+ first_id: str
+ """The ID of the first item in the list."""
+
+ has_more: bool
+ """Whether there are more items available."""
+
+ last_id: str
+ """The ID of the last item in the list."""
+
+ object: Literal["list"]
+ """The type of object returned, must be `list`."""
diff --git a/src/openai/types/conversations/conversation_update_params.py b/src/openai/types/conversations/conversation_update_params.py
new file mode 100644
index 0000000000..f2aa42d833
--- /dev/null
+++ b/src/openai/types/conversations/conversation_update_params.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ConversationUpdateParams"]
+
+
+class ConversationUpdateParams(TypedDict, total=False):
+ metadata: Required[Dict[str, str]]
+ """Set of 16 key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format, and querying for objects via API or the dashboard. Keys are
+ strings with a maximum length of 64 characters. Values are strings with a
+ maximum length of 512 characters.
+ """
diff --git a/src/openai/types/conversations/file_citation_body.py b/src/openai/types/conversations/file_citation_body.py
new file mode 100644
index 0000000000..ea90ae381d
--- /dev/null
+++ b/src/openai/types/conversations/file_citation_body.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["FileCitationBody"]
+
+
+class FileCitationBody(BaseModel):
+ file_id: str
+ """The ID of the file."""
+
+ filename: str
+ """The filename of the file cited."""
+
+ index: int
+ """The index of the file in the list of files."""
+
+ type: Literal["file_citation"]
+ """The type of the file citation. Always `file_citation`."""
diff --git a/src/openai/types/conversations/input_file_content.py b/src/openai/types/conversations/input_file_content.py
new file mode 100644
index 0000000000..6aef7a89d9
--- /dev/null
+++ b/src/openai/types/conversations/input_file_content.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["InputFileContent"]
+
+
+class InputFileContent(BaseModel):
+ file_id: Optional[str] = None
+ """The ID of the file to be sent to the model."""
+
+ type: Literal["input_file"]
+ """The type of the input item. Always `input_file`."""
+
+ file_url: Optional[str] = None
+ """The URL of the file to be sent to the model."""
+
+ filename: Optional[str] = None
+ """The name of the file to be sent to the model."""
diff --git a/src/openai/types/conversations/input_image_content.py b/src/openai/types/conversations/input_image_content.py
new file mode 100644
index 0000000000..f2587e0adc
--- /dev/null
+++ b/src/openai/types/conversations/input_image_content.py
@@ -0,0 +1,28 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["InputImageContent"]
+
+
+class InputImageContent(BaseModel):
+ detail: Literal["low", "high", "auto"]
+ """The detail level of the image to be sent to the model.
+
+ One of `high`, `low`, or `auto`. Defaults to `auto`.
+ """
+
+ file_id: Optional[str] = None
+ """The ID of the file to be sent to the model."""
+
+ image_url: Optional[str] = None
+ """The URL of the image to be sent to the model.
+
+ A fully qualified URL or base64 encoded image in a data URL.
+ """
+
+ type: Literal["input_image"]
+ """The type of the input item. Always `input_image`."""
diff --git a/src/openai/types/conversations/input_text_content.py b/src/openai/types/conversations/input_text_content.py
new file mode 100644
index 0000000000..5e2daebdc5
--- /dev/null
+++ b/src/openai/types/conversations/input_text_content.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["InputTextContent"]
+
+
+class InputTextContent(BaseModel):
+ text: str
+ """The text input to the model."""
+
+ type: Literal["input_text"]
+ """The type of the input item. Always `input_text`."""
diff --git a/src/openai/types/conversations/item_create_params.py b/src/openai/types/conversations/item_create_params.py
new file mode 100644
index 0000000000..9158b7167f
--- /dev/null
+++ b/src/openai/types/conversations/item_create_params.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Iterable
+from typing_extensions import Required, TypedDict
+
+from ..responses.response_includable import ResponseIncludable
+from ..responses.response_input_item_param import ResponseInputItemParam
+
+__all__ = ["ItemCreateParams"]
+
+
+class ItemCreateParams(TypedDict, total=False):
+ items: Required[Iterable[ResponseInputItemParam]]
+ """The items to add to the conversation. You may add up to 20 items at a time."""
+
+ include: List[ResponseIncludable]
+ """Additional fields to include in the response.
+
+ See the `include` parameter for
+ [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
+ for more information.
+ """
diff --git a/src/openai/types/conversations/item_list_params.py b/src/openai/types/conversations/item_list_params.py
new file mode 100644
index 0000000000..34bf43c559
--- /dev/null
+++ b/src/openai/types/conversations/item_list_params.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Literal, TypedDict
+
+from ..responses.response_includable import ResponseIncludable
+
+__all__ = ["ItemListParams"]
+
+
+class ItemListParams(TypedDict, total=False):
+ after: str
+ """An item ID to list items after, used in pagination."""
+
+ include: List[ResponseIncludable]
+ """Specify additional output data to include in the model response.
+
+ Currently supported values are:
+
+ - `code_interpreter_call.outputs`: Includes the outputs of Python code execution
+ in code interpreter tool call items.
+ - `computer_call_output.output.image_url`: Include image urls from the computer
+ call output.
+ - `file_search_call.results`: Include the search results of the file search tool
+ call.
+ - `message.input_image.image_url`: Include image urls from the input message.
+ - `message.output_text.logprobs`: Include logprobs with assistant messages.
+ - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+ tokens in reasoning item outputs. This enables reasoning items to be used in
+ multi-turn conversations when using the Responses API statelessly (like when
+ the `store` parameter is set to `false`, or when an organization is enrolled
+ in the zero data retention program).
+ """
+
+ limit: int
+ """A limit on the number of objects to be returned.
+
+ Limit can range between 1 and 100, and the default is 20.
+ """
+
+ order: Literal["asc", "desc"]
+ """The order to return the input items in. Default is `desc`.
+
+ - `asc`: Return the input items in ascending order.
+ - `desc`: Return the input items in descending order.
+ """
diff --git a/src/openai/types/conversations/item_retrieve_params.py b/src/openai/types/conversations/item_retrieve_params.py
new file mode 100644
index 0000000000..8c5db1e533
--- /dev/null
+++ b/src/openai/types/conversations/item_retrieve_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List
+from typing_extensions import Required, TypedDict
+
+from ..responses.response_includable import ResponseIncludable
+
+__all__ = ["ItemRetrieveParams"]
+
+
+class ItemRetrieveParams(TypedDict, total=False):
+ conversation_id: Required[str]
+
+ include: List[ResponseIncludable]
+ """Additional fields to include in the response.
+
+ See the `include` parameter for
+ [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include)
+ for more information.
+ """
diff --git a/src/openai/types/conversations/lob_prob.py b/src/openai/types/conversations/lob_prob.py
new file mode 100644
index 0000000000..f7dcd62a5e
--- /dev/null
+++ b/src/openai/types/conversations/lob_prob.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from ..._models import BaseModel
+from .top_log_prob import TopLogProb
+
+__all__ = ["LobProb"]
+
+
+class LobProb(BaseModel):
+ token: str
+
+ bytes: List[int]
+
+ logprob: float
+
+ top_logprobs: List[TopLogProb]
diff --git a/src/openai/types/conversations/message.py b/src/openai/types/conversations/message.py
new file mode 100644
index 0000000000..a070cf2869
--- /dev/null
+++ b/src/openai/types/conversations/message.py
@@ -0,0 +1,56 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from ..._models import BaseModel
+from .text_content import TextContent
+from .refusal_content import RefusalContent
+from .input_file_content import InputFileContent
+from .input_text_content import InputTextContent
+from .input_image_content import InputImageContent
+from .output_text_content import OutputTextContent
+from .summary_text_content import SummaryTextContent
+from .computer_screenshot_content import ComputerScreenshotContent
+
+__all__ = ["Message", "Content"]
+
+Content: TypeAlias = Annotated[
+ Union[
+ InputTextContent,
+ OutputTextContent,
+ TextContent,
+ SummaryTextContent,
+ RefusalContent,
+ InputImageContent,
+ ComputerScreenshotContent,
+ InputFileContent,
+ ],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class Message(BaseModel):
+ id: str
+ """The unique ID of the message."""
+
+ content: List[Content]
+ """The content of the message"""
+
+ role: Literal["unknown", "user", "assistant", "system", "critic", "discriminator", "developer", "tool"]
+ """The role of the message.
+
+ One of `unknown`, `user`, `assistant`, `system`, `critic`, `discriminator`,
+ `developer`, or `tool`.
+ """
+
+ status: Literal["in_progress", "completed", "incomplete"]
+ """The status of item.
+
+ One of `in_progress`, `completed`, or `incomplete`. Populated when items are
+ returned via API.
+ """
+
+ type: Literal["message"]
+ """The type of the message. Always set to `message`."""
diff --git a/src/openai/types/conversations/output_text_content.py b/src/openai/types/conversations/output_text_content.py
new file mode 100644
index 0000000000..2ffee76526
--- /dev/null
+++ b/src/openai/types/conversations/output_text_content.py
@@ -0,0 +1,30 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
+
+from ..._utils import PropertyInfo
+from .lob_prob import LobProb
+from ..._models import BaseModel
+from .url_citation_body import URLCitationBody
+from .file_citation_body import FileCitationBody
+from .container_file_citation_body import ContainerFileCitationBody
+
+__all__ = ["OutputTextContent", "Annotation"]
+
+Annotation: TypeAlias = Annotated[
+ Union[FileCitationBody, URLCitationBody, ContainerFileCitationBody], PropertyInfo(discriminator="type")
+]
+
+
+class OutputTextContent(BaseModel):
+ annotations: List[Annotation]
+ """The annotations of the text output."""
+
+ text: str
+ """The text output from the model."""
+
+ type: Literal["output_text"]
+ """The type of the output text. Always `output_text`."""
+
+ logprobs: Optional[List[LobProb]] = None
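
Each entry in `annotations` is one of the three citation bodies, again discriminated by `type`. A sketch of unpacking them:

from openai.types.conversations import OutputTextContent

def print_citations(part: OutputTextContent) -> None:
    for ann in part.annotations:
        if ann.type == "url_citation":
            print(ann.title, ann.url)
        elif ann.type == "file_citation":
            print(ann.filename, ann.file_id)
        else:  # container_file_citation
            print(ann.container_id, ann.file_id)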
diff --git a/src/openai/types/conversations/refusal_content.py b/src/openai/types/conversations/refusal_content.py
new file mode 100644
index 0000000000..3c8bd5e35f
--- /dev/null
+++ b/src/openai/types/conversations/refusal_content.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["RefusalContent"]
+
+
+class RefusalContent(BaseModel):
+ refusal: str
+ """The refusal explanation from the model."""
+
+ type: Literal["refusal"]
+ """The type of the refusal. Always `refusal`."""
diff --git a/src/openai/types/conversations/summary_text_content.py b/src/openai/types/conversations/summary_text_content.py
new file mode 100644
index 0000000000..047769ed67
--- /dev/null
+++ b/src/openai/types/conversations/summary_text_content.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["SummaryTextContent"]
+
+
+class SummaryTextContent(BaseModel):
+ text: str
+
+ type: Literal["summary_text"]
diff --git a/src/openai/types/conversations/text_content.py b/src/openai/types/conversations/text_content.py
new file mode 100644
index 0000000000..f1ae079597
--- /dev/null
+++ b/src/openai/types/conversations/text_content.py
@@ -0,0 +1,13 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["TextContent"]
+
+
+class TextContent(BaseModel):
+ text: str
+
+ type: Literal["text"]
diff --git a/src/openai/types/conversations/top_log_prob.py b/src/openai/types/conversations/top_log_prob.py
new file mode 100644
index 0000000000..fafca756ae
--- /dev/null
+++ b/src/openai/types/conversations/top_log_prob.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from ..._models import BaseModel
+
+__all__ = ["TopLogProb"]
+
+
+class TopLogProb(BaseModel):
+ token: str
+
+ bytes: List[int]
+
+ logprob: float
diff --git a/src/openai/types/conversations/url_citation_body.py b/src/openai/types/conversations/url_citation_body.py
new file mode 100644
index 0000000000..1becb44bc0
--- /dev/null
+++ b/src/openai/types/conversations/url_citation_body.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["URLCitationBody"]
+
+
+class URLCitationBody(BaseModel):
+ end_index: int
+ """The index of the last character of the URL citation in the message."""
+
+ start_index: int
+ """The index of the first character of the URL citation in the message."""
+
+ title: str
+ """The title of the web resource."""
+
+ type: Literal["url_citation"]
+ """The type of the URL citation. Always `url_citation`."""
+
+ url: str
+ """The URL of the web resource."""
diff --git a/src/openai/types/evals/create_eval_completions_run_data_source.py b/src/openai/types/evals/create_eval_completions_run_data_source.py
index bb39d1d3e5..efcab9adb8 100644
--- a/src/openai/types/evals/create_eval_completions_run_data_source.py
+++ b/src/openai/types/evals/create_eval_completions_run_data_source.py
@@ -23,10 +23,10 @@
"InputMessages",
"InputMessagesTemplate",
"InputMessagesTemplateTemplate",
- "InputMessagesTemplateTemplateMessage",
- "InputMessagesTemplateTemplateMessageContent",
- "InputMessagesTemplateTemplateMessageContentOutputText",
- "InputMessagesTemplateTemplateMessageContentInputImage",
+ "InputMessagesTemplateTemplateEvalItem",
+ "InputMessagesTemplateTemplateEvalItemContent",
+ "InputMessagesTemplateTemplateEvalItemContentOutputText",
+ "InputMessagesTemplateTemplateEvalItemContentInputImage",
"InputMessagesItemReference",
"SamplingParams",
"SamplingParamsResponseFormat",
@@ -87,7 +87,7 @@ class SourceStoredCompletions(BaseModel):
]
-class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel):
+class InputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel):
text: str
"""The text output from the model."""
@@ -95,7 +95,7 @@ class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel):
"""The type of the output text. Always `output_text`."""
-class InputMessagesTemplateTemplateMessageContentInputImage(BaseModel):
+class InputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel):
image_url: str
"""The URL of the image input."""
@@ -109,17 +109,17 @@ class InputMessagesTemplateTemplateMessageContentInputImage(BaseModel):
"""
-InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[
+InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
str,
ResponseInputText,
- InputMessagesTemplateTemplateMessageContentOutputText,
- InputMessagesTemplateTemplateMessageContentInputImage,
+ InputMessagesTemplateTemplateEvalItemContentOutputText,
+ InputMessagesTemplateTemplateEvalItemContentInputImage,
List[object],
]
-class InputMessagesTemplateTemplateMessage(BaseModel):
- content: InputMessagesTemplateTemplateMessageContent
+class InputMessagesTemplateTemplateEvalItem(BaseModel):
+ content: InputMessagesTemplateTemplateEvalItemContent
"""Inputs to the model - can contain template strings."""
role: Literal["user", "assistant", "system", "developer"]
@@ -132,9 +132,7 @@ class InputMessagesTemplateTemplateMessage(BaseModel):
"""The type of the message input. Always `message`."""
-InputMessagesTemplateTemplate: TypeAlias = Annotated[
- Union[EasyInputMessage, InputMessagesTemplateTemplateMessage], PropertyInfo(discriminator="type")
-]
+InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessage, InputMessagesTemplateTemplateEvalItem]
class InputMessagesTemplate(BaseModel):
diff --git a/src/openai/types/evals/create_eval_completions_run_data_source_param.py b/src/openai/types/evals/create_eval_completions_run_data_source_param.py
index 7c71ecbe88..effa658452 100644
--- a/src/openai/types/evals/create_eval_completions_run_data_source_param.py
+++ b/src/openai/types/evals/create_eval_completions_run_data_source_param.py
@@ -23,10 +23,10 @@
"InputMessages",
"InputMessagesTemplate",
"InputMessagesTemplateTemplate",
- "InputMessagesTemplateTemplateMessage",
- "InputMessagesTemplateTemplateMessageContent",
- "InputMessagesTemplateTemplateMessageContentOutputText",
- "InputMessagesTemplateTemplateMessageContentInputImage",
+ "InputMessagesTemplateTemplateEvalItem",
+ "InputMessagesTemplateTemplateEvalItemContent",
+ "InputMessagesTemplateTemplateEvalItemContentOutputText",
+ "InputMessagesTemplateTemplateEvalItemContentInputImage",
"InputMessagesItemReference",
"SamplingParams",
"SamplingParamsResponseFormat",
@@ -85,7 +85,7 @@ class SourceStoredCompletions(TypedDict, total=False):
Source: TypeAlias = Union[SourceFileContent, SourceFileID, SourceStoredCompletions]
-class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=False):
+class InputMessagesTemplateTemplateEvalItemContentOutputText(TypedDict, total=False):
text: Required[str]
"""The text output from the model."""
@@ -93,7 +93,7 @@ class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=Fal
"""The type of the output text. Always `output_text`."""
-class InputMessagesTemplateTemplateMessageContentInputImage(TypedDict, total=False):
+class InputMessagesTemplateTemplateEvalItemContentInputImage(TypedDict, total=False):
image_url: Required[str]
"""The URL of the image input."""
@@ -107,17 +107,17 @@ class InputMessagesTemplateTemplateMessageContentInputImage(TypedDict, total=Fal
"""
-InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[
+InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
str,
ResponseInputTextParam,
- InputMessagesTemplateTemplateMessageContentOutputText,
- InputMessagesTemplateTemplateMessageContentInputImage,
+ InputMessagesTemplateTemplateEvalItemContentOutputText,
+ InputMessagesTemplateTemplateEvalItemContentInputImage,
Iterable[object],
]
-class InputMessagesTemplateTemplateMessage(TypedDict, total=False):
- content: Required[InputMessagesTemplateTemplateMessageContent]
+class InputMessagesTemplateTemplateEvalItem(TypedDict, total=False):
+ content: Required[InputMessagesTemplateTemplateEvalItemContent]
"""Inputs to the model - can contain template strings."""
role: Required[Literal["user", "assistant", "system", "developer"]]
@@ -130,7 +130,7 @@ class InputMessagesTemplateTemplateMessage(TypedDict, total=False):
"""The type of the message input. Always `message`."""
-InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, InputMessagesTemplateTemplateMessage]
+InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, InputMessagesTemplateTemplateEvalItem]
class InputMessagesTemplate(TypedDict, total=False):
diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py
index 74d8688081..7c574ed315 100644
--- a/src/openai/types/responses/__init__.py
+++ b/src/openai/types/responses/__init__.py
@@ -79,6 +79,7 @@
from .response_text_config_param import ResponseTextConfigParam as ResponseTextConfigParam
from .tool_choice_function_param import ToolChoiceFunctionParam as ToolChoiceFunctionParam
from .response_computer_tool_call import ResponseComputerToolCall as ResponseComputerToolCall
+from .response_conversation_param import ResponseConversationParam as ResponseConversationParam
from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig
from .response_function_tool_call import ResponseFunctionToolCall as ResponseFunctionToolCall
from .response_input_message_item import ResponseInputMessageItem as ResponseInputMessageItem
diff --git a/src/openai/types/responses/input_item_list_params.py b/src/openai/types/responses/input_item_list_params.py
index 6a18d920cb..44a8dc5de3 100644
--- a/src/openai/types/responses/input_item_list_params.py
+++ b/src/openai/types/responses/input_item_list_params.py
@@ -14,9 +14,6 @@ class InputItemListParams(TypedDict, total=False):
after: str
"""An item ID to list items after, used in pagination."""
- before: str
- """An item ID to list items before, used in pagination."""
-
include: List[ResponseIncludable]
"""Additional fields to include in the response.
diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py
index 49f60bbc5c..ce9effd75e 100644
--- a/src/openai/types/responses/response.py
+++ b/src/openai/types/responses/response.py
@@ -22,7 +22,7 @@
from .tool_choice_function import ToolChoiceFunction
from ..shared.responses_model import ResponsesModel
-__all__ = ["Response", "IncompleteDetails", "ToolChoice"]
+__all__ = ["Response", "IncompleteDetails", "ToolChoice", "Conversation"]
class IncompleteDetails(BaseModel):
@@ -35,6 +35,11 @@ class IncompleteDetails(BaseModel):
]
+class Conversation(BaseModel):
+ id: str
+ """The unique ID of the conversation."""
+
+
class Response(BaseModel):
id: str
"""Unique identifier for this Response."""
@@ -141,6 +146,13 @@ class Response(BaseModel):
[Learn more](https://platform.openai.com/docs/guides/background).
"""
+ conversation: Optional[Conversation] = None
+ """The conversation that this response belongs to.
+
+ Input items and output items from this response are automatically added to this
+ conversation.
+ """
+
max_output_tokens: Optional[int] = None
"""
An upper bound for the number of tokens that can be generated for a response,
@@ -161,6 +173,7 @@ class Response(BaseModel):
Use this to create multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ Cannot be used in conjunction with `conversation`.
"""
prompt: Optional[ResponsePrompt] = None
diff --git a/src/openai/types/responses/response_conversation_param.py b/src/openai/types/responses/response_conversation_param.py
new file mode 100644
index 0000000000..067bdc7a31
--- /dev/null
+++ b/src/openai/types/responses/response_conversation_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ResponseConversationParam"]
+
+
+class ResponseConversationParam(TypedDict, total=False):
+ id: Required[str]
+ """The unique ID of the conversation."""
diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py
index 0cd761fcf0..5129b8b771 100644
--- a/src/openai/types/responses/response_create_params.py
+++ b/src/openai/types/responses/response_create_params.py
@@ -18,10 +18,12 @@
from .tool_choice_allowed_param import ToolChoiceAllowedParam
from .response_text_config_param import ResponseTextConfigParam
from .tool_choice_function_param import ToolChoiceFunctionParam
+from .response_conversation_param import ResponseConversationParam
from ..shared_params.responses_model import ResponsesModel
__all__ = [
"ResponseCreateParamsBase",
+ "Conversation",
"StreamOptions",
"ToolChoice",
"ResponseCreateParamsNonStreaming",
@@ -36,6 +38,14 @@ class ResponseCreateParamsBase(TypedDict, total=False):
[Learn more](https://platform.openai.com/docs/guides/background).
"""
+ conversation: Optional[Conversation]
+ """The conversation that this response belongs to.
+
+ Items from this conversation are prepended to `input_items` for this response
+ request. Input items and output items from this response are automatically added
+ to this conversation after this response completes.
+ """
+
include: Optional[List[ResponseIncludable]]
"""Specify additional output data to include in the model response.
@@ -118,6 +128,7 @@ class ResponseCreateParamsBase(TypedDict, total=False):
Use this to create multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ Cannot be used in conjunction with `conversation`.
"""
prompt: Optional[ResponsePromptParam]
@@ -253,6 +264,9 @@ class ResponseCreateParamsBase(TypedDict, total=False):
"""
+Conversation: TypeAlias = Union[str, ResponseConversationParam]
+
+
class StreamOptions(TypedDict, total=False):
include_obfuscation: bool
"""When true, stream obfuscation will be enabled.
diff --git a/src/openai/types/responses/tool.py b/src/openai/types/responses/tool.py
index 455ba01666..d46f8cb0be 100644
--- a/src/openai/types/responses/tool.py
+++ b/src/openai/types/responses/tool.py
@@ -15,7 +15,7 @@
"Tool",
"Mcp",
"McpAllowedTools",
- "McpAllowedToolsMcpAllowedToolsFilter",
+ "McpAllowedToolsMcpToolFilter",
"McpRequireApproval",
"McpRequireApprovalMcpToolApprovalFilter",
"McpRequireApprovalMcpToolApprovalFilterAlways",
@@ -29,30 +29,54 @@
]
-class McpAllowedToolsMcpAllowedToolsFilter(BaseModel):
+class McpAllowedToolsMcpToolFilter(BaseModel):
+ read_only: Optional[bool] = None
+ """Indicates whether or not a tool modifies data or is read-only.
+
+ If an MCP server is
+ [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ it will match this filter.
+ """
+
tool_names: Optional[List[str]] = None
"""List of allowed tool names."""
-McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpAllowedToolsFilter, None]
+McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpToolFilter, None]
class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel):
+ read_only: Optional[bool] = None
+ """Indicates whether or not a tool modifies data or is read-only.
+
+ If an MCP server is
+ [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ it will match this filter.
+ """
+
tool_names: Optional[List[str]] = None
- """List of tools that require approval."""
+ """List of allowed tool names."""
class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel):
+ read_only: Optional[bool] = None
+ """Indicates whether or not a tool modifies data or is read-only.
+
+ If an MCP server is
+ [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ it will match this filter.
+ """
+
tool_names: Optional[List[str]] = None
- """List of tools that do not require approval."""
+ """List of allowed tool names."""
class McpRequireApprovalMcpToolApprovalFilter(BaseModel):
always: Optional[McpRequireApprovalMcpToolApprovalFilterAlways] = None
- """A list of tools that always require approval."""
+ """A filter object to specify which tools are allowed."""
never: Optional[McpRequireApprovalMcpToolApprovalFilterNever] = None
- """A list of tools that never require approval."""
+ """A filter object to specify which tools are allowed."""
McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"], None]
@@ -62,15 +86,49 @@ class Mcp(BaseModel):
server_label: str
"""A label for this MCP server, used to identify it in tool calls."""
- server_url: str
- """The URL for the MCP server."""
-
type: Literal["mcp"]
"""The type of the MCP tool. Always `mcp`."""
allowed_tools: Optional[McpAllowedTools] = None
"""List of allowed tool names or a filter object."""
+ authorization: Optional[str] = None
+ """
+ An OAuth access token that can be used with a remote MCP server, either with a
+ custom MCP server URL or a service connector. Your application must handle the
+ OAuth authorization flow and provide the token here.
+ """
+
+ connector_id: Optional[
+ Literal[
+ "connector_dropbox",
+ "connector_gmail",
+ "connector_googlecalendar",
+ "connector_googledrive",
+ "connector_microsoftteams",
+ "connector_outlookcalendar",
+ "connector_outlookemail",
+ "connector_sharepoint",
+ ]
+ ] = None
+ """Identifier for service connectors, like those available in ChatGPT.
+
+ One of `server_url` or `connector_id` must be provided. Learn more about service
+ connectors
+ [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
+
+ Currently supported `connector_id` values are:
+
+ - Dropbox: `connector_dropbox`
+ - Gmail: `connector_gmail`
+ - Google Calendar: `connector_googlecalendar`
+ - Google Drive: `connector_googledrive`
+ - Microsoft Teams: `connector_microsoftteams`
+ - Outlook Calendar: `connector_outlookcalendar`
+ - Outlook Email: `connector_outlookemail`
+ - SharePoint: `connector_sharepoint`
+ """
+
headers: Optional[Dict[str, str]] = None
"""Optional HTTP headers to send to the MCP server.
@@ -83,6 +141,12 @@ class Mcp(BaseModel):
server_description: Optional[str] = None
"""Optional description of the MCP server, used to provide more context."""
+ server_url: Optional[str] = None
+ """The URL for the MCP server.
+
+ One of `server_url` or `connector_id` must be provided.
+ """
+
class CodeInterpreterContainerCodeInterpreterToolAuto(BaseModel):
type: Literal["auto"]
diff --git a/src/openai/types/responses/tool_param.py b/src/openai/types/responses/tool_param.py
index f91e758559..9dde42e294 100644
--- a/src/openai/types/responses/tool_param.py
+++ b/src/openai/types/responses/tool_param.py
@@ -16,7 +16,7 @@
"ToolParam",
"Mcp",
"McpAllowedTools",
- "McpAllowedToolsMcpAllowedToolsFilter",
+ "McpAllowedToolsMcpToolFilter",
"McpRequireApproval",
"McpRequireApprovalMcpToolApprovalFilter",
"McpRequireApprovalMcpToolApprovalFilterAlways",
@@ -30,30 +30,54 @@
]
-class McpAllowedToolsMcpAllowedToolsFilter(TypedDict, total=False):
+class McpAllowedToolsMcpToolFilter(TypedDict, total=False):
+ read_only: bool
+ """Indicates whether or not a tool modifies data or is read-only.
+
+ If an MCP server is
+ [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ it will match this filter.
+ """
+
tool_names: List[str]
"""List of allowed tool names."""
-McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpAllowedToolsFilter]
+McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpToolFilter]
class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False):
+ read_only: bool
+ """Indicates whether or not a tool modifies data or is read-only.
+
+ If an MCP server is
+ [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ it will match this filter.
+ """
+
tool_names: List[str]
- """List of tools that require approval."""
+ """List of allowed tool names."""
class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False):
+ read_only: bool
+ """Indicates whether or not a tool modifies data or is read-only.
+
+ If an MCP server is
+ [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ it will match this filter.
+ """
+
tool_names: List[str]
- """List of tools that do not require approval."""
+ """List of allowed tool names."""
class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False):
always: McpRequireApprovalMcpToolApprovalFilterAlways
- """A list of tools that always require approval."""
+ """A filter object to specify which tools are allowed."""
never: McpRequireApprovalMcpToolApprovalFilterNever
- """A list of tools that never require approval."""
+ """A filter object to specify which tools are allowed."""
McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"]]
@@ -63,15 +87,47 @@ class Mcp(TypedDict, total=False):
server_label: Required[str]
"""A label for this MCP server, used to identify it in tool calls."""
- server_url: Required[str]
- """The URL for the MCP server."""
-
type: Required[Literal["mcp"]]
"""The type of the MCP tool. Always `mcp`."""
allowed_tools: Optional[McpAllowedTools]
"""List of allowed tool names or a filter object."""
+ authorization: str
+ """
+ An OAuth access token that can be used with a remote MCP server, either with a
+ custom MCP server URL or a service connector. Your application must handle the
+ OAuth authorization flow and provide the token here.
+ """
+
+ connector_id: Literal[
+ "connector_dropbox",
+ "connector_gmail",
+ "connector_googlecalendar",
+ "connector_googledrive",
+ "connector_microsoftteams",
+ "connector_outlookcalendar",
+ "connector_outlookemail",
+ "connector_sharepoint",
+ ]
+ """Identifier for service connectors, like those available in ChatGPT.
+
+ One of `server_url` or `connector_id` must be provided. Learn more about service
+ connectors
+ [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
+
+ Currently supported `connector_id` values are:
+
+ - Dropbox: `connector_dropbox`
+ - Gmail: `connector_gmail`
+ - Google Calendar: `connector_googlecalendar`
+ - Google Drive: `connector_googledrive`
+ - Microsoft Teams: `connector_microsoftteams`
+ - Outlook Calendar: `connector_outlookcalendar`
+ - Outlook Email: `connector_outlookemail`
+ - SharePoint: `connector_sharepoint`
+ """
+
headers: Optional[Dict[str, str]]
"""Optional HTTP headers to send to the MCP server.
@@ -84,6 +140,12 @@ class Mcp(TypedDict, total=False):
server_description: str
"""Optional description of the MCP server, used to provide more context."""
+ server_url: str
+ """The URL for the MCP server.
+
+ One of `server_url` or `connector_id` must be provided.
+ """
+
class CodeInterpreterContainerCodeInterpreterToolAuto(TypedDict, total=False):
type: Required[Literal["auto"]]
diff --git a/tests/api_resources/conversations/__init__.py b/tests/api_resources/conversations/__init__.py
new file mode 100644
index 0000000000..fd8019a9a1
--- /dev/null
+++ b/tests/api_resources/conversations/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/conversations/test_items.py b/tests/api_resources/conversations/test_items.py
new file mode 100644
index 0000000000..c308160543
--- /dev/null
+++ b/tests/api_resources/conversations/test_items.py
@@ -0,0 +1,491 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage
+from openai.types.conversations import (
+ Conversation,
+ ConversationItem,
+ ConversationItemList,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestItems:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ item = client.conversations.items.create(
+ conversation_id="conv_123",
+ items=[
+ {
+ "content": "string",
+ "role": "user",
+ }
+ ],
+ )
+ assert_matches_type(ConversationItemList, item, path=["response"])
+
+ @parametrize
+ def test_method_create_with_all_params(self, client: OpenAI) -> None:
+ item = client.conversations.items.create(
+ conversation_id="conv_123",
+ items=[
+ {
+ "content": "string",
+ "role": "user",
+ "type": "message",
+ }
+ ],
+ include=["code_interpreter_call.outputs"],
+ )
+ assert_matches_type(ConversationItemList, item, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.conversations.items.with_raw_response.create(
+ conversation_id="conv_123",
+ items=[
+ {
+ "content": "string",
+ "role": "user",
+ }
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ item = response.parse()
+ assert_matches_type(ConversationItemList, item, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.conversations.items.with_streaming_response.create(
+ conversation_id="conv_123",
+ items=[
+ {
+ "content": "string",
+ "role": "user",
+ }
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ item = response.parse()
+ assert_matches_type(ConversationItemList, item, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_create(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ client.conversations.items.with_raw_response.create(
+ conversation_id="",
+ items=[
+ {
+ "content": "string",
+ "role": "user",
+ }
+ ],
+ )
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ item = client.conversations.items.retrieve(
+ item_id="msg_abc",
+ conversation_id="conv_123",
+ )
+ assert_matches_type(ConversationItem, item, path=["response"])
+
+ @parametrize
+ def test_method_retrieve_with_all_params(self, client: OpenAI) -> None:
+ item = client.conversations.items.retrieve(
+ item_id="msg_abc",
+ conversation_id="conv_123",
+ include=["code_interpreter_call.outputs"],
+ )
+ assert_matches_type(ConversationItem, item, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.conversations.items.with_raw_response.retrieve(
+ item_id="msg_abc",
+ conversation_id="conv_123",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ item = response.parse()
+ assert_matches_type(ConversationItem, item, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.conversations.items.with_streaming_response.retrieve(
+ item_id="msg_abc",
+ conversation_id="conv_123",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ item = response.parse()
+ assert_matches_type(ConversationItem, item, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ client.conversations.items.with_raw_response.retrieve(
+ item_id="msg_abc",
+ conversation_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
+ client.conversations.items.with_raw_response.retrieve(
+ item_id="",
+ conversation_id="conv_123",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ item = client.conversations.items.list(
+ conversation_id="conv_123",
+ )
+ assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ item = client.conversations.items.list(
+ conversation_id="conv_123",
+ after="after",
+ include=["code_interpreter_call.outputs"],
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.conversations.items.with_raw_response.list(
+ conversation_id="conv_123",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ item = response.parse()
+ assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.conversations.items.with_streaming_response.list(
+ conversation_id="conv_123",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ item = response.parse()
+ assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_list(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ client.conversations.items.with_raw_response.list(
+ conversation_id="",
+ )
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ item = client.conversations.items.delete(
+ item_id="msg_abc",
+ conversation_id="conv_123",
+ )
+ assert_matches_type(Conversation, item, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.conversations.items.with_raw_response.delete(
+ item_id="msg_abc",
+ conversation_id="conv_123",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ item = response.parse()
+ assert_matches_type(Conversation, item, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.conversations.items.with_streaming_response.delete(
+ item_id="msg_abc",
+ conversation_id="conv_123",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ item = response.parse()
+ assert_matches_type(Conversation, item, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ client.conversations.items.with_raw_response.delete(
+ item_id="msg_abc",
+ conversation_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
+ client.conversations.items.with_raw_response.delete(
+ item_id="",
+ conversation_id="conv_123",
+ )
+
+
+class TestAsyncItems:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ item = await async_client.conversations.items.create(
+ conversation_id="conv_123",
+ items=[
+ {
+ "content": "string",
+ "role": "user",
+ }
+ ],
+ )
+ assert_matches_type(ConversationItemList, item, path=["response"])
+
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ item = await async_client.conversations.items.create(
+ conversation_id="conv_123",
+ items=[
+ {
+ "content": "string",
+ "role": "user",
+ "type": "message",
+ }
+ ],
+ include=["code_interpreter_call.outputs"],
+ )
+ assert_matches_type(ConversationItemList, item, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.conversations.items.with_raw_response.create(
+ conversation_id="conv_123",
+ items=[
+ {
+ "content": "string",
+ "role": "user",
+ }
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ item = response.parse()
+ assert_matches_type(ConversationItemList, item, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.conversations.items.with_streaming_response.create(
+ conversation_id="conv_123",
+ items=[
+ {
+ "content": "string",
+ "role": "user",
+ }
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ item = await response.parse()
+ assert_matches_type(ConversationItemList, item, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ await async_client.conversations.items.with_raw_response.create(
+ conversation_id="",
+ items=[
+ {
+ "content": "string",
+ "role": "user",
+ }
+ ],
+ )
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ item = await async_client.conversations.items.retrieve(
+ item_id="msg_abc",
+ conversation_id="conv_123",
+ )
+ assert_matches_type(ConversationItem, item, path=["response"])
+
+ @parametrize
+ async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ item = await async_client.conversations.items.retrieve(
+ item_id="msg_abc",
+ conversation_id="conv_123",
+ include=["code_interpreter_call.outputs"],
+ )
+ assert_matches_type(ConversationItem, item, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.conversations.items.with_raw_response.retrieve(
+ item_id="msg_abc",
+ conversation_id="conv_123",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ item = response.parse()
+ assert_matches_type(ConversationItem, item, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.conversations.items.with_streaming_response.retrieve(
+ item_id="msg_abc",
+ conversation_id="conv_123",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ item = await response.parse()
+ assert_matches_type(ConversationItem, item, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ await async_client.conversations.items.with_raw_response.retrieve(
+ item_id="msg_abc",
+ conversation_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
+ await async_client.conversations.items.with_raw_response.retrieve(
+ item_id="",
+ conversation_id="conv_123",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ item = await async_client.conversations.items.list(
+ conversation_id="conv_123",
+ )
+ assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ item = await async_client.conversations.items.list(
+ conversation_id="conv_123",
+ after="after",
+ include=["code_interpreter_call.outputs"],
+ limit=0,
+ order="asc",
+ )
+ assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.conversations.items.with_raw_response.list(
+ conversation_id="conv_123",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ item = response.parse()
+ assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.conversations.items.with_streaming_response.list(
+ conversation_id="conv_123",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ item = await response.parse()
+ assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ await async_client.conversations.items.with_raw_response.list(
+ conversation_id="",
+ )
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ item = await async_client.conversations.items.delete(
+ item_id="msg_abc",
+ conversation_id="conv_123",
+ )
+ assert_matches_type(Conversation, item, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.conversations.items.with_raw_response.delete(
+ item_id="msg_abc",
+ conversation_id="conv_123",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ item = response.parse()
+ assert_matches_type(Conversation, item, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.conversations.items.with_streaming_response.delete(
+ item_id="msg_abc",
+ conversation_id="conv_123",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ item = await response.parse()
+ assert_matches_type(Conversation, item, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ await async_client.conversations.items.with_raw_response.delete(
+ item_id="msg_abc",
+ conversation_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"):
+ await async_client.conversations.items.with_raw_response.delete(
+ item_id="",
+ conversation_id="conv_123",
+ )
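Beyond the generated tests, the items resource follows the usual CRUD shape. A brief sketch of the calls exercised above, with placeholder content:

from openai import OpenAI

client = OpenAI()

conversation = client.conversations.create()

# Append a user message; a ConversationItemList is returned.
items = client.conversations.items.create(
    conversation_id=conversation.id,
    items=[{"type": "message", "role": "user", "content": "Hello!"}],
)

# Iterate items; SyncConversationCursorPage auto-paginates on the cursor.
for item in client.conversations.items.list(conversation_id=conversation.id):
    print(item.id)

# Deleting an item returns the parent Conversation.
client.conversations.items.delete(
    conversation_id=conversation.id,
    item_id=items.data[0].id,
)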
diff --git a/tests/api_resources/responses/test_input_items.py b/tests/api_resources/responses/test_input_items.py
index e8e3893bad..eda20c9a0b 100644
--- a/tests/api_resources/responses/test_input_items.py
+++ b/tests/api_resources/responses/test_input_items.py
@@ -30,7 +30,6 @@ def test_method_list_with_all_params(self, client: OpenAI) -> None:
input_item = client.responses.input_items.list(
response_id="response_id",
after="after",
- before="before",
include=["code_interpreter_call.outputs"],
limit=0,
order="asc",
@@ -86,7 +85,6 @@ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> N
input_item = await async_client.responses.input_items.list(
response_id="response_id",
after="after",
- before="before",
include=["code_interpreter_call.outputs"],
limit=0,
order="asc",
diff --git a/tests/api_resources/test_conversations.py b/tests/api_resources/test_conversations.py
new file mode 100644
index 0000000000..d21e685a04
--- /dev/null
+++ b/tests/api_resources/test_conversations.py
@@ -0,0 +1,341 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from openai import OpenAI, AsyncOpenAI
+from tests.utils import assert_matches_type
+from openai.types.conversations import (
+ Conversation,
+ ConversationDeletedResource,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestConversations:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: OpenAI) -> None:
+ conversation = client.conversations.create()
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ @parametrize
+ def test_method_create_with_all_params(self, client: OpenAI) -> None:
+ conversation = client.conversations.create(
+ items=[
+ {
+ "content": "string",
+ "role": "user",
+ "type": "message",
+ }
+ ],
+ metadata={"foo": "string"},
+ )
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: OpenAI) -> None:
+ response = client.conversations.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ conversation = response.parse()
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: OpenAI) -> None:
+ with client.conversations.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ conversation = response.parse()
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_retrieve(self, client: OpenAI) -> None:
+ conversation = client.conversations.retrieve(
+ "conv_123",
+ )
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ response = client.conversations.with_raw_response.retrieve(
+ "conv_123",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ conversation = response.parse()
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with client.conversations.with_streaming_response.retrieve(
+ "conv_123",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ conversation = response.parse()
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ client.conversations.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_update(self, client: OpenAI) -> None:
+ conversation = client.conversations.update(
+ conversation_id="conv_123",
+ metadata={"foo": "string"},
+ )
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ @parametrize
+ def test_raw_response_update(self, client: OpenAI) -> None:
+ response = client.conversations.with_raw_response.update(
+ conversation_id="conv_123",
+ metadata={"foo": "string"},
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ conversation = response.parse()
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ @parametrize
+ def test_streaming_response_update(self, client: OpenAI) -> None:
+ with client.conversations.with_streaming_response.update(
+ conversation_id="conv_123",
+ metadata={"foo": "string"},
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ conversation = response.parse()
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_update(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ client.conversations.with_raw_response.update(
+ conversation_id="",
+ metadata={"foo": "string"},
+ )
+
+ @parametrize
+ def test_method_delete(self, client: OpenAI) -> None:
+ conversation = client.conversations.delete(
+ "conv_123",
+ )
+ assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
+
+ @parametrize
+ def test_raw_response_delete(self, client: OpenAI) -> None:
+ response = client.conversations.with_raw_response.delete(
+ "conv_123",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ conversation = response.parse()
+ assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
+
+ @parametrize
+ def test_streaming_response_delete(self, client: OpenAI) -> None:
+ with client.conversations.with_streaming_response.delete(
+ "conv_123",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ conversation = response.parse()
+ assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_delete(self, client: OpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ client.conversations.with_raw_response.delete(
+ "",
+ )
+
+
+class TestAsyncConversations:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncOpenAI) -> None:
+ conversation = await async_client.conversations.create()
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ conversation = await async_client.conversations.create(
+ items=[
+ {
+ "content": "string",
+ "role": "user",
+ "type": "message",
+ }
+ ],
+ metadata={"foo": "string"},
+ )
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.conversations.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ conversation = response.parse()
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.conversations.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ conversation = await response.parse()
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
+ conversation = await async_client.conversations.retrieve(
+ "conv_123",
+ )
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.conversations.with_raw_response.retrieve(
+ "conv_123",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ conversation = response.parse()
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.conversations.with_streaming_response.retrieve(
+ "conv_123",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ conversation = await response.parse()
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ await async_client.conversations.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_update(self, async_client: AsyncOpenAI) -> None:
+ conversation = await async_client.conversations.update(
+ conversation_id="conv_123",
+ metadata={"foo": "string"},
+ )
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ @parametrize
+ async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.conversations.with_raw_response.update(
+ conversation_id="conv_123",
+ metadata={"foo": "string"},
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ conversation = response.parse()
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.conversations.with_streaming_response.update(
+ conversation_id="conv_123",
+ metadata={"foo": "string"},
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ conversation = await response.parse()
+ assert_matches_type(Conversation, conversation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_update(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ await async_client.conversations.with_raw_response.update(
+ conversation_id="",
+ metadata={"foo": "string"},
+ )
+
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
+ conversation = await async_client.conversations.delete(
+ "conv_123",
+ )
+ assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
+
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.conversations.with_raw_response.delete(
+ "conv_123",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ conversation = response.parse()
+ assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.conversations.with_streaming_response.delete(
+ "conv_123",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ conversation = await response.parse()
+ assert_matches_type(ConversationDeletedResource, conversation, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"):
+ await async_client.conversations.with_raw_response.delete(
+ "",
+ )
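The conversation resource itself supports the full lifecycle covered by these tests. A compact sketch with placeholder content and metadata:

from openai import OpenAI

client = OpenAI()

# Create a conversation seeded with one message and some metadata.
conversation = client.conversations.create(
    items=[{"type": "message", "role": "user", "content": "Hi there"}],
    metadata={"topic": "support"},  # placeholder metadata
)

# Retrieve it, update its metadata, then delete it.
conversation = client.conversations.retrieve(conversation.id)
conversation = client.conversations.update(
    conversation_id=conversation.id,
    metadata={"topic": "support", "resolved": "true"},
)
deleted = client.conversations.delete(conversation.id)
assert deleted.deleted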
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py
index 310800b87e..0cc20e926b 100644
--- a/tests/api_resources/test_responses.py
+++ b/tests/api_resources/test_responses.py
@@ -29,6 +29,7 @@ def test_method_create_overload_1(self, client: OpenAI) -> None:
def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
response = client.responses.create(
background=True,
+ conversation="string",
include=["code_interpreter_call.outputs"],
input="string",
instructions="instructions",
@@ -108,6 +109,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
response_stream = client.responses.create(
stream=True,
background=True,
+ conversation="string",
include=["code_interpreter_call.outputs"],
input="string",
instructions="instructions",
@@ -380,6 +382,7 @@ async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None
async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None:
response = await async_client.responses.create(
background=True,
+ conversation="string",
include=["code_interpreter_call.outputs"],
input="string",
instructions="instructions",
@@ -459,6 +462,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
response_stream = await async_client.responses.create(
stream=True,
background=True,
+ conversation="string",
include=["code_interpreter_call.outputs"],
input="string",
instructions="instructions",