From b106b6e57868d4a163d6c75075cb2ca3bdd0c895 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 27 Jun 2025 22:37:39 +0000 Subject: [PATCH 01/90] chore(ci): only run for pushes and fork pull requests --- .github/workflows/ci.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7991b3e7c7..f92bb6ea6f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,6 +17,7 @@ jobs: timeout-minutes: 10 name: lint runs-on: ${{ github.repository == 'stainless-sdks/openai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 @@ -42,6 +43,7 @@ jobs: contents: read id-token: write runs-on: depot-ubuntu-24.04 + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 @@ -62,6 +64,7 @@ jobs: timeout-minutes: 10 name: test runs-on: ${{ github.repository == 'stainless-sdks/openai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 @@ -83,7 +86,7 @@ jobs: timeout-minutes: 10 name: examples runs-on: ${{ github.repository == 'stainless-sdks/openai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} - if: github.repository == 'openai/openai-python' + if: github.repository == 'openai/openai-python && (github.event_name == 'push' || github.event.pull_request.head.repo.fork)' steps: - uses: actions/checkout@v4 From e4cacb867612ac7db956b64000bdc44e6cfc5efc Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sun, 29 Jun 2025 06:17:23 +0000 Subject: [PATCH 02/90] fix(ci): correct conditional --- .github/workflows/ci.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f92bb6ea6f..c405c77a7e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -36,14 +36,13 @@ jobs: run: ./scripts/lint upload: - if: github.repository == 'stainless-sdks/openai-python' + if: github.repository == 'stainless-sdks/openai-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork) timeout-minutes: 10 name: upload permissions: contents: read id-token: write runs-on: depot-ubuntu-24.04 - if: github.event_name == 'push' || github.event.pull_request.head.repo.fork steps: - uses: actions/checkout@v4 @@ -86,7 +85,7 @@ jobs: timeout-minutes: 10 name: examples runs-on: ${{ github.repository == 'stainless-sdks/openai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} - if: github.repository == 'openai/openai-python && (github.event_name == 'push' || github.event.pull_request.head.repo.fork)' + if: github.repository == 'openai/openai-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork) steps: - uses: actions/checkout@v4 From be1f58f043f4d05488546f0c34ea1ac599ec409a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 1 Jul 2025 23:33:35 +0000 Subject: [PATCH 03/90] chore(ci): change upload type --- .github/workflows/ci.yml | 18 ++++++++++++++++-- scripts/utils/upload-artifact.sh | 12 +++++++----- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 
c405c77a7e..8067386d5f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -35,10 +35,10 @@ jobs: - name: Run lints run: ./scripts/lint - upload: + build: if: github.repository == 'stainless-sdks/openai-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork) timeout-minutes: 10 - name: upload + name: build permissions: contents: read id-token: write @@ -46,6 +46,20 @@ jobs: steps: - uses: actions/checkout@v4 + - name: Install Rye + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + + - name: Install dependencies + run: rye sync --all-features + + - name: Run build + run: rye build + - name: Get GitHub OIDC Token id: github-oidc uses: actions/github-script@v6 diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh index 75198de98f..cd522975fc 100755 --- a/scripts/utils/upload-artifact.sh +++ b/scripts/utils/upload-artifact.sh @@ -1,7 +1,9 @@ #!/usr/bin/env bash set -exuo pipefail -RESPONSE=$(curl -X POST "$URL" \ +FILENAME=$(basename dist/*.whl) + +RESPONSE=$(curl -X POST "$URL?filename=$FILENAME" \ -H "Authorization: Bearer $AUTH" \ -H "Content-Type: application/json") @@ -12,13 +14,13 @@ if [[ "$SIGNED_URL" == "null" ]]; then exit 1 fi -UPLOAD_RESPONSE=$(tar -cz . | curl -v -X PUT \ - -H "Content-Type: application/gzip" \ - --data-binary @- "$SIGNED_URL" 2>&1) +UPLOAD_RESPONSE=$(curl -v -X PUT \ + -H "Content-Type: binary/octet-stream" \ + --data-binary "@dist/$FILENAME" "$SIGNED_URL" 2>&1) if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then echo -e "\033[32mUploaded build to Stainless storage.\033[0m" - echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/openai-python/$SHA'\033[0m" + echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/openai-python/$SHA/$FILENAME'\033[0m" else echo -e "\033[31mFailed to upload artifact.\033[0m" exit 1 From 4a943ad413fe23dc75b36b5599f0669e4d53fb64 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 1 Jul 2025 17:42:49 -0700 Subject: [PATCH 04/90] fix(responses): add missing arguments to parse --- src/openai/resources/responses/responses.py | 40 +++++++++++++++------ 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index aaf2088f38..ce132bdb05 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -943,22 +943,27 @@ def stream( def parse( self, *, - input: Union[str, ResponseInputParam], - model: Union[str, ChatModel], text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, - tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, + background: Optional[bool] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + model: ResponsesModel | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", 
"scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -991,21 +996,26 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]: "/responses", body=maybe_transform( { - "input": input, - "model": model, + "background": background, "include": include, + "input": input, "instructions": instructions, "max_output_tokens": max_output_tokens, + "max_tool_calls": max_tool_calls, "metadata": metadata, + "model": model, "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, + "prompt": prompt, "reasoning": reasoning, + "service_tier": service_tier, "store": store, "stream": stream, "temperature": temperature, "text": text, "tool_choice": tool_choice, "tools": tools, + "top_logprobs": top_logprobs, "top_p": top_p, "truncation": truncation, "user": user, @@ -2202,22 +2212,27 @@ def stream( async def parse( self, *, - input: Union[str, ResponseInputParam], - model: Union[str, ChatModel], text_format: type[TextFormatT] | NotGiven = NOT_GIVEN, - tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, + background: Optional[bool] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, + input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN, + max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + model: ResponsesModel | NotGiven = NOT_GIVEN, parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, + prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, + tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, + top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, @@ -2250,21 +2265,26 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]: "/responses", body=maybe_transform( { - "input": input, - "model": model, + "background": background, "include": include, + "input": input, "instructions": instructions, "max_output_tokens": max_output_tokens, + "max_tool_calls": max_tool_calls, "metadata": metadata, + "model": model, "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, + "prompt": prompt, "reasoning": reasoning, + "service_tier": service_tier, "store": store, "stream": 
stream, "temperature": temperature, "text": text, "tool_choice": tool_choice, "tools": tools, + "top_logprobs": top_logprobs, "top_p": top_p, "truncation": truncation, "user": user, From 930662d9802b8e351a5c771dfc53604747d5ad68 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 1 Jul 2025 17:36:56 -0700 Subject: [PATCH 05/90] chore(tests): ensure parse method is in sync with create --- tests/api_resources/test_responses.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py index 9c76928c8c..158654ee70 100644 --- a/tests/api_resources/test_responses.py +++ b/tests/api_resources/test_responses.py @@ -9,6 +9,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type +from openai._utils import assert_signatures_in_sync from openai.types.responses import ( Response, ) @@ -340,6 +341,17 @@ def test_path_params_cancel(self, client: OpenAI) -> None: ) +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +def test_parse_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None: + checking_client: OpenAI | AsyncOpenAI = client if sync else async_client + + assert_signatures_in_sync( + checking_client.responses.create, + checking_client.responses.parse, + exclude_params={"stream", "tools"}, + ) + + class TestAsyncResponses: parametrize = pytest.mark.parametrize( "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] From 32a32967a4f0b1a62183194e6013b105ec291151 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 7 Jul 2025 13:36:19 +0100 Subject: [PATCH 06/90] fix(vector stores): add missing arguments to files.create_and_poll --- src/openai/resources/vector_stores/files.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/openai/resources/vector_stores/files.py b/src/openai/resources/vector_stores/files.py index f860384629..cf5c4c1d11 100644 --- a/src/openai/resources/vector_stores/files.py +++ b/src/openai/resources/vector_stores/files.py @@ -304,11 +304,14 @@ def create_and_poll( file_id: str, *, vector_store_id: str, + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, poll_interval_ms: int | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFile: """Attach a file to the given vector store and wait for it to be processed.""" - self.create(vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy) + self.create( + vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy, attributes=attributes + ) return self.poll( file_id, @@ -707,11 +710,14 @@ async def create_and_poll( file_id: str, *, vector_store_id: str, + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, poll_interval_ms: int | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFile: """Attach a file to the given vector store and wait for it to be processed.""" - await self.create(vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy) + await self.create( + vector_store_id=vector_store_id, file_id=file_id, chunking_strategy=chunking_strategy, attributes=attributes + ) return await self.poll( file_id, From 77d5ac2edb5d828faaff82baa524807823032188 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 7 Jul 2025 13:37:02 +0100 
Subject: [PATCH 07/90] chore(tests): ensure vector store files create and poll method is in sync --- tests/api_resources/vector_stores/test_files.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/api_resources/vector_stores/test_files.py b/tests/api_resources/vector_stores/test_files.py index 0778704d5d..c951a13b3f 100644 --- a/tests/api_resources/vector_stores/test_files.py +++ b/tests/api_resources/vector_stores/test_files.py @@ -9,6 +9,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type +from openai._utils import assert_signatures_in_sync from openai.pagination import SyncPage, AsyncPage, SyncCursorPage, AsyncCursorPage from openai.types.vector_stores import ( VectorStoreFile, @@ -625,3 +626,14 @@ async def test_path_params_content(self, async_client: AsyncOpenAI) -> None: file_id="", vector_store_id="vs_abc123", ) + + +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +def test_create_and_poll_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None: + checking_client: OpenAI | AsyncOpenAI = client if sync else async_client + + assert_signatures_in_sync( + checking_client.vector_stores.files.create, + checking_client.vector_stores.files.create_and_poll, + exclude_params={"extra_headers", "extra_query", "extra_body", "timeout"}, + ) From 48121221f2797d6674c24c873a897b5eaa591671 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 7 Jul 2025 13:39:27 +0100 Subject: [PATCH 08/90] fix(vector stores): add missing arguments to files.upload_and_poll --- src/openai/resources/vector_stores/files.py | 4 ++++ tests/api_resources/vector_stores/test_files.py | 11 +++++++++++ 2 files changed, 15 insertions(+) diff --git a/src/openai/resources/vector_stores/files.py b/src/openai/resources/vector_stores/files.py index cf5c4c1d11..2c90bb7a1f 100644 --- a/src/openai/resources/vector_stores/files.py +++ b/src/openai/resources/vector_stores/files.py @@ -380,6 +380,7 @@ def upload_and_poll( *, vector_store_id: str, file: FileTypes, + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, poll_interval_ms: int | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFile: @@ -390,6 +391,7 @@ def upload_and_poll( file_id=file_obj.id, chunking_strategy=chunking_strategy, poll_interval_ms=poll_interval_ms, + attributes=attributes, ) def content( @@ -788,6 +790,7 @@ async def upload_and_poll( *, vector_store_id: str, file: FileTypes, + attributes: Optional[Dict[str, Union[str, float, bool]]] | NotGiven = NOT_GIVEN, poll_interval_ms: int | NotGiven = NOT_GIVEN, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, ) -> VectorStoreFile: @@ -798,6 +801,7 @@ async def upload_and_poll( file_id=file_obj.id, poll_interval_ms=poll_interval_ms, chunking_strategy=chunking_strategy, + attributes=attributes, ) def content( diff --git a/tests/api_resources/vector_stores/test_files.py b/tests/api_resources/vector_stores/test_files.py index c951a13b3f..7394b50d95 100644 --- a/tests/api_resources/vector_stores/test_files.py +++ b/tests/api_resources/vector_stores/test_files.py @@ -637,3 +637,14 @@ def test_create_and_poll_method_in_sync(sync: bool, client: OpenAI, async_client checking_client.vector_stores.files.create_and_poll, exclude_params={"extra_headers", "extra_query", "extra_body", "timeout"}, ) + + +@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) +def test_upload_and_poll_method_in_sync(sync: bool, 
client: OpenAI, async_client: AsyncOpenAI) -> None: + checking_client: OpenAI | AsyncOpenAI = client if sync else async_client + + assert_signatures_in_sync( + checking_client.vector_stores.files.create, + checking_client.vector_stores.files.upload_and_poll, + exclude_params={"file_id", "extra_headers", "extra_query", "extra_body", "timeout"}, + ) From 266008a12e68881ffa55b02501cd5fcd6ab284d9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 7 Jul 2025 15:56:20 +0000 Subject: [PATCH 09/90] chore(internal): codegen related update --- requirements-dev.lock | 2 +- requirements.lock | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index 138fd3b4f6..e560d4f33c 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -81,7 +81,7 @@ httpx==0.28.1 # via httpx-aiohttp # via openai # via respx -httpx-aiohttp==0.1.6 +httpx-aiohttp==0.1.8 # via openai idna==3.4 # via anyio diff --git a/requirements.lock b/requirements.lock index 84cb9276d8..52ad2c0452 100644 --- a/requirements.lock +++ b/requirements.lock @@ -45,7 +45,7 @@ httpcore==1.0.2 httpx==0.28.1 # via httpx-aiohttp # via openai -httpx-aiohttp==0.1.6 +httpx-aiohttp==0.1.8 # via openai idna==3.4 # via anyio From c5b77db2ee8d73895b179ae859c40f4f1ae42437 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 7 Jul 2025 15:56:49 +0000 Subject: [PATCH 10/90] release: 1.93.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 20 ++++++++++++++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 23 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 3ceb8e2f5b..daa7a2a062 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.93.0" + ".": "1.93.1" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 3274b67105..35d98e9765 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,25 @@ # Changelog +## 1.93.1 (2025-07-07) + +Full Changelog: [v1.93.0...v1.93.1](https://github.com/openai/openai-python/compare/v1.93.0...v1.93.1) + +### Bug Fixes + +* **ci:** correct conditional ([de6a9ce](https://github.com/openai/openai-python/commit/de6a9ce078731d60b0bdc42a9322548c575f11a3)) +* **responses:** add missing arguments to parse ([05590ec](https://github.com/openai/openai-python/commit/05590ec2a96399afd05baf5a3ee1d9a744f09c40)) +* **vector stores:** add missing arguments to files.create_and_poll ([3152134](https://github.com/openai/openai-python/commit/3152134510532ec7c522d6b50a820deea205b602)) +* **vector stores:** add missing arguments to files.upload_and_poll ([9d4f425](https://github.com/openai/openai-python/commit/9d4f42569d5b59311453b1b11ee1dd2e8a271268)) + + +### Chores + +* **ci:** change upload type ([cd4aa88](https://github.com/openai/openai-python/commit/cd4aa889c50581d861728c9606327992485f0d0d)) +* **ci:** only run for pushes and fork pull requests ([f89c7eb](https://github.com/openai/openai-python/commit/f89c7eb46c6f081254715d75543cbee3ffa83822)) +* **internal:** codegen related update ([bddb8d2](https://github.com/openai/openai-python/commit/bddb8d2091455920e8526068d64f3f8a5cac7ae6)) +* **tests:** ensure parse method is in sync with create ([4f58e18](https://github.com/openai/openai-python/commit/4f58e187c12dc8b2c33e9cca284b0429e5cc4de5)) +* **tests:** ensure vector store files create 
and poll method is in sync ([0fe75a2](https://github.com/openai/openai-python/commit/0fe75a28f6109b2d25b015dc99472a06693e0e9f)) + ## 1.93.0 (2025-06-27) Full Changelog: [v1.92.3...v1.93.0](https://github.com/openai/openai-python/compare/v1.92.3...v1.93.0) diff --git a/pyproject.toml b/pyproject.toml index 0a3e3e1ca8..73efe65b2f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.93.0" +version = "1.93.1" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 84c3a45a00..289693a91c 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.93.0" # x-release-please-version +__version__ = "1.93.1" # x-release-please-version From cb6fa9c222079d334122b7b66e13dd3b18d5a92a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Jul 2025 14:24:43 +0000 Subject: [PATCH 11/90] chore(internal): bump pinned h11 dep --- requirements-dev.lock | 4 ++-- requirements.lock | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index e560d4f33c..1a7500d569 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -73,9 +73,9 @@ filelock==3.12.4 frozenlist==1.7.0 # via aiohttp # via aiosignal -h11==0.14.0 +h11==0.16.0 # via httpcore -httpcore==1.0.2 +httpcore==1.0.9 # via httpx httpx==0.28.1 # via httpx-aiohttp diff --git a/requirements.lock b/requirements.lock index 52ad2c0452..3b6ece87e2 100644 --- a/requirements.lock +++ b/requirements.lock @@ -38,9 +38,9 @@ exceptiongroup==1.2.2 frozenlist==1.7.0 # via aiohttp # via aiosignal -h11==0.14.0 +h11==0.16.0 # via httpcore -httpcore==1.0.2 +httpcore==1.0.9 # via httpx httpx==0.28.1 # via httpx-aiohttp From 0d42dff3bcd3d5f13c4d14a5f872054f35f53a6d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Jul 2025 15:25:53 +0000 Subject: [PATCH 12/90] chore(package): mark python 3.13 as supported --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 73efe65b2f..9e43f5e7d7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,6 +26,7 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: MacOS", From fe82bb48899919803a7a59b9d6a740b4390d6cec Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 8 Jul 2025 15:26:27 +0000 Subject: [PATCH 13/90] release: 1.93.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index daa7a2a062..02609a40fd 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.93.1" + ".": "1.93.2" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 35d98e9765..92645c8e02 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog 
+## 1.93.2 (2025-07-08) + +Full Changelog: [v1.93.1...v1.93.2](https://github.com/openai/openai-python/compare/v1.93.1...v1.93.2) + +### Chores + +* **internal:** bump pinned h11 dep ([4fca6ae](https://github.com/openai/openai-python/commit/4fca6ae2d0d7f27cbac8d06c3917932767c8c6b8)) +* **package:** mark python 3.13 as supported ([2229047](https://github.com/openai/openai-python/commit/2229047b8a549df16c617bddfe3b4521cfd257a5)) + ## 1.93.1 (2025-07-07) Full Changelog: [v1.93.0...v1.93.1](https://github.com/openai/openai-python/compare/v1.93.0...v1.93.1) diff --git a/pyproject.toml b/pyproject.toml index 9e43f5e7d7..d1fda0244b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.93.1" +version = "1.93.2" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 289693a91c..a5ddf48daf 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.93.1" # x-release-please-version +__version__ = "1.93.2" # x-release-please-version From 589b0e3d755e8887747ee1c7ea841de2232b9899 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 9 Jul 2025 10:16:11 +0000 Subject: [PATCH 14/90] fix(parsing): correctly handle nested discriminated unions --- src/openai/_models.py | 11 +++++++---- tests/test_models.py | 45 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 4 deletions(-) diff --git a/src/openai/_models.py b/src/openai/_models.py index 065e8da760..f347a81dac 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -5,6 +5,7 @@ from typing import TYPE_CHECKING, Any, Type, Tuple, Union, Generic, TypeVar, Callable, Optional, cast from datetime import date, datetime from typing_extensions import ( + List, Unpack, Literal, ClassVar, @@ -391,7 +392,7 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object: if type_ is None: raise RuntimeError(f"Unexpected field type is None for {key}") - return construct_type(value=value, type_=type_) + return construct_type(value=value, type_=type_, metadata=getattr(field, "metadata", None)) def is_basemodel(type_: type) -> bool: @@ -445,7 +446,7 @@ def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T: return cast(_T, construct_type(value=value, type_=type_)) -def construct_type(*, value: object, type_: object) -> object: +def construct_type(*, value: object, type_: object, metadata: Optional[List[Any]] = None) -> object: """Loose coercion to the expected type with construction of nested values. If the given value does not match the expected type then it is returned as-is. @@ -463,8 +464,10 @@ def construct_type(*, value: object, type_: object) -> object: type_ = type_.__value__ # type: ignore[unreachable] # unwrap `Annotated[T, ...]` -> `T` - if is_annotated_type(type_): - meta: tuple[Any, ...] = get_args(type_)[1:] + if metadata is not None: + meta: tuple[Any, ...] 
= tuple(metadata) + elif is_annotated_type(type_): + meta = get_args(type_)[1:] type_ = extract_type_arg(type_, 0) else: meta = tuple() diff --git a/tests/test_models.py b/tests/test_models.py index 440e17a08c..7262f45006 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -889,3 +889,48 @@ class ModelB(BaseModel): ) assert isinstance(m, ModelB) + + +def test_nested_discriminated_union() -> None: + class InnerType1(BaseModel): + type: Literal["type_1"] + + class InnerModel(BaseModel): + inner_value: str + + class InnerType2(BaseModel): + type: Literal["type_2"] + some_inner_model: InnerModel + + class Type1(BaseModel): + base_type: Literal["base_type_1"] + value: Annotated[ + Union[ + InnerType1, + InnerType2, + ], + PropertyInfo(discriminator="type"), + ] + + class Type2(BaseModel): + base_type: Literal["base_type_2"] + + T = Annotated[ + Union[ + Type1, + Type2, + ], + PropertyInfo(discriminator="base_type"), + ] + + model = construct_type( + type_=T, + value={ + "base_type": "base_type_1", + "value": { + "type": "type_2", + }, + }, + ) + assert isinstance(model, Type1) + assert isinstance(model.value, InnerType2) From fa8e1cb37681e06da4239d8011687b7dc105365a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 9 Jul 2025 10:16:40 +0000 Subject: [PATCH 15/90] release: 1.93.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 02609a40fd..074ba77967 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.93.2" + ".": "1.93.3" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 92645c8e02..00931cdb79 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.93.3 (2025-07-09) + +Full Changelog: [v1.93.2...v1.93.3](https://github.com/openai/openai-python/compare/v1.93.2...v1.93.3) + +### Bug Fixes + +* **parsing:** correctly handle nested discriminated unions ([fc8a677](https://github.com/openai/openai-python/commit/fc8a67715d8f1b45d8639b8b6f9f6590fe358734)) + ## 1.93.2 (2025-07-08) Full Changelog: [v1.93.1...v1.93.2](https://github.com/openai/openai-python/compare/v1.93.1...v1.93.2) diff --git a/pyproject.toml b/pyproject.toml index d1fda0244b..4f3642c922 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.93.2" +version = "1.93.3" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index a5ddf48daf..828e93d58a 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.93.2" # x-release-please-version +__version__ = "1.93.3" # x-release-please-version From 361dc3274b6b48847860cb92bfccb31dd0b546ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Florian=20Sch=C3=BCller?= Date: Thu, 10 Jul 2025 14:48:09 +0200 Subject: [PATCH 16/90] feat(api): return better error message on missing embedding (#2369) --- src/openai/resources/embeddings.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py index 553dacc284..609f33f3b4 100644 --- a/src/openai/resources/embeddings.py +++ b/src/openai/resources/embeddings.py @@ -112,6 +112,9 @@ def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse: # don't modify the response object if a user explicitly asked for a format return obj + if not obj.data: + raise ValueError("No embedding data received") + for embedding in obj.data: data = cast(object, embedding.embedding) if not isinstance(data, str): @@ -228,6 +231,9 @@ def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse: # don't modify the response object if a user explicitly asked for a format return obj + if not obj.data: + raise ValueError("No embedding data received") + for embedding in obj.data: data = cast(object, embedding.embedding) if not isinstance(data, str): From 4d5fe48ee4bb44064c786d175084b7ba7f1bd792 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 10 Jul 2025 12:48:36 +0000 Subject: [PATCH 17/90] release: 1.94.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 074ba77967..6db20a9bfb 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.93.3" + ".": "1.94.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 00931cdb79..7c99b6d6c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.94.0 (2025-07-10) + +Full Changelog: [v1.93.3...v1.94.0](https://github.com/openai/openai-python/compare/v1.93.3...v1.94.0) + +### Features + +* **api:** return better error message on missing embedding ([#2369](https://github.com/openai/openai-python/issues/2369)) ([e53464a](https://github.com/openai/openai-python/commit/e53464ae95f6a041f3267762834e6156c5ce1b57)) + ## 1.93.3 (2025-07-09) Full Changelog: [v1.93.2...v1.93.3](https://github.com/openai/openai-python/compare/v1.93.2...v1.93.3) diff --git a/pyproject.toml b/pyproject.toml index 4f3642c922..2c87a67c77 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.93.3" +version = "1.94.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 828e93d58a..9ed696d5dd 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.93.3" # x-release-please-version +__version__ = "1.94.0" # x-release-please-version From db5c35049accb05f5fb03791ef9c12547fd309a7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 10 Jul 2025 13:34:57 -0500 Subject: [PATCH 18/90] release: 1.95.0 (#2456) * chore(readme): fix version rendering on pypi * feat(api): add file_url, fix event ID * release: 1.95.0 --------- Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com> --- .release-please-manifest.json | 2 +- .stats.yml | 6 +-- CHANGELOG.md | 13 +++++ README.md | 3 +- pyproject.toml | 2 +- src/openai/_version.py | 2 +- src/openai/types/audio/transcription.py | 2 +- .../types/audio/transcription_verbose.py | 2 +- ...put_audio_transcription_completed_event.py | 52 +++++++++++++++++-- src/openai/types/file_object.py | 11 +++- .../types/responses/response_input_file.py | 3 ++ .../responses/response_input_file_param.py | 3 ++ ...response_mcp_call_arguments_delta_event.py | 4 +- .../response_mcp_call_arguments_done_event.py | 4 +- ...onse_output_text_annotation_added_event.py | 4 +- src/openai/types/responses/tool.py | 3 ++ src/openai/types/responses/tool_param.py | 3 ++ 17 files changed, 99 insertions(+), 20 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6db20a9bfb..9a75280778 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.94.0" + ".": "1.95.0" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index 535155f4ae..816f05df5c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-a473967d1766dc155994d932fbc4a5bcbd1c140a37c20d0a4065e1bf0640536d.yml -openapi_spec_hash: 67cdc62b0d6c8b1de29b7dc54b265749 -config_hash: 7b53f96f897ca1b3407a5341a6f820db +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-2d116cda53321baa3479e628512def723207a81eb1cdaebb542bd0555e563bda.yml +openapi_spec_hash: 809d958fec261a32004a4b026b718793 +config_hash: e74d6791681e3af1b548748ff47a22c2 diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c99b6d6c8..f5c49d637f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 1.95.0 (2025-07-10) + +Full Changelog: [v1.94.0...v1.95.0](https://github.com/openai/openai-python/compare/v1.94.0...v1.95.0) + +### Features + +* **api:** add file_url, fix event ID ([265e216](https://github.com/openai/openai-python/commit/265e216396196d66cdfb5f92c5ef1a2a6ff27b5b)) + + +### Chores + +* **readme:** fix version rendering on pypi ([1eee5ca](https://github.com/openai/openai-python/commit/1eee5cabf2fd93877cd3ba85d0c6ed2ffd5f159f)) + ## 1.94.0 (2025-07-10) Full Changelog: [v1.93.3...v1.94.0](https://github.com/openai/openai-python/compare/v1.93.3...v1.94.0) diff --git a/README.md b/README.md index b38ef578d2..d09de14f3c 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # OpenAI Python API library -[![PyPI version]()](https://pypi.org/project/openai/) + +[![PyPI version](https://img.shields.io/pypi/v/openai.svg?label=pypi%20(stable))](https://pypi.org/project/openai/) The OpenAI Python library provides convenient access to the OpenAI REST API from any Python 3.8+ application. 
The library includes type definitions for all request params and response fields, diff --git a/pyproject.toml b/pyproject.toml index 2c87a67c77..774f1a35b0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.94.0" +version = "1.95.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 9ed696d5dd..342202129c 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.94.0" # x-release-please-version +__version__ = "1.95.0" # x-release-please-version diff --git a/src/openai/types/audio/transcription.py b/src/openai/types/audio/transcription.py index 7115eb9edb..4c5882152d 100644 --- a/src/openai/types/audio/transcription.py +++ b/src/openai/types/audio/transcription.py @@ -46,7 +46,7 @@ class UsageTokens(BaseModel): class UsageDuration(BaseModel): - duration: float + seconds: float """Duration of the input audio in seconds.""" type: Literal["duration"] diff --git a/src/openai/types/audio/transcription_verbose.py b/src/openai/types/audio/transcription_verbose.py index cc6d769a65..addda71ec6 100644 --- a/src/openai/types/audio/transcription_verbose.py +++ b/src/openai/types/audio/transcription_verbose.py @@ -11,7 +11,7 @@ class Usage(BaseModel): - duration: float + seconds: float """Duration of the input audio in seconds.""" type: Literal["duration"] diff --git a/src/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py b/src/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py index 469811693c..e7c457d4b2 100644 --- a/src/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py +++ b/src/openai/types/beta/realtime/conversation_item_input_audio_transcription_completed_event.py @@ -1,11 +1,54 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Optional -from typing_extensions import Literal +from typing import List, Union, Optional +from typing_extensions import Literal, TypeAlias from ...._models import BaseModel -__all__ = ["ConversationItemInputAudioTranscriptionCompletedEvent", "Logprob"] +__all__ = [ + "ConversationItemInputAudioTranscriptionCompletedEvent", + "Usage", + "UsageTranscriptTextUsageTokens", + "UsageTranscriptTextUsageTokensInputTokenDetails", + "UsageTranscriptTextUsageDuration", + "Logprob", +] + + +class UsageTranscriptTextUsageTokensInputTokenDetails(BaseModel): + audio_tokens: Optional[int] = None + """Number of audio tokens billed for this request.""" + + text_tokens: Optional[int] = None + """Number of text tokens billed for this request.""" + + +class UsageTranscriptTextUsageTokens(BaseModel): + input_tokens: int + """Number of input tokens billed for this request.""" + + output_tokens: int + """Number of output tokens generated.""" + + total_tokens: int + """Total number of tokens used (input + output).""" + + type: Literal["tokens"] + """The type of the usage object. 
Always `tokens` for this variant.""" + + input_token_details: Optional[UsageTranscriptTextUsageTokensInputTokenDetails] = None + """Details about the input tokens billed for this request.""" + + +class UsageTranscriptTextUsageDuration(BaseModel): + seconds: float + """Duration of the input audio in seconds.""" + + type: Literal["duration"] + """The type of the usage object. Always `duration` for this variant.""" + + +Usage: TypeAlias = Union[UsageTranscriptTextUsageTokens, UsageTranscriptTextUsageDuration] class Logprob(BaseModel): @@ -37,5 +80,8 @@ class ConversationItemInputAudioTranscriptionCompletedEvent(BaseModel): The event type, must be `conversation.item.input_audio_transcription.completed`. """ + usage: Usage + """Usage statistics for the transcription.""" + logprobs: Optional[List[Logprob]] = None """The log probabilities of the transcription.""" diff --git a/src/openai/types/file_object.py b/src/openai/types/file_object.py index 1d65e6987d..883c2de019 100644 --- a/src/openai/types/file_object.py +++ b/src/openai/types/file_object.py @@ -25,12 +25,19 @@ class FileObject(BaseModel): """The object type, which is always `file`.""" purpose: Literal[ - "assistants", "assistants_output", "batch", "batch_output", "fine-tune", "fine-tune-results", "vision" + "assistants", + "assistants_output", + "batch", + "batch_output", + "fine-tune", + "fine-tune-results", + "vision", + "user_data", ] """The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, - `fine-tune`, `fine-tune-results` and `vision`. + `fine-tune`, `fine-tune-results`, `vision`, and `user_data`. """ status: Literal["uploaded", "processed", "error"] diff --git a/src/openai/types/responses/response_input_file.py b/src/openai/types/responses/response_input_file.py index 00b35dc844..1eecd6a2b6 100644 --- a/src/openai/types/responses/response_input_file.py +++ b/src/openai/types/responses/response_input_file.py @@ -18,5 +18,8 @@ class ResponseInputFile(BaseModel): file_id: Optional[str] = None """The ID of the file to be sent to the model.""" + file_url: Optional[str] = None + """The URL of the file to be sent to the model.""" + filename: Optional[str] = None """The name of the file to be sent to the model.""" diff --git a/src/openai/types/responses/response_input_file_param.py b/src/openai/types/responses/response_input_file_param.py index 61ae46f0cb..0b5f513ec6 100644 --- a/src/openai/types/responses/response_input_file_param.py +++ b/src/openai/types/responses/response_input_file_param.py @@ -18,5 +18,8 @@ class ResponseInputFileParam(TypedDict, total=False): file_id: Optional[str] """The ID of the file to be sent to the model.""" + file_url: str + """The URL of the file to be sent to the model.""" + filename: str """The name of the file to be sent to the model.""" diff --git a/src/openai/types/responses/response_mcp_call_arguments_delta_event.py b/src/openai/types/responses/response_mcp_call_arguments_delta_event.py index d6651e6999..8481506dc3 100644 --- a/src/openai/types/responses/response_mcp_call_arguments_delta_event.py +++ b/src/openai/types/responses/response_mcp_call_arguments_delta_event.py @@ -20,5 +20,5 @@ class ResponseMcpCallArgumentsDeltaEvent(BaseModel): sequence_number: int """The sequence number of this event.""" - type: Literal["response.mcp_call.arguments_delta"] - """The type of the event. Always 'response.mcp_call.arguments_delta'.""" + type: Literal["response.mcp_call_arguments.delta"] + """The type of the event. 
Always 'response.mcp_call_arguments.delta'.""" diff --git a/src/openai/types/responses/response_mcp_call_arguments_done_event.py b/src/openai/types/responses/response_mcp_call_arguments_done_event.py index a7ce46ad36..4be09d4862 100644 --- a/src/openai/types/responses/response_mcp_call_arguments_done_event.py +++ b/src/openai/types/responses/response_mcp_call_arguments_done_event.py @@ -20,5 +20,5 @@ class ResponseMcpCallArgumentsDoneEvent(BaseModel): sequence_number: int """The sequence number of this event.""" - type: Literal["response.mcp_call.arguments_done"] - """The type of the event. Always 'response.mcp_call.arguments_done'.""" + type: Literal["response.mcp_call_arguments.done"] + """The type of the event. Always 'response.mcp_call_arguments.done'.""" diff --git a/src/openai/types/responses/response_output_text_annotation_added_event.py b/src/openai/types/responses/response_output_text_annotation_added_event.py index ce96790c92..62d8f72863 100644 --- a/src/openai/types/responses/response_output_text_annotation_added_event.py +++ b/src/openai/types/responses/response_output_text_annotation_added_event.py @@ -26,5 +26,5 @@ class ResponseOutputTextAnnotationAddedEvent(BaseModel): sequence_number: int """The sequence number of this event.""" - type: Literal["response.output_text_annotation.added"] - """The type of the event. Always 'response.output_text_annotation.added'.""" + type: Literal["response.output_text.annotation.added"] + """The type of the event. Always 'response.output_text.annotation.added'.""" diff --git a/src/openai/types/responses/tool.py b/src/openai/types/responses/tool.py index 904c474e40..9c1573bda9 100644 --- a/src/openai/types/responses/tool.py +++ b/src/openai/types/responses/tool.py @@ -79,6 +79,9 @@ class Mcp(BaseModel): require_approval: Optional[McpRequireApproval] = None """Specify which of the MCP server's tools require approval.""" + server_description: Optional[str] = None + """Optional description of the MCP server, used to provide more context.""" + class CodeInterpreterContainerCodeInterpreterToolAuto(BaseModel): type: Literal["auto"] diff --git a/src/openai/types/responses/tool_param.py b/src/openai/types/responses/tool_param.py index 4174560d42..493a1dad9c 100644 --- a/src/openai/types/responses/tool_param.py +++ b/src/openai/types/responses/tool_param.py @@ -80,6 +80,9 @@ class Mcp(TypedDict, total=False): require_approval: Optional[McpRequireApproval] """Specify which of the MCP server's tools require approval.""" + server_description: str + """Optional description of the MCP server, used to provide more context.""" + class CodeInterpreterContainerCodeInterpreterToolAuto(TypedDict, total=False): type: Required[Literal["auto"]] From fcbb59831c12e9d0a1dae1880d4f650c57de5294 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 11 Jul 2025 12:12:33 +0000 Subject: [PATCH 19/90] fix(client): don't send Content-Type header on GET requests --- pyproject.toml | 2 +- src/openai/_base_client.py | 11 +++++++++-- tests/test_client.py | 4 ++-- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 774f1a35b0..f423907080 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,7 +44,7 @@ Repository = "https://github.com/openai/openai-python" openai = "openai.cli:main" [project.optional-dependencies] -aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.6"] +aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.8"] realtime = ["websockets >= 13, < 16"] datalib = ["numpy >= 1", 
"pandas >= 1.2.3", "pandas-stubs >= 1.1.0.11"] voice_helpers = ["sounddevice>=0.5.1", "numpy>=2.0.2"] diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 0a6385a7b5..3fe669259f 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -531,6 +531,15 @@ def _build_request( # work around https://github.com/encode/httpx/discussions/2880 kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")} + is_body_allowed = options.method.lower() != "get" + + if is_body_allowed: + kwargs["json"] = json_data if is_given(json_data) else None + kwargs["files"] = files + else: + headers.pop("Content-Type", None) + kwargs.pop("data", None) + # TODO: report this error to httpx return self._client.build_request( # pyright: ignore[reportUnknownMemberType] headers=headers, @@ -542,8 +551,6 @@ def _build_request( # so that passing a `TypedDict` doesn't cause an error. # https://github.com/microsoft/pyright/issues/3526#event-6715453066 params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None, - json=json_data if is_given(json_data) else None, - files=files, **kwargs, ) diff --git a/tests/test_client.py b/tests/test_client.py index 988e5d994c..ccda50a7f0 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -463,7 +463,7 @@ def test_request_extra_query(self) -> None: def test_multipart_repeating_array(self, client: OpenAI) -> None: request = client._build_request( FinalRequestOptions.construct( - method="get", + method="post", url="/foo", headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, json_data={"array": ["foo", "bar"]}, @@ -1348,7 +1348,7 @@ def test_request_extra_query(self) -> None: def test_multipart_repeating_array(self, async_client: AsyncOpenAI) -> None: request = async_client._build_request( FinalRequestOptions.construct( - method="get", + method="post", url="/foo", headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, json_data={"array": ["foo", "bar"]}, From 0fa4028ac5b20c49aa0d3ed69dea2dcf277db574 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 11 Jul 2025 18:29:28 +0000 Subject: [PATCH 20/90] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 816f05df5c..0a24d32759 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-2d116cda53321baa3479e628512def723207a81eb1cdaebb542bd0555e563bda.yml openapi_spec_hash: 809d958fec261a32004a4b026b718793 -config_hash: e74d6791681e3af1b548748ff47a22c2 +config_hash: 00b55237774c015fc35f58d2820759a9 From 043589aebf4848dfa977f2b9d0a40a2de0dde95e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 11 Jul 2025 18:32:46 +0000 Subject: [PATCH 21/90] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 0a24d32759..295b77b5af 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-2d116cda53321baa3479e628512def723207a81eb1cdaebb542bd0555e563bda.yml openapi_spec_hash: 809d958fec261a32004a4b026b718793 -config_hash: 00b55237774c015fc35f58d2820759a9 +config_hash: 
5ef02e55671aae1ba9bd62fe4eb0f50f From 05e3755b8fd8f03adca94eb6797c0c21b564fa80 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 11 Jul 2025 20:38:34 +0000 Subject: [PATCH 22/90] codegen metadata --- .stats.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.stats.yml b/.stats.yml index 295b77b5af..b82cec4eb6 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-2d116cda53321baa3479e628512def723207a81eb1cdaebb542bd0555e563bda.yml -openapi_spec_hash: 809d958fec261a32004a4b026b718793 -config_hash: 5ef02e55671aae1ba9bd62fe4eb0f50f +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-de3e91790d0b9f3ce26d679ac07079880ccc695bd8c878f961c4d577a5025a2e.yml +openapi_spec_hash: 4b44e3f287583d01fbe7b10cd943254a +config_hash: 06b9a88561844d60d8efa4eaabf5fa3c From 1c0b4642054544af92c0c3a8cdf5ef3c3f62f1d7 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 11 Jul 2025 20:39:01 +0000 Subject: [PATCH 23/90] release: 1.95.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 9a75280778..ffcd85673c 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.95.0" + ".": "1.95.1" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index f5c49d637f..14d61de1bf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.95.1 (2025-07-11) + +Full Changelog: [v1.95.0...v1.95.1](https://github.com/openai/openai-python/compare/v1.95.0...v1.95.1) + +### Bug Fixes + +* **client:** don't send Content-Type header on GET requests ([182b763](https://github.com/openai/openai-python/commit/182b763065fbaaf68491a7e4a15fcb23cac361de)) + ## 1.95.0 (2025-07-10) Full Changelog: [v1.94.0...v1.95.0](https://github.com/openai/openai-python/compare/v1.94.0...v1.95.0) diff --git a/pyproject.toml b/pyproject.toml index f423907080..d9305c5469 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.95.0" +version = "1.95.1" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 342202129c..6e2b83bbaa 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.95.0" # x-release-please-version +__version__ = "1.95.1" # x-release-please-version From 2028ad2b95f3e8f7736d45d730c0cc53852c392c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 14 Jul 2025 17:29:56 +0000 Subject: [PATCH 24/90] feat: clean up environment call outs --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index d09de14f3c..d4b8d8d170 100644 --- a/README.md +++ b/README.md @@ -160,7 +160,6 @@ pip install openai[aiohttp] Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`: ```python -import os import asyncio from openai import DefaultAioHttpClient from openai import AsyncOpenAI @@ -168,7 +167,7 @@ from openai import AsyncOpenAI async def main() -> None: async with AsyncOpenAI( - api_key=os.environ.get("OPENAI_API_KEY"), # This is the default and can be omitted + api_key="My API Key", http_client=DefaultAioHttpClient(), ) as client: chat_completion = await client.chat.completions.create( From 1cb2bf6e0afa3d4c52c0f4d5e2ffeccaa7339624 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 15 Jul 2025 13:48:50 +0000 Subject: [PATCH 25/90] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index b82cec4eb6..a146676471 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-de3e91790d0b9f3ce26d679ac07079880ccc695bd8c878f961c4d577a5025a2e.yml openapi_spec_hash: 4b44e3f287583d01fbe7b10cd943254a -config_hash: 06b9a88561844d60d8efa4eaabf5fa3c +config_hash: cc92d0be2a0f3c77bfc988082dd0573e From 34a565164878d97d13fb2d3f7b5602fe73ad332d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 15 Jul 2025 15:46:45 +0000 Subject: [PATCH 26/90] chore(api): update realtime specs, build config --- .stats.yml | 6 ++--- .../types/beta/realtime/conversation_item.py | 4 ++-- .../conversation_item_created_event.py | 12 ++++++---- .../beta/realtime/conversation_item_param.py | 4 ++-- .../conversation_item_with_reference.py | 4 ++-- .../conversation_item_with_reference_param.py | 4 ++-- .../input_audio_buffer_committed_event.py | 10 +++++--- .../types/beta/realtime/realtime_response.py | 4 ++-- src/openai/types/eval_create_params.py | 23 +++++++++++++++++-- ...create_eval_completions_run_data_source.py | 23 +++++++++++++++++-- ..._eval_completions_run_data_source_param.py | 23 +++++++++++++++++-- src/openai/types/evals/run_cancel_response.py | 23 +++++++++++++++++-- src/openai/types/evals/run_create_params.py | 21 ++++++++++++++++- src/openai/types/evals/run_create_response.py | 23 +++++++++++++++++-- src/openai/types/evals/run_list_response.py | 23 +++++++++++++++++-- .../types/evals/run_retrieve_response.py | 23 +++++++++++++++++-- .../types/graders/label_model_grader.py | 20 +++++++++++++--- .../types/graders/label_model_grader_param.py | 22 +++++++++++++++--- .../types/graders/score_model_grader.py | 20 +++++++++++++--- .../types/graders/score_model_grader_param.py | 22 +++++++++++++++--- 20 files changed, 266 insertions(+), 48 deletions(-) diff --git a/.stats.yml b/.stats.yml index a146676471..12a179baf6 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-de3e91790d0b9f3ce26d679ac07079880ccc695bd8c878f961c4d577a5025a2e.yml -openapi_spec_hash: 4b44e3f287583d01fbe7b10cd943254a -config_hash: cc92d0be2a0f3c77bfc988082dd0573e +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-82fd6fcb3eea81cbbe09a6f831c82219f1251e1b76474b4c41f424bf277e6a71.yml +openapi_spec_hash: c8d54bd1ae3d704f6b6f72ffd2f876d8 +config_hash: 3315d58b60faf63b1bee251b81837cda diff --git a/src/openai/types/beta/realtime/conversation_item.py b/src/openai/types/beta/realtime/conversation_item.py index 4edf6c4d5f..21b7a8ac1f 100644 --- a/src/openai/types/beta/realtime/conversation_item.py +++ b/src/openai/types/beta/realtime/conversation_item.py @@ -50,8 +50,8 @@ class ConversationItem(BaseModel): for `message` items. """ - status: Optional[Literal["completed", "incomplete"]] = None - """The status of the item (`completed`, `incomplete`). + status: Optional[Literal["completed", "incomplete", "in_progress"]] = None + """The status of the item (`completed`, `incomplete`, `in_progress`). These have no effect on the conversation, but are accepted for consistency with the `conversation.item.created` event. diff --git a/src/openai/types/beta/realtime/conversation_item_created_event.py b/src/openai/types/beta/realtime/conversation_item_created_event.py index 2f20388246..aea7ad5b4b 100644 --- a/src/openai/types/beta/realtime/conversation_item_created_event.py +++ b/src/openai/types/beta/realtime/conversation_item_created_event.py @@ -1,5 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import Optional from typing_extensions import Literal from ...._models import BaseModel @@ -15,11 +16,12 @@ class ConversationItemCreatedEvent(BaseModel): item: ConversationItem """The item to add to the conversation.""" - previous_item_id: str + type: Literal["conversation.item.created"] + """The event type, must be `conversation.item.created`.""" + + previous_item_id: Optional[str] = None """ The ID of the preceding item in the Conversation context, allows the client to - understand the order of the conversation. + understand the order of the conversation. Can be `null` if the item has no + predecessor. """ - - type: Literal["conversation.item.created"] - """The event type, must be `conversation.item.created`.""" diff --git a/src/openai/types/beta/realtime/conversation_item_param.py b/src/openai/types/beta/realtime/conversation_item_param.py index ac0f8431e5..8bbd539c0c 100644 --- a/src/openai/types/beta/realtime/conversation_item_param.py +++ b/src/openai/types/beta/realtime/conversation_item_param.py @@ -51,8 +51,8 @@ class ConversationItemParam(TypedDict, total=False): for `message` items. """ - status: Literal["completed", "incomplete"] - """The status of the item (`completed`, `incomplete`). + status: Literal["completed", "incomplete", "in_progress"] + """The status of the item (`completed`, `incomplete`, `in_progress`). These have no effect on the conversation, but are accepted for consistency with the `conversation.item.created` event. 
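
A minimal consumer sketch (illustrative, not part of the upstream diff): with `previous_item_id` now optional on `conversation.item.created`, the no-predecessor case has to be handled explicitly. Only the field names below come from the types above; the handler wiring is assumed.

    from openai.types.beta.realtime import ConversationItemCreatedEvent

    def on_item_created(event: ConversationItemCreatedEvent) -> None:
        # previous_item_id may now be None: the item has no predecessor
        # in the conversation context.
        if event.previous_item_id is None:
            print(f"item {event.item.id} opens the conversation")
        else:
            print(f"item {event.item.id} follows {event.previous_item_id}")
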
diff --git a/src/openai/types/beta/realtime/conversation_item_with_reference.py b/src/openai/types/beta/realtime/conversation_item_with_reference.py index 31806afc33..dec7a5a409 100644 --- a/src/openai/types/beta/realtime/conversation_item_with_reference.py +++ b/src/openai/types/beta/realtime/conversation_item_with_reference.py @@ -53,8 +53,8 @@ class ConversationItemWithReference(BaseModel): for `message` items. """ - status: Optional[Literal["completed", "incomplete"]] = None - """The status of the item (`completed`, `incomplete`). + status: Optional[Literal["completed", "incomplete", "in_progress"]] = None + """The status of the item (`completed`, `incomplete`, `in_progress`). These have no effect on the conversation, but are accepted for consistency with the `conversation.item.created` event. diff --git a/src/openai/types/beta/realtime/conversation_item_with_reference_param.py b/src/openai/types/beta/realtime/conversation_item_with_reference_param.py index e266cdce32..3778373a4c 100644 --- a/src/openai/types/beta/realtime/conversation_item_with_reference_param.py +++ b/src/openai/types/beta/realtime/conversation_item_with_reference_param.py @@ -54,8 +54,8 @@ class ConversationItemWithReferenceParam(TypedDict, total=False): for `message` items. """ - status: Literal["completed", "incomplete"] - """The status of the item (`completed`, `incomplete`). + status: Literal["completed", "incomplete", "in_progress"] + """The status of the item (`completed`, `incomplete`, `in_progress`). These have no effect on the conversation, but are accepted for consistency with the `conversation.item.created` event. diff --git a/src/openai/types/beta/realtime/input_audio_buffer_committed_event.py b/src/openai/types/beta/realtime/input_audio_buffer_committed_event.py index 3071eff357..22eb53b117 100644 --- a/src/openai/types/beta/realtime/input_audio_buffer_committed_event.py +++ b/src/openai/types/beta/realtime/input_audio_buffer_committed_event.py @@ -1,5 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import Optional from typing_extensions import Literal from ...._models import BaseModel @@ -14,8 +15,11 @@ class InputAudioBufferCommittedEvent(BaseModel): item_id: str """The ID of the user message item that will be created.""" - previous_item_id: str - """The ID of the preceding item after which the new item will be inserted.""" - type: Literal["input_audio_buffer.committed"] """The event type, must be `input_audio_buffer.committed`.""" + + previous_item_id: Optional[str] = None + """ + The ID of the preceding item after which the new item will be inserted. Can be + `null` if the item has no predecessor. + """ diff --git a/src/openai/types/beta/realtime/realtime_response.py b/src/openai/types/beta/realtime/realtime_response.py index 8ecfb91c31..28e03c8717 100644 --- a/src/openai/types/beta/realtime/realtime_response.py +++ b/src/openai/types/beta/realtime/realtime_response.py @@ -60,10 +60,10 @@ class RealtimeResponse(BaseModel): output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" - status: Optional[Literal["completed", "cancelled", "failed", "incomplete"]] = None + status: Optional[Literal["completed", "cancelled", "failed", "incomplete", "in_progress"]] = None """ The final status of the response (`completed`, `cancelled`, `failed`, or - `incomplete`). + `incomplete`, `in_progress`). 
""" status_details: Optional[RealtimeResponseStatus] = None diff --git a/src/openai/types/eval_create_params.py b/src/openai/types/eval_create_params.py index 20a3765481..9674785701 100644 --- a/src/openai/types/eval_create_params.py +++ b/src/openai/types/eval_create_params.py @@ -25,6 +25,7 @@ "TestingCriterionLabelModelInputEvalItem", "TestingCriterionLabelModelInputEvalItemContent", "TestingCriterionLabelModelInputEvalItemContentOutputText", + "TestingCriterionLabelModelInputEvalItemContentInputImage", "TestingCriterionTextSimilarity", "TestingCriterionPython", "TestingCriterionScoreModel", @@ -109,14 +110,32 @@ class TestingCriterionLabelModelInputEvalItemContentOutputText(TypedDict, total= """The type of the output text. Always `output_text`.""" +class TestingCriterionLabelModelInputEvalItemContentInputImage(TypedDict, total=False): + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + TestingCriterionLabelModelInputEvalItemContent: TypeAlias = Union[ - str, ResponseInputTextParam, TestingCriterionLabelModelInputEvalItemContentOutputText + str, + ResponseInputTextParam, + TestingCriterionLabelModelInputEvalItemContentOutputText, + TestingCriterionLabelModelInputEvalItemContentInputImage, + Iterable[object], ] class TestingCriterionLabelModelInputEvalItem(TypedDict, total=False): content: Required[TestingCriterionLabelModelInputEvalItemContent] - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. diff --git a/src/openai/types/evals/create_eval_completions_run_data_source.py b/src/openai/types/evals/create_eval_completions_run_data_source.py index 0a942cd200..a0eaa5addb 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source.py @@ -26,6 +26,7 @@ "InputMessagesTemplateTemplateMessage", "InputMessagesTemplateTemplateMessageContent", "InputMessagesTemplateTemplateMessageContentOutputText", + "InputMessagesTemplateTemplateMessageContentInputImage", "InputMessagesItemReference", "SamplingParams", "SamplingParamsResponseFormat", @@ -94,14 +95,32 @@ class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel): """The type of the output text. Always `output_text`.""" +class InputMessagesTemplateTemplateMessageContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. 
+ """ + + InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[ - str, ResponseInputText, InputMessagesTemplateTemplateMessageContentOutputText + str, + ResponseInputText, + InputMessagesTemplateTemplateMessageContentOutputText, + InputMessagesTemplateTemplateMessageContentInputImage, + List[object], ] class InputMessagesTemplateTemplateMessage(BaseModel): content: InputMessagesTemplateTemplateMessageContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/src/openai/types/evals/create_eval_completions_run_data_source_param.py b/src/openai/types/evals/create_eval_completions_run_data_source_param.py index 84344fcd94..8892b68b17 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source_param.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source_param.py @@ -26,6 +26,7 @@ "InputMessagesTemplateTemplateMessage", "InputMessagesTemplateTemplateMessageContent", "InputMessagesTemplateTemplateMessageContentOutputText", + "InputMessagesTemplateTemplateMessageContentInputImage", "InputMessagesItemReference", "SamplingParams", "SamplingParamsResponseFormat", @@ -92,14 +93,32 @@ class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=Fal """The type of the output text. Always `output_text`.""" +class InputMessagesTemplateTemplateMessageContentInputImage(TypedDict, total=False): + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[ - str, ResponseInputTextParam, InputMessagesTemplateTemplateMessageContentOutputText + str, + ResponseInputTextParam, + InputMessagesTemplateTemplateMessageContentOutputText, + InputMessagesTemplateTemplateMessageContentInputImage, + Iterable[object], ] class InputMessagesTemplateTemplateMessage(TypedDict, total=False): content: Required[InputMessagesTemplateTemplateMessageContent] - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. diff --git a/src/openai/types/evals/run_cancel_response.py b/src/openai/types/evals/run_cancel_response.py index 12cc868045..7f4f4c9cc4 100644 --- a/src/openai/types/evals/run_cancel_response.py +++ b/src/openai/types/evals/run_cancel_response.py @@ -32,6 +32,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", "DataSourceResponsesSamplingParamsText", @@ -138,14 +139,32 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( """The type of the output text. 
Always `output_text`.""" +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText + str, + ResponseInputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, + List[object], ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/src/openai/types/evals/run_create_params.py b/src/openai/types/evals/run_create_params.py index 354a81132e..1622b00eb7 100644 --- a/src/openai/types/evals/run_create_params.py +++ b/src/openai/types/evals/run_create_params.py @@ -29,6 +29,7 @@ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem", "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent", "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentInputImage", "DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference", "DataSourceCreateEvalResponsesRunDataSourceSamplingParams", "DataSourceCreateEvalResponsesRunDataSourceSamplingParamsText", @@ -153,16 +154,34 @@ class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEva """The type of the output text. Always `output_text`.""" +class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentInputImage( + TypedDict, total=False +): + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ str, ResponseInputTextParam, DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText, + DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentInputImage, + Iterable[object], ] class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem(TypedDict, total=False): content: Required[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent] - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. 
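
A minimal usage sketch (illustrative, not part of the upstream diff): the widened content union above lets a `runs.create` template message carry a list of parts mixing `input_text` with the new `input_image` shape. The template variables and values are assumptions; the field names mirror the TypedDicts above.

    template_message = {
        "role": "user",
        "content": [
            {"type": "input_text", "text": "Describe the image: {{item.caption}}"},
            # detail is optional: one of "high", "low", or "auto" (default "auto")
            {"type": "input_image", "image_url": "{{item.image_url}}", "detail": "low"},
        ],
    }
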
diff --git a/src/openai/types/evals/run_create_response.py b/src/openai/types/evals/run_create_response.py index 776ebb413f..fba5321552 100644 --- a/src/openai/types/evals/run_create_response.py +++ b/src/openai/types/evals/run_create_response.py @@ -32,6 +32,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", "DataSourceResponsesSamplingParamsText", @@ -138,14 +139,32 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( """The type of the output text. Always `output_text`.""" +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText + str, + ResponseInputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, + List[object], ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/src/openai/types/evals/run_list_response.py b/src/openai/types/evals/run_list_response.py index 9e2374f93c..e9e445af5c 100644 --- a/src/openai/types/evals/run_list_response.py +++ b/src/openai/types/evals/run_list_response.py @@ -32,6 +32,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", "DataSourceResponsesSamplingParamsText", @@ -138,14 +139,32 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( """The type of the output text. Always `output_text`.""" +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. 
+ """ + + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText + str, + ResponseInputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, + List[object], ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/src/openai/types/evals/run_retrieve_response.py b/src/openai/types/evals/run_retrieve_response.py index a4f43ce3f9..e13f1abe42 100644 --- a/src/openai/types/evals/run_retrieve_response.py +++ b/src/openai/types/evals/run_retrieve_response.py @@ -32,6 +32,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", "DataSourceResponsesSamplingParamsText", @@ -138,14 +139,32 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( """The type of the output text. Always `output_text`.""" +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText + str, + ResponseInputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, + List[object], ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/src/openai/types/graders/label_model_grader.py b/src/openai/types/graders/label_model_grader.py index d95ccc6df6..76dbfb854a 100644 --- a/src/openai/types/graders/label_model_grader.py +++ b/src/openai/types/graders/label_model_grader.py @@ -6,7 +6,7 @@ from ..._models import BaseModel from ..responses.response_input_text import ResponseInputText -__all__ = ["LabelModelGrader", "Input", "InputContent", "InputContentOutputText"] +__all__ = ["LabelModelGrader", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"] class InputContentOutputText(BaseModel): @@ -17,12 +17,26 @@ class InputContentOutputText(BaseModel): """The type of the output text. 
Always `output_text`.""" -InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText] +class InputContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + +InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText, InputContentInputImage, List[object]] class Input(BaseModel): content: InputContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/src/openai/types/graders/label_model_grader_param.py b/src/openai/types/graders/label_model_grader_param.py index 76d01421ee..941c8a1bd0 100644 --- a/src/openai/types/graders/label_model_grader_param.py +++ b/src/openai/types/graders/label_model_grader_param.py @@ -7,7 +7,7 @@ from ..responses.response_input_text_param import ResponseInputTextParam -__all__ = ["LabelModelGraderParam", "Input", "InputContent", "InputContentOutputText"] +__all__ = ["LabelModelGraderParam", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"] class InputContentOutputText(TypedDict, total=False): @@ -18,12 +18,28 @@ class InputContentOutputText(TypedDict, total=False): """The type of the output text. Always `output_text`.""" -InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText] +class InputContentInputImage(TypedDict, total=False): + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + +InputContent: TypeAlias = Union[ + str, ResponseInputTextParam, InputContentOutputText, InputContentInputImage, Iterable[object] +] class Input(TypedDict, total=False): content: Required[InputContent] - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. diff --git a/src/openai/types/graders/score_model_grader.py b/src/openai/types/graders/score_model_grader.py index 1349f75a58..e6af0ebcf7 100644 --- a/src/openai/types/graders/score_model_grader.py +++ b/src/openai/types/graders/score_model_grader.py @@ -6,7 +6,7 @@ from ..._models import BaseModel from ..responses.response_input_text import ResponseInputText -__all__ = ["ScoreModelGrader", "Input", "InputContent", "InputContentOutputText"] +__all__ = ["ScoreModelGrader", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"] class InputContentOutputText(BaseModel): @@ -17,12 +17,26 @@ class InputContentOutputText(BaseModel): """The type of the output text. Always `output_text`.""" -InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText] +class InputContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. 
+ + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + +InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText, InputContentInputImage, List[object]] class Input(BaseModel): content: InputContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/src/openai/types/graders/score_model_grader_param.py b/src/openai/types/graders/score_model_grader_param.py index 673f14e47d..47c9928076 100644 --- a/src/openai/types/graders/score_model_grader_param.py +++ b/src/openai/types/graders/score_model_grader_param.py @@ -7,7 +7,7 @@ from ..responses.response_input_text_param import ResponseInputTextParam -__all__ = ["ScoreModelGraderParam", "Input", "InputContent", "InputContentOutputText"] +__all__ = ["ScoreModelGraderParam", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"] class InputContentOutputText(TypedDict, total=False): @@ -18,12 +18,28 @@ class InputContentOutputText(TypedDict, total=False): """The type of the output text. Always `output_text`.""" -InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText] +class InputContentInputImage(TypedDict, total=False): + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + +InputContent: TypeAlias = Union[ + str, ResponseInputTextParam, InputContentOutputText, InputContentInputImage, Iterable[object] +] class Input(TypedDict, total=False): content: Required[InputContent] - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. 
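
A minimal usage sketch (illustrative, not part of the upstream diff): the same `input_image` content shape now also applies to grader inputs (`LabelModelGraderParam` / `ScoreModelGraderParam`). The URL is a placeholder; field names mirror `InputContentInputImage` above.

    grader_input = {
        "role": "user",
        "content": {
            "type": "input_image",
            "image_url": "https://example.com/sample.png",
            "detail": "auto",  # "high", "low", or "auto"; defaults to "auto"
        },
    }
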
From 1d77265e3d31afda8df6528a1926c854ef27de3b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 15 Jul 2025 15:47:15 +0000 Subject: [PATCH 27/90] release: 1.96.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 16 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index ffcd85673c..db912a0d0f 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.95.1" + ".": "1.96.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 14d61de1bf..c91c4c4b35 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 1.96.0 (2025-07-15) + +Full Changelog: [v1.95.1...v1.96.0](https://github.com/openai/openai-python/compare/v1.95.1...v1.96.0) + +### Features + +* clean up environment call outs ([87c2e97](https://github.com/openai/openai-python/commit/87c2e979e0ec37347b7f595c2696408acd25fe20)) + + +### Chores + +* **api:** update realtime specs, build config ([bf06d88](https://github.com/openai/openai-python/commit/bf06d88b33f9af82a51d9a8af5b7a38925906f7a)) + ## 1.95.1 (2025-07-11) Full Changelog: [v1.95.0...v1.95.1](https://github.com/openai/openai-python/compare/v1.95.0...v1.95.1) diff --git a/pyproject.toml b/pyproject.toml index d9305c5469..65055d926a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.95.1" +version = "1.96.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 6e2b83bbaa..b1025f4a31 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.95.1" # x-release-please-version +__version__ = "1.96.0" # x-release-please-version From 7bbb31cba0b056a191277a63e9798ffc4c3f7586 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 15 Jul 2025 16:20:27 +0000 Subject: [PATCH 28/90] codegen metadata --- .stats.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index 12a179baf6..7d1cdd14ad 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-82fd6fcb3eea81cbbe09a6f831c82219f1251e1b76474b4c41f424bf277e6a71.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-79dcb0ae501ac17004f50aecb112a798290ab3727fbe7c7d1b34299e38ed4f8e.yml openapi_spec_hash: c8d54bd1ae3d704f6b6f72ffd2f876d8 -config_hash: 3315d58b60faf63b1bee251b81837cda +config_hash: 167ad0ca036d0f023c78e6496b4311e8 From 3876ddc28e833aca190d6ec8eaf3b42c979f6e99 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 15 Jul 2025 21:27:39 +0000 Subject: [PATCH 29/90] chore(api): update realtime specs --- .stats.yml | 4 +-- .../realtime/conversation_item_content.py | 9 ++++--- .../conversation_item_content_param.py | 9 ++++--- .../conversation_item_with_reference.py | 26 ++++++++++++++++--- .../conversation_item_with_reference_param.py | 25 +++++++++++++++--- 5 files changed, 59 insertions(+), 14 deletions(-) diff --git a/.stats.yml b/.stats.yml index 7d1cdd14ad..571b0ee797 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-79dcb0ae501ac17004f50aecb112a798290ab3727fbe7c7d1b34299e38ed4f8e.yml -openapi_spec_hash: c8d54bd1ae3d704f6b6f72ffd2f876d8 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c7dacca97e28bceff218684bb429481a70aa47aadad983ed9178bfda75ff4cd2.yml +openapi_spec_hash: 28eb1bb901ca10d2e37db4606d2bcfa7 config_hash: 167ad0ca036d0f023c78e6496b4311e8 diff --git a/src/openai/types/beta/realtime/conversation_item_content.py b/src/openai/types/beta/realtime/conversation_item_content.py index ab40a4a1a7..fe9cef80e3 100644 --- a/src/openai/types/beta/realtime/conversation_item_content.py +++ b/src/openai/types/beta/realtime/conversation_item_content.py @@ -23,7 +23,10 @@ class ConversationItemContent(BaseModel): """The text content, used for `input_text` and `text` content types.""" transcript: Optional[str] = None - """The transcript of the audio, used for `input_audio` content type.""" + """The transcript of the audio, used for `input_audio` and `audio` content types.""" - type: Optional[Literal["input_text", "input_audio", "item_reference", "text"]] = None - """The content type (`input_text`, `input_audio`, `item_reference`, `text`).""" + type: Optional[Literal["input_text", "input_audio", "item_reference", "text", "audio"]] = None + """ + The content type (`input_text`, `input_audio`, `item_reference`, `text`, + `audio`). 
+ """ diff --git a/src/openai/types/beta/realtime/conversation_item_content_param.py b/src/openai/types/beta/realtime/conversation_item_content_param.py index 7a3a92a39d..6042e7f90f 100644 --- a/src/openai/types/beta/realtime/conversation_item_content_param.py +++ b/src/openai/types/beta/realtime/conversation_item_content_param.py @@ -22,7 +22,10 @@ class ConversationItemContentParam(TypedDict, total=False): """The text content, used for `input_text` and `text` content types.""" transcript: str - """The transcript of the audio, used for `input_audio` content type.""" + """The transcript of the audio, used for `input_audio` and `audio` content types.""" - type: Literal["input_text", "input_audio", "item_reference", "text"] - """The content type (`input_text`, `input_audio`, `item_reference`, `text`).""" + type: Literal["input_text", "input_audio", "item_reference", "text", "audio"] + """ + The content type (`input_text`, `input_audio`, `item_reference`, `text`, + `audio`). + """ diff --git a/src/openai/types/beta/realtime/conversation_item_with_reference.py b/src/openai/types/beta/realtime/conversation_item_with_reference.py index dec7a5a409..0edcfc76b6 100644 --- a/src/openai/types/beta/realtime/conversation_item_with_reference.py +++ b/src/openai/types/beta/realtime/conversation_item_with_reference.py @@ -4,9 +4,29 @@ from typing_extensions import Literal from ...._models import BaseModel -from .conversation_item_content import ConversationItemContent -__all__ = ["ConversationItemWithReference"] +__all__ = ["ConversationItemWithReference", "Content"] + + +class Content(BaseModel): + id: Optional[str] = None + """ + ID of a previous conversation item to reference (for `item_reference` content + types in `response.create` events). These can reference both client and server + created items. + """ + + audio: Optional[str] = None + """Base64-encoded audio bytes, used for `input_audio` content type.""" + + text: Optional[str] = None + """The text content, used for `input_text` and `text` content types.""" + + transcript: Optional[str] = None + """The transcript of the audio, used for `input_audio` content type.""" + + type: Optional[Literal["input_text", "input_audio", "item_reference", "text"]] = None + """The content type (`input_text`, `input_audio`, `item_reference`, `text`).""" class ConversationItemWithReference(BaseModel): @@ -30,7 +50,7 @@ class ConversationItemWithReference(BaseModel): `function_call` item with the same ID exists in the conversation history. """ - content: Optional[List[ConversationItemContent]] = None + content: Optional[List[Content]] = None """The content of the message, applicable for `message` items. - Message items of role `system` support only `input_text` content diff --git a/src/openai/types/beta/realtime/conversation_item_with_reference_param.py b/src/openai/types/beta/realtime/conversation_item_with_reference_param.py index 3778373a4c..c83dc92ab7 100644 --- a/src/openai/types/beta/realtime/conversation_item_with_reference_param.py +++ b/src/openai/types/beta/realtime/conversation_item_with_reference_param.py @@ -5,9 +5,28 @@ from typing import Iterable from typing_extensions import Literal, TypedDict -from .conversation_item_content_param import ConversationItemContentParam +__all__ = ["ConversationItemWithReferenceParam", "Content"] -__all__ = ["ConversationItemWithReferenceParam"] + +class Content(TypedDict, total=False): + id: str + """ + ID of a previous conversation item to reference (for `item_reference` content + types in `response.create` events). 
These can reference both client and server + created items. + """ + + audio: str + """Base64-encoded audio bytes, used for `input_audio` content type.""" + + text: str + """The text content, used for `input_text` and `text` content types.""" + + transcript: str + """The transcript of the audio, used for `input_audio` content type.""" + + type: Literal["input_text", "input_audio", "item_reference", "text"] + """The content type (`input_text`, `input_audio`, `item_reference`, `text`).""" class ConversationItemWithReferenceParam(TypedDict, total=False): @@ -31,7 +50,7 @@ class ConversationItemWithReferenceParam(TypedDict, total=False): `function_call` item with the same ID exists in the conversation history. """ - content: Iterable[ConversationItemContentParam] + content: Iterable[Content] """The content of the message, applicable for `message` items. - Message items of role `system` support only `input_text` content From 859b4db4a7b3c229cd4c19eb21642faca007530b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 15 Jul 2025 21:28:05 +0000 Subject: [PATCH 30/90] release: 1.96.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index db912a0d0f..6b38a1bd5a 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.96.0" + ".": "1.96.1" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index c91c4c4b35..93bfb63f37 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.96.1 (2025-07-15) + +Full Changelog: [v1.96.0...v1.96.1](https://github.com/openai/openai-python/compare/v1.96.0...v1.96.1) + +### Chores + +* **api:** update realtime specs ([b68b71b](https://github.com/openai/openai-python/commit/b68b71b178719e0b49ecfe34486b9d9ac0627924)) + ## 1.96.0 (2025-07-15) Full Changelog: [v1.95.1...v1.96.0](https://github.com/openai/openai-python/compare/v1.95.1...v1.96.0) diff --git a/pyproject.toml b/pyproject.toml index 65055d926a..0f655d058d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.96.0" +version = "1.96.1" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index b1025f4a31..39be0338f6 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.96.0" # x-release-please-version +__version__ = "1.96.1" # x-release-please-version From a85ad051aa4e6cf4f81a51714afc7bc90310e047 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 16 Jul 2025 16:24:53 +0000 Subject: [PATCH 31/90] feat(api): manual updates --- .stats.yml | 6 +- api.md | 12 +- examples/image_stream.py | 53 + src/openai/_streaming.py | 7 +- src/openai/resources/images.py | 1453 ++++++++++++++--- src/openai/types/__init__.py | 6 + .../types/image_edit_completed_event.py | 55 + src/openai/types/image_edit_params.py | 42 +- .../types/image_edit_partial_image_event.py | 33 + src/openai/types/image_edit_stream_event.py | 14 + src/openai/types/image_gen_completed_event.py | 55 + .../types/image_gen_partial_image_event.py | 33 + src/openai/types/image_gen_stream_event.py | 14 + src/openai/types/image_generate_params.py | 35 +- .../responses/response_output_refusal.py | 2 +- .../response_output_refusal_param.py | 2 +- src/openai/types/responses/tool.py | 7 + src/openai/types/responses/tool_param.py | 7 + tests/api_resources/test_images.py | 262 ++- 19 files changed, 1880 insertions(+), 218 deletions(-) create mode 100644 examples/image_stream.py create mode 100644 src/openai/types/image_edit_completed_event.py create mode 100644 src/openai/types/image_edit_partial_image_event.py create mode 100644 src/openai/types/image_edit_stream_event.py create mode 100644 src/openai/types/image_gen_completed_event.py create mode 100644 src/openai/types/image_gen_partial_image_event.py create mode 100644 src/openai/types/image_gen_stream_event.py diff --git a/.stats.yml b/.stats.yml index 571b0ee797..2b9160cf6e 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-c7dacca97e28bceff218684bb429481a70aa47aadad983ed9178bfda75ff4cd2.yml -openapi_spec_hash: 28eb1bb901ca10d2e37db4606d2bcfa7 -config_hash: 167ad0ca036d0f023c78e6496b4311e8 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-670ea0d2cc44f52a87dd3cadea45632953283e0636ba30788fdbdb22a232ccac.yml +openapi_spec_hash: d8b7d38911fead545adf3e4297956410 +config_hash: 5525bda35e48ea6387c6175c4d1651fa diff --git a/api.md b/api.md index abf0de481d..b3a2245cdd 100644 --- a/api.md +++ b/api.md @@ -127,7 +127,17 @@ Methods: Types: ```python -from openai.types import Image, ImageModel, ImagesResponse +from openai.types import ( + Image, + ImageEditCompletedEvent, + ImageEditPartialImageEvent, + ImageEditStreamEvent, + ImageGenCompletedEvent, + ImageGenPartialImageEvent, + ImageGenStreamEvent, + ImageModel, + ImagesResponse, +) ``` Methods: diff --git a/examples/image_stream.py b/examples/image_stream.py new file mode 100644 index 0000000000..c188e68717 --- /dev/null +++ b/examples/image_stream.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python + +import base64 +from pathlib import Path + +from openai import OpenAI + +client = OpenAI() + + +def main() -> None: + """Example of OpenAI image streaming with partial images.""" + stream = client.images.generate( + model="gpt-image-1", + prompt="A cute baby sea otter", + n=1, + size="1024x1024", + stream=True, + partial_images=3, + ) + + for event in stream: + if event.type == "image_generation.partial_image": + print(f" Partial image {event.partial_image_index + 1}/3 received") + print(f" Size: {len(event.b64_json)} characters (base64)") + + # Save 
partial image to file + filename = f"partial_{event.partial_image_index + 1}.png" + image_data = base64.b64decode(event.b64_json) + with open(filename, "wb") as f: + f.write(image_data) + print(f" 💾 Saved to: {Path(filename).resolve()}") + + elif event.type == "image_generation.completed": + print(f"\n✅ Final image completed!") + print(f" Size: {len(event.b64_json)} characters (base64)") + + # Save final image to file + filename = "final_image.png" + image_data = base64.b64decode(event.b64_json) + with open(filename, "wb") as f: + f.write(image_data) + print(f" 💾 Saved to: {Path(filename).resolve()}") + + else: + print(f"❓ Unknown event: {event}") # type: ignore[unreachable] + + +if __name__ == "__main__": + try: + main() + except Exception as error: + print(f"Error generating image: {error}") \ No newline at end of file diff --git a/src/openai/_streaming.py b/src/openai/_streaming.py index f5621f92a7..fa0a30e183 100644 --- a/src/openai/_streaming.py +++ b/src/openai/_streaming.py @@ -59,7 +59,12 @@ def __stream__(self) -> Iterator[_T]: if sse.data.startswith("[DONE]"): break - if sse.event is None or sse.event.startswith("response.") or sse.event.startswith("transcript."): + if sse.event is None or ( + sse.event.startswith("response.") or + sse.event.startswith("transcript.") or + sse.event.startswith("image_edit.") or + sse.event.startswith("image_generation.") + ): data = sse.json() if is_mapping(data) and data.get("error"): message = None diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py index 43f6189f91..77b7a1b24e 100644 --- a/src/openai/resources/images.py +++ b/src/openai/resources/images.py @@ -3,20 +3,23 @@ from __future__ import annotations from typing import List, Union, Mapping, Optional, cast -from typing_extensions import Literal +from typing_extensions import Literal, overload import httpx from .. 
import _legacy_response from ..types import image_edit_params, image_generate_params, image_create_variation_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes -from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform +from .._utils import extract_files, required_args, maybe_transform, deepcopy_minimal, async_maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from .._streaming import Stream, AsyncStream from .._base_client import make_request_options from ..types.image_model import ImageModel from ..types.images_response import ImagesResponse +from ..types.image_gen_stream_event import ImageGenStreamEvent +from ..types.image_edit_stream_event import ImageEditStreamEvent __all__ = ["Images", "AsyncImages"] @@ -114,21 +117,25 @@ def create_variation( cast_to=ImagesResponse, ) + @overload def edit( self, *, image: Union[FileTypes, List[FileTypes]], prompt: str, background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, mask: FileTypes | NotGiven = NOT_GIVEN, model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, output_compression: Optional[int] | NotGiven = NOT_GIVEN, output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -162,6 +169,234 @@ def edit( If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. + input_fidelity: Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) + indicate where `image` should be edited. If there are multiple images provided, + the mask will be applied on the first image. Must be a valid PNG file, less than + 4MB, and have the same dimensions as `image`. + + model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + is used. + + n: The number of images to generate. Must be between 1 and 10. + + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + default value is `png`. 
+ + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + quality: The quality of the image that will be generated. `high`, `medium` and `low` are + only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + Defaults to `auto`. + + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + + stream: Edit the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def edit( + self, + *, + image: Union[FileTypes, List[FileTypes]], + prompt: str, + stream: Literal[True], + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, + mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] + | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Stream[ImageEditStreamEvent]: + """Creates an edited or extended image given one or more source images and a + prompt. + + This endpoint only supports `gpt-image-1` and `dall-e-2`. + + Args: + image: The image(s) to edit. Must be a supported image file or an array of images. + + For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + 50MB. You can provide up to 16 images. + + For `dall-e-2`, you can only provide one image, and it should be a square `png` + file less than 4MB. 
+ + prompt: A text description of the desired image(s). The maximum length is 1000 + characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + + stream: Edit the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + + input_fidelity: Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) + indicate where `image` should be edited. If there are multiple images provided, + the mask will be applied on the first image. Must be a valid PNG file, less than + 4MB, and have the same dimensions as `image`. + + model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + is used. + + n: The number of images to generate. Must be between 1 and 10. + + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + default value is `png`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + quality: The quality of the image that will be generated. `high`, `medium` and `low` are + only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + Defaults to `auto`. + + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
+ + @overload + def edit( + self, + *, + image: Union[FileTypes, List[FileTypes]], + prompt: str, + stream: bool, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, + mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] + | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse | Stream[ImageEditStreamEvent]: + """Creates an edited or extended image given one or more source images and a + prompt. + + This endpoint only supports `gpt-image-1` and `dall-e-2`. + + Args: + image: The image(s) to edit. Must be a supported image file or an array of images. + + For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + 50MB. You can provide up to 16 images. + + For `dall-e-2`, you can only provide one image, and it should be a square `png` + file less than 4MB. + + prompt: A text description of the desired image(s). The maximum length is 1000 + characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + + stream: Edit the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + + input_fidelity: Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. If there are multiple images provided, the mask will be applied on the first image. Must be a valid PNG file, less than @@ -181,6 +416,10 @@ def edit( supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The default value is `png`. + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. 
+ quality: The quality of the image that will be generated. `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`. @@ -206,19 +445,51 @@ def edit( timeout: Override the client-level default timeout for this request, in seconds """ + ... + + @required_args(["image", "prompt"], ["image", "prompt", "stream"]) + def edit( + self, + *, + image: Union[FileTypes, List[FileTypes]], + prompt: str, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, + mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] + | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse | Stream[ImageEditStreamEvent]: body = deepcopy_minimal( { "image": image, "prompt": prompt, "background": background, + "input_fidelity": input_fidelity, "mask": mask, "model": model, "n": n, "output_compression": output_compression, "output_format": output_format, + "partial_images": partial_images, "quality": quality, "response_format": response_format, "size": size, + "stream": stream, "user": user, } ) @@ -229,15 +500,891 @@ def edit( extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return self._post( "/images/edits", - body=maybe_transform(body, image_edit_params.ImageEditParams), + body=maybe_transform( + body, + image_edit_params.ImageEditParamsStreaming if stream else image_edit_params.ImageEditParamsNonStreaming, + ), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImagesResponse, + stream=stream or False, + stream_cls=Stream[ImageEditStreamEvent], + ) + + @overload + def generate( + self, + *, + prompt: str, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[ + 
Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] + ] + | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse: + """ + Creates an image given a prompt. + [Learn more](https://platform.openai.com/docs/guides/images). + + Args: + prompt: A text description of the desired image(s). The maximum length is 32000 + characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + for `dall-e-3`. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + + model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + `gpt-image-1` is used. + + moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must + be either `low` for less restrictive filtering or `auto` (default value). + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. + + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + quality: The quality of the image that will be generated. + + - `auto` (default value) will automatically select the best quality for the + given model. + - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `hd` and `standard` are supported for `dall-e-3`. + - `standard` is the only option for `dall-e-2`. + + response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are + returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + after the image has been generated. This parameter isn't supported for + `gpt-image-1` which will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + + stream: Generate the image in streaming mode. 
Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. + + style: The style of the generated images. This parameter is only supported for + `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + towards generating hyper-real and dramatic images. Natural causes the model to + produce more natural, less hyper-real looking images. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + def generate( + self, + *, + prompt: str, + stream: Literal[True], + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[ + Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] + ] + | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Stream[ImageGenStreamEvent]: + """ + Creates an image given a prompt. + [Learn more](https://platform.openai.com/docs/guides/images). + + Args: + prompt: A text description of the desired image(s). The maximum length is 32000 + characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + for `dall-e-3`. + + stream: Generate the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + + model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + `gpt-image-1` is used. 
+ + moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must + be either `low` for less restrictive filtering or `auto` (default value). + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. + + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + quality: The quality of the image that will be generated. + + - `auto` (default value) will automatically select the best quality for the + given model. + - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `hd` and `standard` are supported for `dall-e-3`. + - `standard` is the only option for `dall-e-2`. + + response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are + returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + after the image has been generated. This parameter isn't supported for + `gpt-image-1` which will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + + style: The style of the generated images. This parameter is only supported for + `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + towards generating hyper-real and dramatic images. Natural causes the model to + produce more natural, less hyper-real looking images. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
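[Editor's note: a sketch, not part of the patch, of the Literal[True] overload above in use; it types the return as Stream[ImageGenStreamEvent]. The prompt and output file name are placeholders, and the event discriminator strings are assumptions based on the ImageGenPartialImageEvent / ImageGenCompletedEvent names added elsewhere in this patch.

import base64

from openai import OpenAI

client = OpenAI()

stream = client.images.generate(
    model="gpt-image-1",
    prompt="A watercolor lighthouse at dusk",
    stream=True,
    partial_images=3,  # 0-3; 0 sends only the final image in one event
)

for event in stream:
    if event.type == "image_generation.partial_image":  # assumed value
        print(f"partial image {event.partial_image_index}")
    elif event.type == "image_generation.completed":
        with open("lighthouse.png", "wb") as f:
            f.write(base64.b64decode(event.b64_json))]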
+ + @overload + def generate( + self, + *, + prompt: str, + stream: bool, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[ + Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] + ] + | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse | Stream[ImageGenStreamEvent]: + """ + Creates an image given a prompt. + [Learn more](https://platform.openai.com/docs/guides/images). + + Args: + prompt: A text description of the desired image(s). The maximum length is 32000 + characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + for `dall-e-3`. + + stream: Generate the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + + model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + `gpt-image-1` is used. + + moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must + be either `low` for less restrictive filtering or `auto` (default value). + + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. + + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. 
+ + quality: The quality of the image that will be generated. + + - `auto` (default value) will automatically select the best quality for the + given model. + - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `hd` and `standard` are supported for `dall-e-3`. + - `standard` is the only option for `dall-e-2`. + + response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are + returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes + after the image has been generated. This parameter isn't supported for + `gpt-image-1` which will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + + style: The style of the generated images. This parameter is only supported for + `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + towards generating hyper-real and dramatic images. Natural causes the model to + produce more natural, less hyper-real looking images. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @required_args(["prompt"], ["prompt", "stream"]) + def generate( + self, + *, + prompt: str, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[ + Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] + ] + | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse | Stream[ImageGenStreamEvent]: + return self._post( + "/images/generations", + body=maybe_transform( + { + "prompt": prompt, + "background": background, + "model": model, + "moderation": moderation, + "n": n, + "output_compression": output_compression, + "output_format": output_format, + "partial_images": partial_images, + "quality": quality, + "response_format": response_format, + "size": size, + "stream": stream, + "style": style, + "user": user, + }, + image_generate_params.ImageGenerateParamsStreaming + if stream + else image_generate_params.ImageGenerateParamsNonStreaming, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImagesResponse, + stream=stream or False, + stream_cls=Stream[ImageGenStreamEvent], + ) + + +class AsyncImages(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncImagesWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncImagesWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncImagesWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncImagesWithStreamingResponse(self) + + async def create_variation( + self, + *, + image: FileTypes, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse: + """Creates a variation of a given image. + + This endpoint only supports `dall-e-2`. + + Args: + image: The image to use as the basis for the variation(s). Must be a valid PNG file, + less than 4MB, and square. + + model: The model to use for image generation. Only `dall-e-2` is supported at this + time. + + n: The number of images to generate. Must be between 1 and 10. + + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. + + size: The size of the generated images. Must be one of `256x256`, `512x512`, or + `1024x1024`. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + body = deepcopy_minimal( + { + "image": image, + "model": model, + "n": n, + "response_format": response_format, + "size": size, + "user": user, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["image"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. + # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return await self._post( + "/images/variations", + body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams), + files=files, + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ImagesResponse, + ) + + @overload + async def edit( + self, + *, + image: Union[FileTypes, List[FileTypes]], + prompt: str, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, + mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] + | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse: + """Creates an edited or extended image given one or more source images and a + prompt. + + This endpoint only supports `gpt-image-1` and `dall-e-2`. + + Args: + image: The image(s) to edit. Must be a supported image file or an array of images. + + For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + 50MB. You can provide up to 16 images. + + For `dall-e-2`, you can only provide one image, and it should be a square `png` + file less than 4MB. + + prompt: A text description of the desired image(s). The maximum length is 1000 + characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. 
+ + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + + input_fidelity: Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) + indicate where `image` should be edited. If there are multiple images provided, + the mask will be applied on the first image. Must be a valid PNG file, less than + 4MB, and have the same dimensions as `image`. + + model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + is used. + + n: The number of images to generate. Must be between 1 and 10. + + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + default value is `png`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + quality: The quality of the image that will be generated. `high`, `medium` and `low` are + only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + Defaults to `auto`. + + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + + stream: Edit the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
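[Editor's note: a sketch, not part of the patch, of the async non-streaming path documented above, including the mask-based inpainting behavior. File names and the prompt are placeholders; per the docstring, the mask must be a PNG with the same dimensions as the input image whose fully transparent pixels mark the editable region.

import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    with open("photo.png", "rb") as image_file, open("mask.png", "rb") as mask_file:
        result = await client.images.edit(  # no stream, so ImagesResponse
            model="gpt-image-1",
            image=image_file,
            mask=mask_file,
            prompt="Replace the masked area with a bay window",
        )
    # gpt-image-1 always returns base64-encoded images in result.data
    print(result.data[0].b64_json is not None)


asyncio.run(main())]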
+ + @overload + async def edit( + self, + *, + image: Union[FileTypes, List[FileTypes]], + prompt: str, + stream: Literal[True], + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, + mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] + | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncStream[ImageEditStreamEvent]: + """Creates an edited or extended image given one or more source images and a + prompt. + + This endpoint only supports `gpt-image-1` and `dall-e-2`. + + Args: + image: The image(s) to edit. Must be a supported image file or an array of images. + + For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + 50MB. You can provide up to 16 images. + + For `dall-e-2`, you can only provide one image, and it should be a square `png` + file less than 4MB. + + prompt: A text description of the desired image(s). The maximum length is 1000 + characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + + stream: Edit the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + + input_fidelity: Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) + indicate where `image` should be edited. If there are multiple images provided, + the mask will be applied on the first image. Must be a valid PNG file, less than + 4MB, and have the same dimensions as `image`. + + model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + is used. + + n: The number of images to generate. Must be between 1 and 10. + + output_compression: The compression level (0-100%) for the generated images. 
This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + default value is `png`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + quality: The quality of the image that will be generated. `high`, `medium` and `low` are + only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + Defaults to `auto`. + + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... + + @overload + async def edit( + self, + *, + image: Union[FileTypes, List[FileTypes]], + prompt: str, + stream: bool, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, + mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] + | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]: + """Creates an edited or extended image given one or more source images and a + prompt. + + This endpoint only supports `gpt-image-1` and `dall-e-2`. + + Args: + image: The image(s) to edit. Must be a supported image file or an array of images. + + For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than + 50MB. You can provide up to 16 images. 
+ + For `dall-e-2`, you can only provide one image, and it should be a square `png` + file less than 4MB. + + prompt: A text description of the desired image(s). The maximum length is 1000 + characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + + stream: Edit the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. + + background: Allows to set transparency for the background of the generated image(s). This + parameter is only supported for `gpt-image-1`. Must be one of `transparent`, + `opaque` or `auto` (default value). When `auto` is used, the model will + automatically determine the best background for the image. + + If `transparent`, the output format needs to support transparency, so it should + be set to either `png` (default value) or `webp`. + + input_fidelity: Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + + mask: An additional image whose fully transparent areas (e.g. where alpha is zero) + indicate where `image` should be edited. If there are multiple images provided, + the mask will be applied on the first image. Must be a valid PNG file, less than + 4MB, and have the same dimensions as `image`. + + model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are + supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` + is used. + + n: The number of images to generate. Must be between 1 and 10. + + output_compression: The compression level (0-100%) for the generated images. This parameter is only + supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and + defaults to 100. + + output_format: The format in which the generated images are returned. This parameter is only + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The + default value is `png`. + + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + + quality: The quality of the image that will be generated. `high`, `medium` and `low` are + only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. + Defaults to `auto`. + + response_format: The format in which the generated images are returned. Must be one of `url` or + `b64_json`. URLs are only valid for 60 minutes after the image has been + generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` + will always return base64-encoded images. + + size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` + (landscape), `1024x1536` (portrait), or `auto` (default value) for + `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + + user: A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + ... 
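[Editor's note: a sketch, not part of the patch, tying the async overloads together: with stream=True the awaited call yields an AsyncStream[ImageEditStreamEvent] consumed with `async for`. The prompt and file name are placeholders.

import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    with open("room.png", "rb") as image_file:
        stream = await client.images.edit(
            model="gpt-image-1",
            image=image_file,
            prompt="Repaint the walls a pale sage green",
            stream=True,
        )
        async for event in stream:
            print(event.type)  # partial-image events, then a completed event


asyncio.run(main())

The @required_args decorator that follows enforces at runtime what the overloads promise statically: image and prompt are always required, and stream joins them when a streaming variant is selected.]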
+ + @required_args(["image", "prompt"], ["image", "prompt", "stream"]) + async def edit( + self, + *, + image: Union[FileTypes, List[FileTypes]], + prompt: str, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + input_fidelity: Optional[Literal["high", "low"]] | NotGiven = NOT_GIVEN, + mask: FileTypes | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] + | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]: + body = deepcopy_minimal( + { + "image": image, + "prompt": prompt, + "background": background, + "input_fidelity": input_fidelity, + "mask": mask, + "model": model, + "n": n, + "output_compression": output_compression, + "output_format": output_format, + "partial_images": partial_images, + "quality": quality, + "response_format": response_format, + "size": size, + "stream": stream, + "user": user, + } + ) + files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", ""], ["mask"]]) + # It should be noted that the actual Content-Type header that will be + # sent to the server will contain a `boundary` parameter, e.g. 
+ # multipart/form-data; boundary=---abc-- + extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} + return await self._post( + "/images/edits", + body=await async_maybe_transform( + body, + image_edit_params.ImageEditParamsStreaming if stream else image_edit_params.ImageEditParamsNonStreaming, + ), files=files, options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ImagesResponse, + stream=stream or False, + stream_cls=AsyncStream[ImageEditStreamEvent], ) - def generate( + @overload + async def generate( self, *, prompt: str, @@ -247,12 +1394,14 @@ def generate( n: Optional[int] | NotGiven = NOT_GIVEN, output_compression: Optional[int] | NotGiven = NOT_GIVEN, output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[ Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] ] | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -296,6 +1445,10 @@ def generate( output_format: The format in which the generated images are returned. This parameter is only supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + quality: The quality of the image that will be generated. - `auto` (default value) will automatically select the best quality for the @@ -314,6 +1467,10 @@ def generate( `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + stream: Generate the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. + style: The style of the generated images. This parameter is only supported for `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. 
Natural causes the model to @@ -331,140 +1488,28 @@ def generate( timeout: Override the client-level default timeout for this request, in seconds """ - return self._post( - "/images/generations", - body=maybe_transform( - { - "prompt": prompt, - "background": background, - "model": model, - "moderation": moderation, - "n": n, - "output_compression": output_compression, - "output_format": output_format, - "quality": quality, - "response_format": response_format, - "size": size, - "style": style, - "user": user, - }, - image_generate_params.ImageGenerateParams, - ), - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImagesResponse, - ) - - -class AsyncImages(AsyncAPIResource): - @cached_property - def with_raw_response(self) -> AsyncImagesWithRawResponse: - """ - This property can be used as a prefix for any HTTP method call to return - the raw response object instead of the parsed content. - - For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers - """ - return AsyncImagesWithRawResponse(self) - - @cached_property - def with_streaming_response(self) -> AsyncImagesWithStreamingResponse: - """ - An alternative to `.with_raw_response` that doesn't eagerly read the response body. - - For more information, see https://www.github.com/openai/openai-python#with_streaming_response - """ - return AsyncImagesWithStreamingResponse(self) - - async def create_variation( - self, - *, - image: FileTypes, - model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, - n: Optional[int] | NotGiven = NOT_GIVEN, - response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, - user: str | NotGiven = NOT_GIVEN, - # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. - # The extra values given here take precedence over values defined on the client or passed to this method. - extra_headers: Headers | None = None, - extra_query: Query | None = None, - extra_body: Body | None = None, - timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImagesResponse: - """Creates a variation of a given image. - - This endpoint only supports `dall-e-2`. - - Args: - image: The image to use as the basis for the variation(s). Must be a valid PNG file, - less than 4MB, and square. - - model: The model to use for image generation. Only `dall-e-2` is supported at this - time. - - n: The number of images to generate. Must be between 1 and 10. - - response_format: The format in which the generated images are returned. Must be one of `url` or - `b64_json`. URLs are only valid for 60 minutes after the image has been - generated. - - size: The size of the generated images. Must be one of `256x256`, `512x512`, or - `1024x1024`. - - user: A unique identifier representing your end-user, which can help OpenAI to monitor - and detect abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). 
- - extra_headers: Send extra headers - - extra_query: Add additional query parameters to the request - - extra_body: Add additional JSON properties to the request - - timeout: Override the client-level default timeout for this request, in seconds - """ - body = deepcopy_minimal( - { - "image": image, - "model": model, - "n": n, - "response_format": response_format, - "size": size, - "user": user, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["image"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return await self._post( - "/images/variations", - body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImagesResponse, - ) + ... - async def edit( + @overload + async def generate( self, *, - image: Union[FileTypes, List[FileTypes]], prompt: str, + stream: Literal[True], background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, - mask: FileTypes | NotGiven = NOT_GIVEN, model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, output_compression: Optional[int] | NotGiven = NOT_GIVEN, output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, - quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, - size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] + size: Optional[ + Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] + ] | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -472,23 +1517,19 @@ async def edit( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImagesResponse: - """Creates an edited or extended image given one or more source images and a - prompt. - - This endpoint only supports `gpt-image-1` and `dall-e-2`. + ) -> AsyncStream[ImageGenStreamEvent]: + """ + Creates an image given a prompt. + [Learn more](https://platform.openai.com/docs/guides/images). Args: - image: The image(s) to edit. Must be a supported image file or an array of images. - - For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than - 50MB. You can provide up to 16 images. - - For `dall-e-2`, you can only provide one image, and it should be a square `png` - file less than 4MB. + prompt: A text description of the desired image(s). 
The maximum length is 32000 + characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters + for `dall-e-3`. - prompt: A text description of the desired image(s). The maximum length is 1000 - characters for `dall-e-2`, and 32000 characters for `gpt-image-1`. + stream: Generate the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. background: Allows to set transparency for the background of the generated image(s). This parameter is only supported for `gpt-image-1`. Must be one of `transparent`, @@ -498,37 +1539,49 @@ async def edit( If `transparent`, the output format needs to support transparency, so it should be set to either `png` (default value) or `webp`. - mask: An additional image whose fully transparent areas (e.g. where alpha is zero) - indicate where `image` should be edited. If there are multiple images provided, - the mask will be applied on the first image. Must be a valid PNG file, less than - 4MB, and have the same dimensions as `image`. + model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or + `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to + `gpt-image-1` is used. - model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are - supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` - is used. + moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must + be either `low` for less restrictive filtering or `auto` (default value). - n: The number of images to generate. Must be between 1 and 10. + n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only + `n=1` is supported. output_compression: The compression level (0-100%) for the generated images. This parameter is only supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and defaults to 100. output_format: The format in which the generated images are returned. This parameter is only - supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The - default value is `png`. + supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. - quality: The quality of the image that will be generated. `high`, `medium` and `low` are - only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. - Defaults to `auto`. + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. - response_format: The format in which the generated images are returned. Must be one of `url` or - `b64_json`. URLs are only valid for 60 minutes after the image has been - generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1` - will always return base64-encoded images. + quality: The quality of the image that will be generated. + + - `auto` (default value) will automatically select the best quality for the + given model. + - `high`, `medium` and `low` are supported for `gpt-image-1`. + - `hd` and `standard` are supported for `dall-e-3`. + - `standard` is the only option for `dall-e-2`. + + response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are + returned. Must be one of `url` or `b64_json`. 
URLs are only valid for 60 minutes + after the image has been generated. This parameter isn't supported for + `gpt-image-1` which will always return base64-encoded images. size: The size of the generated images. Must be one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or `auto` (default value) for - `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. + `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and + one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`. + + style: The style of the generated images. This parameter is only supported for + `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean + towards generating hyper-real and dramatic images. Natural causes the model to + produce more natural, less hyper-real looking images. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. @@ -542,47 +1595,21 @@ async def edit( timeout: Override the client-level default timeout for this request, in seconds """ - body = deepcopy_minimal( - { - "image": image, - "prompt": prompt, - "background": background, - "mask": mask, - "model": model, - "n": n, - "output_compression": output_compression, - "output_format": output_format, - "quality": quality, - "response_format": response_format, - "size": size, - "user": user, - } - ) - files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", ""], ["mask"]]) - # It should be noted that the actual Content-Type header that will be - # sent to the server will contain a `boundary` parameter, e.g. - # multipart/form-data; boundary=---abc-- - extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} - return await self._post( - "/images/edits", - body=await async_maybe_transform(body, image_edit_params.ImageEditParams), - files=files, - options=make_request_options( - extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout - ), - cast_to=ImagesResponse, - ) + ... + @overload async def generate( self, *, prompt: str, + stream: bool, background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, output_compression: Optional[int] | NotGiven = NOT_GIVEN, output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[ @@ -597,7 +1624,7 @@ async def generate( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> ImagesResponse: + ) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]: """ Creates an image given a prompt. [Learn more](https://platform.openai.com/docs/guides/images). @@ -607,6 +1634,10 @@ async def generate( characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. + stream: Generate the image in streaming mode. Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. 
+ background: Allows to set transparency for the background of the generated image(s). This parameter is only supported for `gpt-image-1`. Must be one of `transparent`, `opaque` or `auto` (default value). When `auto` is used, the model will @@ -632,6 +1663,10 @@ async def generate( output_format: The format in which the generated images are returned. This parameter is only supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. + partial_images: The number of partial images to generate. This parameter is used for streaming + responses that return partial images. Value must be between 0 and 3. When set to + 0, the response will be a single image sent in one streaming event. + quality: The quality of the image that will be generated. - `auto` (default value) will automatically select the best quality for the @@ -667,6 +1702,36 @@ async def generate( timeout: Override the client-level default timeout for this request, in seconds """ + ... + + @required_args(["prompt"], ["prompt", "stream"]) + async def generate( + self, + *, + prompt: str, + background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN, + model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN, + moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN, + n: Optional[int] | NotGiven = NOT_GIVEN, + output_compression: Optional[int] | NotGiven = NOT_GIVEN, + output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN, + partial_images: Optional[int] | NotGiven = NOT_GIVEN, + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN, + response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, + size: Optional[ + Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"] + ] + | NotGiven = NOT_GIVEN, + stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, + user: str | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]: return await self._post( "/images/generations", body=await async_maybe_transform( @@ -678,18 +1743,24 @@ async def generate( "n": n, "output_compression": output_compression, "output_format": output_format, + "partial_images": partial_images, "quality": quality, "response_format": response_format, "size": size, + "stream": stream, "style": style, "user": user, }, - image_generate_params.ImageGenerateParams, + image_generate_params.ImageGenerateParamsStreaming + if stream + else image_generate_params.ImageGenerateParamsNonStreaming, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ImagesResponse, + stream=stream or False, + stream_cls=AsyncStream[ImageGenStreamEvent], ) diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index 453b26f555..51f3ee5c9b 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -60,15 +60,19 @@ from .image_generate_params import ImageGenerateParams as ImageGenerateParams from .eval_retrieve_response import EvalRetrieveResponse as EvalRetrieveResponse from .file_chunking_strategy import FileChunkingStrategy as FileChunkingStrategy +from .image_gen_stream_event import ImageGenStreamEvent as ImageGenStreamEvent from .upload_complete_params import UploadCompleteParams as UploadCompleteParams from .container_create_params import ContainerCreateParams as ContainerCreateParams from .container_list_response import ContainerListResponse as ContainerListResponse from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams +from .image_edit_stream_event import ImageEditStreamEvent as ImageEditStreamEvent from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .moderation_create_params import ModerationCreateParams as ModerationCreateParams from .vector_store_list_params import VectorStoreListParams as VectorStoreListParams from .container_create_response import ContainerCreateResponse as ContainerCreateResponse from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse +from .image_gen_completed_event import ImageGenCompletedEvent as ImageGenCompletedEvent +from .image_edit_completed_event import ImageEditCompletedEvent as ImageEditCompletedEvent from .moderation_create_response import ModerationCreateResponse as ModerationCreateResponse from .vector_store_create_params import VectorStoreCreateParams as VectorStoreCreateParams from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams @@ -79,8 +83,10 @@ from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams +from .image_gen_partial_image_event import ImageGenPartialImageEvent as ImageGenPartialImageEvent from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy from .eval_custom_data_source_config import EvalCustomDataSourceConfig as EvalCustomDataSourceConfig +from .image_edit_partial_image_event import ImageEditPartialImageEvent as ImageEditPartialImageEvent from 
.moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam diff --git a/src/openai/types/image_edit_completed_event.py b/src/openai/types/image_edit_completed_event.py new file mode 100644 index 0000000000..a40682da6a --- /dev/null +++ b/src/openai/types/image_edit_completed_event.py @@ -0,0 +1,55 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["ImageEditCompletedEvent", "Usage", "UsageInputTokensDetails"] + + +class UsageInputTokensDetails(BaseModel): + image_tokens: int + """The number of image tokens in the input prompt.""" + + text_tokens: int + """The number of text tokens in the input prompt.""" + + +class Usage(BaseModel): + input_tokens: int + """The number of tokens (images and text) in the input prompt.""" + + input_tokens_details: UsageInputTokensDetails + """The input tokens detailed information for the image generation.""" + + output_tokens: int + """The number of image tokens in the output image.""" + + total_tokens: int + """The total number of tokens (images and text) used for the image generation.""" + + +class ImageEditCompletedEvent(BaseModel): + b64_json: str + """Base64-encoded final edited image data, suitable for rendering as an image.""" + + background: Literal["transparent", "opaque", "auto"] + """The background setting for the edited image.""" + + created_at: int + """The Unix timestamp when the event was created.""" + + output_format: Literal["png", "webp", "jpeg"] + """The output format for the edited image.""" + + quality: Literal["low", "medium", "high", "auto"] + """The quality setting for the edited image.""" + + size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"] + """The size of the edited image.""" + + type: Literal["image_edit.completed"] + """The type of the event. Always `image_edit.completed`.""" + + usage: Usage + """For `gpt-image-1` only, the token usage information for the image generation.""" diff --git a/src/openai/types/image_edit_params.py b/src/openai/types/image_edit_params.py index aecb98fa6f..d839e2fcbe 100644 --- a/src/openai/types/image_edit_params.py +++ b/src/openai/types/image_edit_params.py @@ -8,10 +8,10 @@ from .._types import FileTypes from .image_model import ImageModel -__all__ = ["ImageEditParams"] +__all__ = ["ImageEditParamsBase", "ImageEditParamsNonStreaming", "ImageEditParamsStreaming"] -class ImageEditParams(TypedDict, total=False): +class ImageEditParamsBase(TypedDict, total=False): image: Required[Union[FileTypes, List[FileTypes]]] """The image(s) to edit. Must be a supported image file or an array of images. @@ -40,6 +40,13 @@ class ImageEditParams(TypedDict, total=False): be set to either `png` (default value) or `webp`. """ + input_fidelity: Optional[Literal["high", "low"]] + """ + Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + """ + mask: FileTypes """An additional image whose fully transparent areas (e.g. @@ -72,6 +79,14 @@ class ImageEditParams(TypedDict, total=False): `jpeg`, or `webp`. The default value is `png`. 
""" + partial_images: Optional[int] + """The number of partial images to generate. + + This parameter is used for streaming responses that return partial images. Value + must be between 0 and 3. When set to 0, the response will be a single image sent + in one streaming event. + """ + quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] """The quality of the image that will be generated. @@ -101,3 +116,26 @@ class ImageEditParams(TypedDict, total=False): and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ + + +class ImageEditParamsNonStreaming(ImageEditParamsBase, total=False): + stream: Optional[Literal[False]] + """Edit the image in streaming mode. + + Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. + """ + + +class ImageEditParamsStreaming(ImageEditParamsBase): + stream: Required[Literal[True]] + """Edit the image in streaming mode. + + Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. + """ + + +ImageEditParams = Union[ImageEditParamsNonStreaming, ImageEditParamsStreaming] diff --git a/src/openai/types/image_edit_partial_image_event.py b/src/openai/types/image_edit_partial_image_event.py new file mode 100644 index 0000000000..20da45efc3 --- /dev/null +++ b/src/openai/types/image_edit_partial_image_event.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["ImageEditPartialImageEvent"] + + +class ImageEditPartialImageEvent(BaseModel): + b64_json: str + """Base64-encoded partial image data, suitable for rendering as an image.""" + + background: Literal["transparent", "opaque", "auto"] + """The background setting for the requested edited image.""" + + created_at: int + """The Unix timestamp when the event was created.""" + + output_format: Literal["png", "webp", "jpeg"] + """The output format for the requested edited image.""" + + partial_image_index: int + """0-based index for the partial image (streaming).""" + + quality: Literal["low", "medium", "high", "auto"] + """The quality setting for the requested edited image.""" + + size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"] + """The size of the requested edited image.""" + + type: Literal["image_edit.partial_image"] + """The type of the event. Always `image_edit.partial_image`.""" diff --git a/src/openai/types/image_edit_stream_event.py b/src/openai/types/image_edit_stream_event.py new file mode 100644 index 0000000000..759f6c6db5 --- /dev/null +++ b/src/openai/types/image_edit_stream_event.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from .._utils import PropertyInfo +from .image_edit_completed_event import ImageEditCompletedEvent +from .image_edit_partial_image_event import ImageEditPartialImageEvent + +__all__ = ["ImageEditStreamEvent"] + +ImageEditStreamEvent: TypeAlias = Annotated[ + Union[ImageEditPartialImageEvent, ImageEditCompletedEvent], PropertyInfo(discriminator="type") +] diff --git a/src/openai/types/image_gen_completed_event.py b/src/openai/types/image_gen_completed_event.py new file mode 100644 index 0000000000..e78da842d4 --- /dev/null +++ b/src/openai/types/image_gen_completed_event.py @@ -0,0 +1,55 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["ImageGenCompletedEvent", "Usage", "UsageInputTokensDetails"] + + +class UsageInputTokensDetails(BaseModel): + image_tokens: int + """The number of image tokens in the input prompt.""" + + text_tokens: int + """The number of text tokens in the input prompt.""" + + +class Usage(BaseModel): + input_tokens: int + """The number of tokens (images and text) in the input prompt.""" + + input_tokens_details: UsageInputTokensDetails + """The input tokens detailed information for the image generation.""" + + output_tokens: int + """The number of image tokens in the output image.""" + + total_tokens: int + """The total number of tokens (images and text) used for the image generation.""" + + +class ImageGenCompletedEvent(BaseModel): + b64_json: str + """Base64-encoded image data, suitable for rendering as an image.""" + + background: Literal["transparent", "opaque", "auto"] + """The background setting for the generated image.""" + + created_at: int + """The Unix timestamp when the event was created.""" + + output_format: Literal["png", "webp", "jpeg"] + """The output format for the generated image.""" + + quality: Literal["low", "medium", "high", "auto"] + """The quality setting for the generated image.""" + + size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"] + """The size of the generated image.""" + + type: Literal["image_generation.completed"] + """The type of the event. Always `image_generation.completed`.""" + + usage: Usage + """For `gpt-image-1` only, the token usage information for the image generation.""" diff --git a/src/openai/types/image_gen_partial_image_event.py b/src/openai/types/image_gen_partial_image_event.py new file mode 100644 index 0000000000..965d450604 --- /dev/null +++ b/src/openai/types/image_gen_partial_image_event.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["ImageGenPartialImageEvent"] + + +class ImageGenPartialImageEvent(BaseModel): + b64_json: str + """Base64-encoded partial image data, suitable for rendering as an image.""" + + background: Literal["transparent", "opaque", "auto"] + """The background setting for the requested image.""" + + created_at: int + """The Unix timestamp when the event was created.""" + + output_format: Literal["png", "webp", "jpeg"] + """The output format for the requested image.""" + + partial_image_index: int + """0-based index for the partial image (streaming).""" + + quality: Literal["low", "medium", "high", "auto"] + """The quality setting for the requested image.""" + + size: Literal["1024x1024", "1024x1536", "1536x1024", "auto"] + """The size of the requested image.""" + + type: Literal["image_generation.partial_image"] + """The type of the event. Always `image_generation.partial_image`.""" diff --git a/src/openai/types/image_gen_stream_event.py b/src/openai/types/image_gen_stream_event.py new file mode 100644 index 0000000000..7dde5d5245 --- /dev/null +++ b/src/openai/types/image_gen_stream_event.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Annotated, TypeAlias + +from .._utils import PropertyInfo +from .image_gen_completed_event import ImageGenCompletedEvent +from .image_gen_partial_image_event import ImageGenPartialImageEvent + +__all__ = ["ImageGenStreamEvent"] + +ImageGenStreamEvent: TypeAlias = Annotated[ + Union[ImageGenPartialImageEvent, ImageGenCompletedEvent], PropertyInfo(discriminator="type") +] diff --git a/src/openai/types/image_generate_params.py b/src/openai/types/image_generate_params.py index 8fc10220dc..bd9f34b28e 100644 --- a/src/openai/types/image_generate_params.py +++ b/src/openai/types/image_generate_params.py @@ -7,10 +7,10 @@ from .image_model import ImageModel -__all__ = ["ImageGenerateParams"] +__all__ = ["ImageGenerateParamsBase", "ImageGenerateParamsNonStreaming", "ImageGenerateParamsStreaming"] -class ImageGenerateParams(TypedDict, total=False): +class ImageGenerateParamsBase(TypedDict, total=False): prompt: Required[str] """A text description of the desired image(s). @@ -62,6 +62,14 @@ class ImageGenerateParams(TypedDict, total=False): `jpeg`, or `webp`. """ + partial_images: Optional[int] + """The number of partial images to generate. + + This parameter is used for streaming responses that return partial images. Value + must be between 0 and 3. When set to 0, the response will be a single image sent + in one streaming event. + """ + quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] """The quality of the image that will be generated. @@ -107,3 +115,26 @@ class ImageGenerateParams(TypedDict, total=False): and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). """ + + +class ImageGenerateParamsNonStreaming(ImageGenerateParamsBase, total=False): + stream: Optional[Literal[False]] + """Generate the image in streaming mode. + + Defaults to `false`. See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. + """ + + +class ImageGenerateParamsStreaming(ImageGenerateParamsBase): + stream: Required[Literal[True]] + """Generate the image in streaming mode. + + Defaults to `false`. 
See the + [Image generation guide](https://platform.openai.com/docs/guides/image-generation) + for more information. This parameter is only supported for `gpt-image-1`. + """ + + +ImageGenerateParams = Union[ImageGenerateParamsNonStreaming, ImageGenerateParamsStreaming] diff --git a/src/openai/types/responses/response_output_refusal.py b/src/openai/types/responses/response_output_refusal.py index eba581070d..685c8722a6 100644 --- a/src/openai/types/responses/response_output_refusal.py +++ b/src/openai/types/responses/response_output_refusal.py @@ -9,7 +9,7 @@ class ResponseOutputRefusal(BaseModel): refusal: str - """The refusal explanationfrom the model.""" + """The refusal explanation from the model.""" type: Literal["refusal"] """The type of the refusal. Always `refusal`.""" diff --git a/src/openai/types/responses/response_output_refusal_param.py b/src/openai/types/responses/response_output_refusal_param.py index 53140a6080..54cfaf0791 100644 --- a/src/openai/types/responses/response_output_refusal_param.py +++ b/src/openai/types/responses/response_output_refusal_param.py @@ -9,7 +9,7 @@ class ResponseOutputRefusalParam(TypedDict, total=False): refusal: Required[str] - """The refusal explanationfrom the model.""" + """The refusal explanation from the model.""" type: Required[Literal["refusal"]] """The type of the refusal. Always `refusal`.""" diff --git a/src/openai/types/responses/tool.py b/src/openai/types/responses/tool.py index 9c1573bda9..4399871e29 100644 --- a/src/openai/types/responses/tool.py +++ b/src/openai/types/responses/tool.py @@ -124,6 +124,13 @@ class ImageGeneration(BaseModel): One of `transparent`, `opaque`, or `auto`. Default: `auto`. """ + input_fidelity: Optional[Literal["high", "low"]] = None + """ + Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + """ + input_image_mask: Optional[ImageGenerationInputImageMask] = None """Optional mask for inpainting. diff --git a/src/openai/types/responses/tool_param.py b/src/openai/types/responses/tool_param.py index 493a1dad9c..a977f06e3f 100644 --- a/src/openai/types/responses/tool_param.py +++ b/src/openai/types/responses/tool_param.py @@ -125,6 +125,13 @@ class ImageGeneration(TypedDict, total=False): One of `transparent`, `opaque`, or `auto`. Default: `auto`. """ + input_fidelity: Optional[Literal["high", "low"]] + """ + Control how much effort the model will exert to match the style and features, + especially facial features, of input images. This parameter is only supported + for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`. + """ + input_image_mask: ImageGenerationInputImageMask """Optional mask for inpainting. 
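
For readers following along, here is a minimal, illustrative sketch of consuming the streaming surface this patch introduces; it is not part of the diff. The event types and fields mirror the `ImageGenStreamEvent` union defined above, and the tests below exercise the same call shape. The output file names and the `partial_images` value are assumptions for illustration, and `OPENAI_API_KEY` is assumed to be set in the environment.

import base64

from openai import OpenAI

client = OpenAI()

# stream=True selects the ImageGenerateParamsStreaming overload and returns a
# Stream[ImageGenStreamEvent] rather than a plain ImagesResponse.
stream = client.images.generate(
    model="gpt-image-1",
    prompt="A cute baby sea otter",
    stream=True,
    partial_images=2,  # 0-3 partial frames; the final image may arrive earlier
)

for event in stream:
    if event.type == "image_generation.partial_image":
        # Intermediate frames, indexed by partial_image_index (0-based).
        with open(f"otter.partial-{event.partial_image_index}.png", "wb") as f:
            f.write(base64.b64decode(event.b64_json))
    elif event.type == "image_generation.completed":
        # Final frame; usage carries gpt-image-1 token accounting.
        with open("otter.png", "wb") as f:
            f.write(base64.b64decode(event.b64_json))

The edit path is symmetrical: passing `stream=True` to `client.images.edit(...)` yields `ImageEditPartialImageEvent` and `ImageEditCompletedEvent` instead, as the `ImageEditStreamEvent` union above describes.
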
diff --git a/tests/api_resources/test_images.py b/tests/api_resources/test_images.py index 10fc56d685..99fe77d8e0 100644 --- a/tests/api_resources/test_images.py +++ b/tests/api_resources/test_images.py @@ -61,7 +61,7 @@ def test_streaming_response_create_variation(self, client: OpenAI) -> None: assert cast(Any, response.is_closed) is True @parametrize - def test_method_edit(self, client: OpenAI) -> None: + def test_method_edit_overload_1(self, client: OpenAI) -> None: image = client.images.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", @@ -69,25 +69,28 @@ def test_method_edit(self, client: OpenAI) -> None: assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize - def test_method_edit_with_all_params(self, client: OpenAI) -> None: + def test_method_edit_with_all_params_overload_1(self, client: OpenAI) -> None: image = client.images.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", background="transparent", + input_fidelity="high", mask=b"raw file contents", model="string", n=1, output_compression=100, output_format="png", + partial_images=1, quality="high", response_format="url", size="1024x1024", + stream=False, user="user-1234", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize - def test_raw_response_edit(self, client: OpenAI) -> None: + def test_raw_response_edit_overload_1(self, client: OpenAI) -> None: response = client.images.with_raw_response.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", @@ -99,7 +102,7 @@ def test_raw_response_edit(self, client: OpenAI) -> None: assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize - def test_streaming_response_edit(self, client: OpenAI) -> None: + def test_streaming_response_edit_overload_1(self, client: OpenAI) -> None: with client.images.with_streaming_response.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", @@ -113,14 +116,71 @@ def test_streaming_response_edit(self, client: OpenAI) -> None: assert cast(Any, response.is_closed) is True @parametrize - def test_method_generate(self, client: OpenAI) -> None: + def test_method_edit_overload_2(self, client: OpenAI) -> None: + image_stream = client.images.edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + stream=True, + ) + image_stream.response.close() + + @parametrize + def test_method_edit_with_all_params_overload_2(self, client: OpenAI) -> None: + image_stream = client.images.edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + stream=True, + background="transparent", + input_fidelity="high", + mask=b"raw file contents", + model="string", + n=1, + output_compression=100, + output_format="png", + partial_images=1, + quality="high", + response_format="url", + size="1024x1024", + user="user-1234", + ) + image_stream.response.close() + + @parametrize + def test_raw_response_edit_overload_2(self, client: OpenAI) -> None: + response = client.images.with_raw_response.edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + stream=True, + ) + + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + stream = response.parse() + stream.close() + + @parametrize + def test_streaming_response_edit_overload_2(self, client: OpenAI) -> None: + with client.images.with_streaming_response.edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + stream=True, + ) as 
response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = response.parse() + stream.close() + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_method_generate_overload_1(self, client: OpenAI) -> None: image = client.images.generate( prompt="A cute baby sea otter", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize - def test_method_generate_with_all_params(self, client: OpenAI) -> None: + def test_method_generate_with_all_params_overload_1(self, client: OpenAI) -> None: image = client.images.generate( prompt="A cute baby sea otter", background="transparent", @@ -129,16 +189,18 @@ def test_method_generate_with_all_params(self, client: OpenAI) -> None: n=1, output_compression=100, output_format="png", + partial_images=1, quality="medium", response_format="url", size="1024x1024", + stream=False, style="vivid", user="user-1234", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize - def test_raw_response_generate(self, client: OpenAI) -> None: + def test_raw_response_generate_overload_1(self, client: OpenAI) -> None: response = client.images.with_raw_response.generate( prompt="A cute baby sea otter", ) @@ -149,7 +211,7 @@ def test_raw_response_generate(self, client: OpenAI) -> None: assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize - def test_streaming_response_generate(self, client: OpenAI) -> None: + def test_streaming_response_generate_overload_1(self, client: OpenAI) -> None: with client.images.with_streaming_response.generate( prompt="A cute baby sea otter", ) as response: @@ -161,6 +223,59 @@ def test_streaming_response_generate(self, client: OpenAI) -> None: assert cast(Any, response.is_closed) is True + @parametrize + def test_method_generate_overload_2(self, client: OpenAI) -> None: + image_stream = client.images.generate( + prompt="A cute baby sea otter", + stream=True, + ) + image_stream.response.close() + + @parametrize + def test_method_generate_with_all_params_overload_2(self, client: OpenAI) -> None: + image_stream = client.images.generate( + prompt="A cute baby sea otter", + stream=True, + background="transparent", + model="string", + moderation="low", + n=1, + output_compression=100, + output_format="png", + partial_images=1, + quality="medium", + response_format="url", + size="1024x1024", + style="vivid", + user="user-1234", + ) + image_stream.response.close() + + @parametrize + def test_raw_response_generate_overload_2(self, client: OpenAI) -> None: + response = client.images.with_raw_response.generate( + prompt="A cute baby sea otter", + stream=True, + ) + + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + stream = response.parse() + stream.close() + + @parametrize + def test_streaming_response_generate_overload_2(self, client: OpenAI) -> None: + with client.images.with_streaming_response.generate( + prompt="A cute baby sea otter", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = response.parse() + stream.close() + + assert cast(Any, response.is_closed) is True + class TestAsyncImages: parametrize = pytest.mark.parametrize( @@ -211,7 +326,7 @@ async def test_streaming_response_create_variation(self, async_client: AsyncOpen assert cast(Any, response.is_closed) is True @parametrize - async def test_method_edit(self, async_client: AsyncOpenAI) -> None: + async def 
test_method_edit_overload_1(self, async_client: AsyncOpenAI) -> None: image = await async_client.images.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", @@ -219,25 +334,28 @@ async def test_method_edit(self, async_client: AsyncOpenAI) -> None: assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize - async def test_method_edit_with_all_params(self, async_client: AsyncOpenAI) -> None: + async def test_method_edit_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None: image = await async_client.images.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", background="transparent", + input_fidelity="high", mask=b"raw file contents", model="string", n=1, output_compression=100, output_format="png", + partial_images=1, quality="high", response_format="url", size="1024x1024", + stream=False, user="user-1234", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize - async def test_raw_response_edit(self, async_client: AsyncOpenAI) -> None: + async def test_raw_response_edit_overload_1(self, async_client: AsyncOpenAI) -> None: response = await async_client.images.with_raw_response.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", @@ -249,7 +367,7 @@ async def test_raw_response_edit(self, async_client: AsyncOpenAI) -> None: assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize - async def test_streaming_response_edit(self, async_client: AsyncOpenAI) -> None: + async def test_streaming_response_edit_overload_1(self, async_client: AsyncOpenAI) -> None: async with async_client.images.with_streaming_response.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", @@ -263,14 +381,71 @@ async def test_streaming_response_edit(self, async_client: AsyncOpenAI) -> None: assert cast(Any, response.is_closed) is True @parametrize - async def test_method_generate(self, async_client: AsyncOpenAI) -> None: + async def test_method_edit_overload_2(self, async_client: AsyncOpenAI) -> None: + image_stream = await async_client.images.edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + stream=True, + ) + await image_stream.response.aclose() + + @parametrize + async def test_method_edit_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None: + image_stream = await async_client.images.edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + stream=True, + background="transparent", + input_fidelity="high", + mask=b"raw file contents", + model="string", + n=1, + output_compression=100, + output_format="png", + partial_images=1, + quality="high", + response_format="url", + size="1024x1024", + user="user-1234", + ) + await image_stream.response.aclose() + + @parametrize + async def test_raw_response_edit_overload_2(self, async_client: AsyncOpenAI) -> None: + response = await async_client.images.with_raw_response.edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + stream=True, + ) + + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + stream = response.parse() + await stream.close() + + @parametrize + async def test_streaming_response_edit_overload_2(self, async_client: AsyncOpenAI) -> None: + async with async_client.images.with_streaming_response.edit( + image=b"raw file contents", + prompt="A cute baby sea otter wearing a beret", + stream=True, + ) as response: + assert not response.is_closed + 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = await response.parse() + await stream.close() + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_method_generate_overload_1(self, async_client: AsyncOpenAI) -> None: image = await async_client.images.generate( prompt="A cute baby sea otter", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize - async def test_method_generate_with_all_params(self, async_client: AsyncOpenAI) -> None: + async def test_method_generate_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None: image = await async_client.images.generate( prompt="A cute baby sea otter", background="transparent", @@ -279,16 +454,18 @@ async def test_method_generate_with_all_params(self, async_client: AsyncOpenAI) n=1, output_compression=100, output_format="png", + partial_images=1, quality="medium", response_format="url", size="1024x1024", + stream=False, style="vivid", user="user-1234", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize - async def test_raw_response_generate(self, async_client: AsyncOpenAI) -> None: + async def test_raw_response_generate_overload_1(self, async_client: AsyncOpenAI) -> None: response = await async_client.images.with_raw_response.generate( prompt="A cute baby sea otter", ) @@ -299,7 +476,7 @@ async def test_raw_response_generate(self, async_client: AsyncOpenAI) -> None: assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize - async def test_streaming_response_generate(self, async_client: AsyncOpenAI) -> None: + async def test_streaming_response_generate_overload_1(self, async_client: AsyncOpenAI) -> None: async with async_client.images.with_streaming_response.generate( prompt="A cute baby sea otter", ) as response: @@ -310,3 +487,56 @@ async def test_streaming_response_generate(self, async_client: AsyncOpenAI) -> N assert_matches_type(ImagesResponse, image, path=["response"]) assert cast(Any, response.is_closed) is True + + @parametrize + async def test_method_generate_overload_2(self, async_client: AsyncOpenAI) -> None: + image_stream = await async_client.images.generate( + prompt="A cute baby sea otter", + stream=True, + ) + await image_stream.response.aclose() + + @parametrize + async def test_method_generate_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None: + image_stream = await async_client.images.generate( + prompt="A cute baby sea otter", + stream=True, + background="transparent", + model="string", + moderation="low", + n=1, + output_compression=100, + output_format="png", + partial_images=1, + quality="medium", + response_format="url", + size="1024x1024", + style="vivid", + user="user-1234", + ) + await image_stream.response.aclose() + + @parametrize + async def test_raw_response_generate_overload_2(self, async_client: AsyncOpenAI) -> None: + response = await async_client.images.with_raw_response.generate( + prompt="A cute baby sea otter", + stream=True, + ) + + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + stream = response.parse() + await stream.close() + + @parametrize + async def test_streaming_response_generate_overload_2(self, async_client: AsyncOpenAI) -> None: + async with async_client.images.with_streaming_response.generate( + prompt="A cute baby sea otter", + stream=True, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + stream = await response.parse() + await 
stream.close() + + assert cast(Any, response.is_closed) is True From 35df552d032873b62c2ae127a0efce60947dbed0 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 16 Jul 2025 16:25:26 +0000 Subject: [PATCH 32/90] release: 1.97.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6b38a1bd5a..7b33636f46 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.96.1" + ".": "1.97.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 93bfb63f37..2e603f06be 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.97.0 (2025-07-16) + +Full Changelog: [v1.96.1...v1.97.0](https://github.com/openai/openai-python/compare/v1.96.1...v1.97.0) + +### Features + +* **api:** manual updates ([ed8e899](https://github.com/openai/openai-python/commit/ed8e89953d11bd5f44fa531422bdbb7a577ab426)) + ## 1.96.1 (2025-07-15) Full Changelog: [v1.96.0...v1.96.1](https://github.com/openai/openai-python/compare/v1.96.0...v1.96.1) diff --git a/pyproject.toml b/pyproject.toml index 0f655d058d..533379d52a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.96.1" +version = "1.97.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 39be0338f6..8e5ed5fa86 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.96.1" # x-release-please-version +__version__ = "1.97.0" # x-release-please-version From fa466c099aab0213f3ce09d5adcfca5ae2bf58a4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 16 Jul 2025 19:06:17 +0000 Subject: [PATCH 33/90] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 2b9160cf6e..bc75e5c98c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-670ea0d2cc44f52a87dd3cadea45632953283e0636ba30788fdbdb22a232ccac.yml openapi_spec_hash: d8b7d38911fead545adf3e4297956410 -config_hash: 5525bda35e48ea6387c6175c4d1651fa +config_hash: b2a4028fdbb27a08de89831ed310e244 From c6b933520213cddea927c4fe83c1abe2f66893d8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 12:27:19 +0000 Subject: [PATCH 34/90] fix(parsing): ignore empty metadata --- src/openai/_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/openai/_models.py b/src/openai/_models.py index f347a81dac..dee5551948 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -464,7 +464,7 @@ def construct_type(*, value: object, type_: object, metadata: Optional[List[Any] type_ = type_.__value__ # type: ignore[unreachable] # unwrap `Annotated[T, ...]` -> `T` - if metadata is not None: + if metadata is not None and len(metadata) > 0: meta: tuple[Any, ...] 
= tuple(metadata) elif is_annotated_type(type_): meta = get_args(type_)[1:] From bf4a9a422e5eaffa90863439ddfd8a82cbaaa636 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 21:17:00 +0000 Subject: [PATCH 35/90] chore(api): event shapes more accurate --- .stats.yml | 6 ++-- api.md | 2 -- src/openai/lib/streaming/responses/_events.py | 4 --- .../lib/streaming/responses/_responses.py | 2 ++ src/openai/resources/audio/speech.py | 8 ++--- .../resources/beta/realtime/sessions.py | 14 +++----- .../resources/chat/completions/completions.py | 12 +++---- src/openai/resources/images.py | 36 +++++++++++++++++++ src/openai/resources/responses/responses.py | 12 +++---- .../types/audio/speech_create_params.py | 6 +--- .../types/beta/realtime/realtime_response.py | 9 ++--- .../beta/realtime/response_create_event.py | 8 ++--- .../realtime/response_create_event_param.py | 6 ++-- src/openai/types/beta/realtime/session.py | 8 ++--- .../beta/realtime/session_create_params.py | 6 ++-- .../beta/realtime/session_create_response.py | 8 ++--- .../beta/realtime/session_update_event.py | 8 ++--- .../realtime/session_update_event_param.py | 6 ++-- src/openai/types/chat/chat_completion.py | 2 +- .../types/chat/chat_completion_audio_param.py | 6 +--- .../types/chat/chat_completion_chunk.py | 2 +- .../types/chat/completion_create_params.py | 2 +- src/openai/types/image_edit_params.py | 3 ++ src/openai/types/image_generate_params.py | 3 ++ src/openai/types/images_response.py | 2 +- src/openai/types/responses/__init__.py | 2 -- src/openai/types/responses/response.py | 2 +- .../response_code_interpreter_tool_call.py | 6 +++- ...sponse_code_interpreter_tool_call_param.py | 6 +++- .../types/responses/response_create_params.py | 2 +- ...response_mcp_call_arguments_delta_event.py | 7 ++-- .../response_mcp_call_arguments_done_event.py | 4 +-- .../response_mcp_call_completed_event.py | 6 ++++ .../response_mcp_call_failed_event.py | 6 ++++ ...response_mcp_list_tools_completed_event.py | 6 ++++ .../response_mcp_list_tools_failed_event.py | 6 ++++ ...sponse_mcp_list_tools_in_progress_event.py | 6 ++++ .../response_reasoning_delta_event.py | 27 -------------- .../response_reasoning_done_event.py | 27 -------------- .../types/responses/response_stream_event.py | 4 --- .../responses/response_text_delta_event.py | 25 ++++++++++++- .../responses/response_text_done_event.py | 25 ++++++++++++- .../types/shared/function_definition.py | 2 +- .../shared_params/function_definition.py | 2 +- 44 files changed, 186 insertions(+), 166 deletions(-) delete mode 100644 src/openai/types/responses/response_reasoning_delta_event.py delete mode 100644 src/openai/types/responses/response_reasoning_done_event.py diff --git a/.stats.yml b/.stats.yml index bc75e5c98c..2dc4f680a9 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-670ea0d2cc44f52a87dd3cadea45632953283e0636ba30788fdbdb22a232ccac.yml -openapi_spec_hash: d8b7d38911fead545adf3e4297956410 -config_hash: b2a4028fdbb27a08de89831ed310e244 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b2a451656ca64d30d174391ebfd94806b4de3ab76dc55b92843cfb7f1a54ecb6.yml +openapi_spec_hash: 27d9691b400f28c17ef063a1374048b0 +config_hash: e822d0c9082c8b312264403949243179 diff --git a/api.md b/api.md index b3a2245cdd..0280b886d1 100644 --- a/api.md +++ b/api.md @@ -791,8 
+791,6 @@ from openai.types.responses import ( ResponseOutputTextAnnotationAddedEvent, ResponsePrompt, ResponseQueuedEvent, - ResponseReasoningDeltaEvent, - ResponseReasoningDoneEvent, ResponseReasoningItem, ResponseReasoningSummaryDeltaEvent, ResponseReasoningSummaryDoneEvent, diff --git a/src/openai/lib/streaming/responses/_events.py b/src/openai/lib/streaming/responses/_events.py index 6e547815e2..4c8a588944 100644 --- a/src/openai/lib/streaming/responses/_events.py +++ b/src/openai/lib/streaming/responses/_events.py @@ -21,9 +21,7 @@ ResponseRefusalDoneEvent, ResponseRefusalDeltaEvent, ResponseMcpCallFailedEvent, - ResponseReasoningDoneEvent, ResponseOutputItemDoneEvent, - ResponseReasoningDeltaEvent, ResponseContentPartDoneEvent, ResponseOutputItemAddedEvent, ResponseContentPartAddedEvent, @@ -139,10 +137,8 @@ class ResponseCompletedEvent(RawResponseCompletedEvent, GenericModel, Generic[Te ResponseMcpListToolsInProgressEvent, ResponseOutputTextAnnotationAddedEvent, ResponseQueuedEvent, - ResponseReasoningDeltaEvent, ResponseReasoningSummaryDeltaEvent, ResponseReasoningSummaryDoneEvent, - ResponseReasoningDoneEvent, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/lib/streaming/responses/_responses.py b/src/openai/lib/streaming/responses/_responses.py index 2c2fec5469..d45664de45 100644 --- a/src/openai/lib/streaming/responses/_responses.py +++ b/src/openai/lib/streaming/responses/_responses.py @@ -264,6 +264,7 @@ def handle_event(self, event: RawResponseStreamEvent) -> List[ResponseStreamEven item_id=event.item_id, output_index=event.output_index, sequence_number=event.sequence_number, + logprobs=event.logprobs, type="response.output_text.delta", snapshot=content.text, ) @@ -282,6 +283,7 @@ def handle_event(self, event: RawResponseStreamEvent) -> List[ResponseStreamEven item_id=event.item_id, output_index=event.output_index, sequence_number=event.sequence_number, + logprobs=event.logprobs, type="response.output_text.done", text=event.text, parsed=parse_text(event.text, text_format=self._text_format), diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py index fe776baae8..6251cfed4e 100644 --- a/src/openai/resources/audio/speech.py +++ b/src/openai/resources/audio/speech.py @@ -50,9 +50,7 @@ def create( *, input: str, model: Union[str, SpeechModel], - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ], + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]], instructions: str | NotGiven = NOT_GIVEN, response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, @@ -146,9 +144,7 @@ async def create( *, input: str, model: Union[str, SpeechModel], - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ], + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]], instructions: str | NotGiven = NOT_GIVEN, response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py index 77f1ec9059..e639c0ba43 100644 --- a/src/openai/resources/beta/realtime/sessions.py +++ b/src/openai/resources/beta/realtime/sessions.py @@ -66,9 +66,7 @@ def create( tools: 
Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN, tracing: session_create_params.Tracing | NotGiven = NOT_GIVEN, turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN, - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -163,8 +161,7 @@ def create( voice: The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are - `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, - `shimmer`, and `verse`. + `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`. extra_headers: Send extra headers @@ -251,9 +248,7 @@ async def create( tools: Iterable[session_create_params.Tool] | NotGiven = NOT_GIVEN, tracing: session_create_params.Tracing | NotGiven = NOT_GIVEN, turn_detection: session_create_params.TurnDetection | NotGiven = NOT_GIVEN, - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -348,8 +343,7 @@ async def create( voice: The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are - `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, - `shimmer`, and `verse`. + `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`. extra_headers: Send extra headers diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py index 5806296773..739aa662d4 100644 --- a/src/openai/resources/chat/completions/completions.py +++ b/src/openai/resources/chat/completions/completions.py @@ -417,7 +417,7 @@ def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -697,7 +697,7 @@ def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. 
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -968,7 +968,7 @@ def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -1784,7 +1784,7 @@ async def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -2064,7 +2064,7 @@ async def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -2335,7 +2335,7 @@ async def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py index 77b7a1b24e..c8eda8a76f 100644 --- a/src/openai/resources/images.py +++ b/src/openai/resources/images.py @@ -196,6 +196,9 @@ def edit( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`. @@ -310,6 +313,9 @@ def edit( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. 
`high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`. @@ -420,6 +426,9 @@ def edit( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`. @@ -579,6 +588,9 @@ def generate( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. - `auto` (default value) will automatically select the best quality for the @@ -690,6 +702,9 @@ def generate( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. - `auto` (default value) will automatically select the best quality for the @@ -797,6 +812,9 @@ def generate( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. - `auto` (default value) will automatically select the best quality for the @@ -1066,6 +1084,9 @@ async def edit( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`. @@ -1180,6 +1201,9 @@ async def edit( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality. Defaults to `auto`. @@ -1290,6 +1314,9 @@ async def edit( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. `high`, `medium` and `low` are only supported for `gpt-image-1`. 
`dall-e-2` only supports `standard` quality. Defaults to `auto`. @@ -1449,6 +1476,9 @@ async def generate( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. - `auto` (default value) will automatically select the best quality for the @@ -1560,6 +1590,9 @@ async def generate( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. - `auto` (default value) will automatically select the best quality for the @@ -1667,6 +1700,9 @@ async def generate( responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. + quality: The quality of the image that will be generated. - `auto` (default value) will automatically select the best quality for the diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index ce132bdb05..fe99aa851d 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -198,7 +198,7 @@ def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -414,7 +414,7 @@ def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -623,7 +623,7 @@ def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. 
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -1463,7 +1463,7 @@ async def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -1679,7 +1679,7 @@ async def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service @@ -1888,7 +1888,7 @@ async def create( - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service diff --git a/src/openai/types/audio/speech_create_params.py b/src/openai/types/audio/speech_create_params.py index 4ee4a3c4e4..feeb68c68b 100644 --- a/src/openai/types/audio/speech_create_params.py +++ b/src/openai/types/audio/speech_create_params.py @@ -20,11 +20,7 @@ class SpeechCreateParams(TypedDict, total=False): `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`. """ - voice: Required[ - Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] - ] + voice: Required[Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]] """The voice to use when generating the audio. Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, diff --git a/src/openai/types/beta/realtime/realtime_response.py b/src/openai/types/beta/realtime/realtime_response.py index 28e03c8717..ccc97c5d22 100644 --- a/src/openai/types/beta/realtime/realtime_response.py +++ b/src/openai/types/beta/realtime/realtime_response.py @@ -80,13 +80,8 @@ class RealtimeResponse(BaseModel): will become the input for later turns. """ - voice: Union[ - str, - Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], - None, - ] = None + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None """ The voice the model used to respond. Current voice options are `alloy`, `ash`, - `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and - `verse`. + `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`. 
""" diff --git a/src/openai/types/beta/realtime/response_create_event.py b/src/openai/types/beta/realtime/response_create_event.py index 3b8a6de8df..7219cedbf3 100644 --- a/src/openai/types/beta/realtime/response_create_event.py +++ b/src/openai/types/beta/realtime/response_create_event.py @@ -101,16 +101,12 @@ class Response(BaseModel): tools: Optional[List[ResponseTool]] = None """Tools (functions) available to the model.""" - voice: Union[ - str, - Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], - None, - ] = None + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/beta/realtime/response_create_event_param.py b/src/openai/types/beta/realtime/response_create_event_param.py index c569d507a0..b4d54bba92 100644 --- a/src/openai/types/beta/realtime/response_create_event_param.py +++ b/src/openai/types/beta/realtime/response_create_event_param.py @@ -102,14 +102,12 @@ class Response(TypedDict, total=False): tools: Iterable[ResponseTool] """Tools (functions) available to the model.""" - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/beta/realtime/session.py b/src/openai/types/beta/realtime/session.py index 606fd83851..f84b3ee4a0 100644 --- a/src/openai/types/beta/realtime/session.py +++ b/src/openai/types/beta/realtime/session.py @@ -268,14 +268,10 @@ class Session(BaseModel): natural conversations, but may have a higher latency. """ - voice: Union[ - str, - Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], - None, - ] = None + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py index e04985d2b6..6be09d8bae 100644 --- a/src/openai/types/beta/realtime/session_create_params.py +++ b/src/openai/types/beta/realtime/session_create_params.py @@ -145,14 +145,12 @@ class SessionCreateParams(TypedDict, total=False): natural conversations, but may have a higher latency. 
""" - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/beta/realtime/session_create_response.py b/src/openai/types/beta/realtime/session_create_response.py index 15d5c1742b..471da03691 100644 --- a/src/openai/types/beta/realtime/session_create_response.py +++ b/src/openai/types/beta/realtime/session_create_response.py @@ -187,14 +187,10 @@ class SessionCreateResponse(BaseModel): speech. """ - voice: Union[ - str, - Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], - None, - ] = None + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo` `sage`, `shimmer` and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py index 789b9cd1e5..5b4185dbf6 100644 --- a/src/openai/types/beta/realtime/session_update_event.py +++ b/src/openai/types/beta/realtime/session_update_event.py @@ -290,16 +290,12 @@ class Session(BaseModel): natural conversations, but may have a higher latency. """ - voice: Union[ - str, - Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"], - None, - ] = None + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"], None] = None """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. """ diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py index 2dfa2c26f3..3063449bfd 100644 --- a/src/openai/types/beta/realtime/session_update_event_param.py +++ b/src/openai/types/beta/realtime/session_update_event_param.py @@ -288,14 +288,12 @@ class Session(TypedDict, total=False): natural conversations, but may have a higher latency. """ - voice: Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] + voice: Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]] """The voice the model uses to respond. Voice cannot be changed during the session once the model has responded with audio at least once. Current voice options are `alloy`, `ash`, `ballad`, - `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and `verse`. + `coral`, `echo`, `sage`, `shimmer`, and `verse`. 
""" diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py index afc23e3f3d..42463f7ec8 100644 --- a/src/openai/types/chat/chat_completion.py +++ b/src/openai/types/chat/chat_completion.py @@ -65,7 +65,7 @@ class ChatCompletion(BaseModel): - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service diff --git a/src/openai/types/chat/chat_completion_audio_param.py b/src/openai/types/chat/chat_completion_audio_param.py index 25caada177..dc68159c1e 100644 --- a/src/openai/types/chat/chat_completion_audio_param.py +++ b/src/openai/types/chat/chat_completion_audio_param.py @@ -15,11 +15,7 @@ class ChatCompletionAudioParam(TypedDict, total=False): Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`. """ - voice: Required[ - Union[ - str, Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"] - ] - ] + voice: Required[Union[str, Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]] """The voice the model uses to respond. Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py index da6e315830..082bb6cc19 100644 --- a/src/openai/types/chat/chat_completion_chunk.py +++ b/src/openai/types/chat/chat_completion_chunk.py @@ -134,7 +134,7 @@ class ChatCompletionChunk(BaseModel): - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 44ea853041..191793c18f 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -214,7 +214,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. 
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service diff --git a/src/openai/types/image_edit_params.py b/src/openai/types/image_edit_params.py index d839e2fcbe..c0481012e4 100644 --- a/src/openai/types/image_edit_params.py +++ b/src/openai/types/image_edit_params.py @@ -85,6 +85,9 @@ class ImageEditParamsBase(TypedDict, total=False): This parameter is used for streaming responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. """ quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] diff --git a/src/openai/types/image_generate_params.py b/src/openai/types/image_generate_params.py index bd9f34b28e..e9e9292cc2 100644 --- a/src/openai/types/image_generate_params.py +++ b/src/openai/types/image_generate_params.py @@ -68,6 +68,9 @@ class ImageGenerateParamsBase(TypedDict, total=False): This parameter is used for streaming responses that return partial images. Value must be between 0 and 3. When set to 0, the response will be a single image sent in one streaming event. + + Note that the final image may be sent before the full number of partial images + are generated if the full image is generated more quickly. """ quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] diff --git a/src/openai/types/images_response.py b/src/openai/types/images_response.py index 2a8ca728ab..89cc71df24 100644 --- a/src/openai/types/images_response.py +++ b/src/openai/types/images_response.py @@ -25,7 +25,7 @@ class Usage(BaseModel): """The input tokens detailed information for the image generation.""" output_tokens: int - """The number of image tokens in the output image.""" + """The number of output tokens generated by the model.""" total_tokens: int """The total number of tokens (images and text) used for the image generation.""" diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py index 4316e47730..b563035e78 100644 --- a/src/openai/types/responses/__init__.py +++ b/src/openai/types/responses/__init__.py @@ -81,11 +81,9 @@ from .response_refusal_delta_event import ResponseRefusalDeltaEvent as ResponseRefusalDeltaEvent from .response_output_message_param import ResponseOutputMessageParam as ResponseOutputMessageParam from .response_output_refusal_param import ResponseOutputRefusalParam as ResponseOutputRefusalParam -from .response_reasoning_done_event import ResponseReasoningDoneEvent as ResponseReasoningDoneEvent from .response_reasoning_item_param import ResponseReasoningItemParam as ResponseReasoningItemParam from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent as ResponseMcpCallFailedEvent -from .response_reasoning_delta_event import ResponseReasoningDeltaEvent as ResponseReasoningDeltaEvent from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent from .response_function_tool_call_item import ResponseFunctionToolCallItem as ResponseFunctionToolCallItem diff --git a/src/openai/types/responses/response.py 
b/src/openai/types/responses/response.py index db85d87f4e..2af85d03fb 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -176,7 +176,7 @@ class Response(BaseModel): - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service diff --git a/src/openai/types/responses/response_code_interpreter_tool_call.py b/src/openai/types/responses/response_code_interpreter_tool_call.py index 7e4dc9f984..257937118b 100644 --- a/src/openai/types/responses/response_code_interpreter_tool_call.py +++ b/src/openai/types/responses/response_code_interpreter_tool_call.py @@ -45,7 +45,11 @@ class ResponseCodeInterpreterToolCall(BaseModel): """ status: Literal["in_progress", "completed", "incomplete", "interpreting", "failed"] - """The status of the code interpreter tool call.""" + """The status of the code interpreter tool call. + + Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and + `failed`. + """ type: Literal["code_interpreter_call"] """The type of the code interpreter tool call. Always `code_interpreter_call`.""" diff --git a/src/openai/types/responses/response_code_interpreter_tool_call_param.py b/src/openai/types/responses/response_code_interpreter_tool_call_param.py index 69e01f99ed..435091001f 100644 --- a/src/openai/types/responses/response_code_interpreter_tool_call_param.py +++ b/src/openai/types/responses/response_code_interpreter_tool_call_param.py @@ -44,7 +44,11 @@ class ResponseCodeInterpreterToolCallParam(TypedDict, total=False): """ status: Required[Literal["in_progress", "completed", "incomplete", "interpreting", "failed"]] - """The status of the code interpreter tool call.""" + """The status of the code interpreter tool call. + + Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and + `failed`. + """ type: Required[Literal["code_interpreter_call"]] """The type of the code interpreter tool call. Always `code_interpreter_call`.""" diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index 0187e1fda8..08feefd081 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -136,7 +136,7 @@ class ResponseCreateParamsBase(TypedDict, total=False): - If set to 'auto', then the request will be processed with the service tier configured in the Project settings. Unless otherwise configured, the Project will use 'default'. - - If set to 'default', then the requset will be processed with the standard + - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. 
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or 'priority', then the request will be processed with the corresponding service diff --git a/src/openai/types/responses/response_mcp_call_arguments_delta_event.py b/src/openai/types/responses/response_mcp_call_arguments_delta_event.py index 8481506dc3..54eff38373 100644 --- a/src/openai/types/responses/response_mcp_call_arguments_delta_event.py +++ b/src/openai/types/responses/response_mcp_call_arguments_delta_event.py @@ -8,8 +8,11 @@ class ResponseMcpCallArgumentsDeltaEvent(BaseModel): - delta: object - """The partial update to the arguments for the MCP tool call.""" + delta: str + """ + A JSON string containing the partial update to the arguments for the MCP tool + call. + """ item_id: str """The unique identifier of the MCP tool call item being processed.""" diff --git a/src/openai/types/responses/response_mcp_call_arguments_done_event.py b/src/openai/types/responses/response_mcp_call_arguments_done_event.py index 4be09d4862..59ce9bc944 100644 --- a/src/openai/types/responses/response_mcp_call_arguments_done_event.py +++ b/src/openai/types/responses/response_mcp_call_arguments_done_event.py @@ -8,8 +8,8 @@ class ResponseMcpCallArgumentsDoneEvent(BaseModel): - arguments: object - """The finalized arguments for the MCP tool call.""" + arguments: str + """A JSON string containing the finalized arguments for the MCP tool call.""" item_id: str """The unique identifier of the MCP tool call item being processed.""" diff --git a/src/openai/types/responses/response_mcp_call_completed_event.py b/src/openai/types/responses/response_mcp_call_completed_event.py index 009fbc3c60..2fee5dff81 100644 --- a/src/openai/types/responses/response_mcp_call_completed_event.py +++ b/src/openai/types/responses/response_mcp_call_completed_event.py @@ -8,6 +8,12 @@ class ResponseMcpCallCompletedEvent(BaseModel): + item_id: str + """The ID of the MCP tool call item that completed.""" + + output_index: int + """The index of the output item that completed.""" + sequence_number: int """The sequence number of this event.""" diff --git a/src/openai/types/responses/response_mcp_call_failed_event.py b/src/openai/types/responses/response_mcp_call_failed_event.py index e6edc6ded5..ca41ab7159 100644 --- a/src/openai/types/responses/response_mcp_call_failed_event.py +++ b/src/openai/types/responses/response_mcp_call_failed_event.py @@ -8,6 +8,12 @@ class ResponseMcpCallFailedEvent(BaseModel): + item_id: str + """The ID of the MCP tool call item that failed.""" + + output_index: int + """The index of the output item that failed.""" + sequence_number: int """The sequence number of this event.""" diff --git a/src/openai/types/responses/response_mcp_list_tools_completed_event.py b/src/openai/types/responses/response_mcp_list_tools_completed_event.py index 6290c3cf9f..c60ad88ee5 100644 --- a/src/openai/types/responses/response_mcp_list_tools_completed_event.py +++ b/src/openai/types/responses/response_mcp_list_tools_completed_event.py @@ -8,6 +8,12 @@ class ResponseMcpListToolsCompletedEvent(BaseModel): + item_id: str + """The ID of the MCP tool call item that produced this output.""" + + output_index: int + """The index of the output item that was processed.""" + sequence_number: int """The sequence number of this event.""" diff --git a/src/openai/types/responses/response_mcp_list_tools_failed_event.py b/src/openai/types/responses/response_mcp_list_tools_failed_event.py index 1f6e325b36..0c966c447a 100644 --- 
a/src/openai/types/responses/response_mcp_list_tools_failed_event.py +++ b/src/openai/types/responses/response_mcp_list_tools_failed_event.py @@ -8,6 +8,12 @@ class ResponseMcpListToolsFailedEvent(BaseModel): + item_id: str + """The ID of the MCP tool call item that failed.""" + + output_index: int + """The index of the output item that failed.""" + sequence_number: int """The sequence number of this event.""" diff --git a/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py b/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py index 236e5fe6e7..f451db1ed5 100644 --- a/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py +++ b/src/openai/types/responses/response_mcp_list_tools_in_progress_event.py @@ -8,6 +8,12 @@ class ResponseMcpListToolsInProgressEvent(BaseModel): + item_id: str + """The ID of the MCP tool call item that is being processed.""" + + output_index: int + """The index of the output item that is being processed.""" + sequence_number: int """The sequence number of this event.""" diff --git a/src/openai/types/responses/response_reasoning_delta_event.py b/src/openai/types/responses/response_reasoning_delta_event.py deleted file mode 100644 index f37d3d370c..0000000000 --- a/src/openai/types/responses/response_reasoning_delta_event.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ResponseReasoningDeltaEvent"] - - -class ResponseReasoningDeltaEvent(BaseModel): - content_index: int - """The index of the reasoning content part within the output item.""" - - delta: object - """The partial update to the reasoning content.""" - - item_id: str - """The unique identifier of the item for which reasoning is being updated.""" - - output_index: int - """The index of the output item in the response's output array.""" - - sequence_number: int - """The sequence number of this event.""" - - type: Literal["response.reasoning.delta"] - """The type of the event. Always 'response.reasoning.delta'.""" diff --git a/src/openai/types/responses/response_reasoning_done_event.py b/src/openai/types/responses/response_reasoning_done_event.py deleted file mode 100644 index 9f8b127d7e..0000000000 --- a/src/openai/types/responses/response_reasoning_done_event.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ResponseReasoningDoneEvent"] - - -class ResponseReasoningDoneEvent(BaseModel): - content_index: int - """The index of the reasoning content part within the output item.""" - - item_id: str - """The unique identifier of the item for which reasoning is finalized.""" - - output_index: int - """The index of the output item in the response's output array.""" - - sequence_number: int - """The sequence number of this event.""" - - text: str - """The finalized reasoning text.""" - - type: Literal["response.reasoning.done"] - """The type of the event. 
Always 'response.reasoning.done'.""" diff --git a/src/openai/types/responses/response_stream_event.py b/src/openai/types/responses/response_stream_event.py index 24a83f1aa2..98e1d6c34d 100644 --- a/src/openai/types/responses/response_stream_event.py +++ b/src/openai/types/responses/response_stream_event.py @@ -17,9 +17,7 @@ from .response_in_progress_event import ResponseInProgressEvent from .response_refusal_done_event import ResponseRefusalDoneEvent from .response_refusal_delta_event import ResponseRefusalDeltaEvent -from .response_reasoning_done_event import ResponseReasoningDoneEvent from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent -from .response_reasoning_delta_event import ResponseReasoningDeltaEvent from .response_output_item_done_event import ResponseOutputItemDoneEvent from .response_content_part_done_event import ResponseContentPartDoneEvent from .response_output_item_added_event import ResponseOutputItemAddedEvent @@ -111,8 +109,6 @@ ResponseMcpListToolsInProgressEvent, ResponseOutputTextAnnotationAddedEvent, ResponseQueuedEvent, - ResponseReasoningDeltaEvent, - ResponseReasoningDoneEvent, ResponseReasoningSummaryDeltaEvent, ResponseReasoningSummaryDoneEvent, ], diff --git a/src/openai/types/responses/response_text_delta_event.py b/src/openai/types/responses/response_text_delta_event.py index 7e4aec7024..b5379b7ac3 100644 --- a/src/openai/types/responses/response_text_delta_event.py +++ b/src/openai/types/responses/response_text_delta_event.py @@ -1,10 +1,30 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import List, Optional from typing_extensions import Literal from ..._models import BaseModel -__all__ = ["ResponseTextDeltaEvent"] +__all__ = ["ResponseTextDeltaEvent", "Logprob", "LogprobTopLogprob"] + + +class LogprobTopLogprob(BaseModel): + token: Optional[str] = None + """A possible text token.""" + + logprob: Optional[float] = None + """The log probability of this token.""" + + +class Logprob(BaseModel): + token: str + """A possible text token.""" + + logprob: float + """The log probability of this token.""" + + top_logprobs: Optional[List[LogprobTopLogprob]] = None + """The log probability of the top 20 most likely tokens.""" class ResponseTextDeltaEvent(BaseModel): @@ -17,6 +37,9 @@ class ResponseTextDeltaEvent(BaseModel): item_id: str """The ID of the output item that the text delta was added to.""" + logprobs: List[Logprob] + """The log probabilities of the tokens in the delta.""" + output_index: int """The index of the output item that the text delta was added to.""" diff --git a/src/openai/types/responses/response_text_done_event.py b/src/openai/types/responses/response_text_done_event.py index 0d5ed4dd19..d9776a1844 100644 --- a/src/openai/types/responses/response_text_done_event.py +++ b/src/openai/types/responses/response_text_done_event.py @@ -1,10 +1,30 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+from typing import List, Optional from typing_extensions import Literal from ..._models import BaseModel -__all__ = ["ResponseTextDoneEvent"] +__all__ = ["ResponseTextDoneEvent", "Logprob", "LogprobTopLogprob"] + + +class LogprobTopLogprob(BaseModel): + token: Optional[str] = None + """A possible text token.""" + + logprob: Optional[float] = None + """The log probability of this token.""" + + +class Logprob(BaseModel): + token: str + """A possible text token.""" + + logprob: float + """The log probability of this token.""" + + top_logprobs: Optional[List[LogprobTopLogprob]] = None + """The log probability of the top 20 most likely tokens.""" class ResponseTextDoneEvent(BaseModel): @@ -14,6 +34,9 @@ class ResponseTextDoneEvent(BaseModel): item_id: str """The ID of the output item that the text content is finalized.""" + logprobs: List[Logprob] + """The log probabilities of the tokens in the delta.""" + output_index: int """The index of the output item that the text content is finalized.""" diff --git a/src/openai/types/shared/function_definition.py b/src/openai/types/shared/function_definition.py index 06baa23170..33ebb9ad3e 100644 --- a/src/openai/types/shared/function_definition.py +++ b/src/openai/types/shared/function_definition.py @@ -39,5 +39,5 @@ class FunctionDefinition(BaseModel): If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the - [function calling guide](docs/guides/function-calling). + [function calling guide](https://platform.openai.com/docs/guides/function-calling). """ diff --git a/src/openai/types/shared_params/function_definition.py b/src/openai/types/shared_params/function_definition.py index d45ec13f1e..b3fdaf86ff 100644 --- a/src/openai/types/shared_params/function_definition.py +++ b/src/openai/types/shared_params/function_definition.py @@ -41,5 +41,5 @@ class FunctionDefinition(TypedDict, total=False): If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the - [function calling guide](docs/guides/function-calling). + [function calling guide](https://platform.openai.com/docs/guides/function-calling). 
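Illustrative example (not part of the patch): the hunks above turn MCP argument deltas into JSON string fragments and add token `logprobs` to the text delta events. A sketch of consuming the new shapes, assuming a Responses streaming iterator `stream` and that these event classes are re-exported from `openai.types.responses`:

    import json

    from openai.types.responses import (
        ResponseMcpCallArgumentsDeltaEvent,
        ResponseMcpCallArgumentsDoneEvent,
        ResponseTextDeltaEvent,
    )

    buffers = {}  # item_id -> accumulated JSON fragments

    for event in stream:
        if isinstance(event, ResponseMcpCallArgumentsDeltaEvent):
            # `delta` is now a JSON string fragment, so chunks concatenate.
            buffers[event.item_id] = buffers.get(event.item_id, "") + event.delta
        elif isinstance(event, ResponseMcpCallArgumentsDoneEvent):
            # `arguments` is the finalized JSON string for the tool call.
            print(json.loads(event.arguments))
        elif isinstance(event, ResponseTextDeltaEvent):
            for lp in event.logprobs:  # new field added by this patch
                print(lp.token, lp.logprob)
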
""" From 48df6b4c30d7e4b1f8a60cf3d34bce8dab06a30b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Jul 2025 12:04:02 +0000 Subject: [PATCH 36/90] fix(parsing): parse extra field types --- src/openai/_models.py | 25 +++++++++++++++++++++++-- tests/test_models.py | 29 ++++++++++++++++++++++++++++- 2 files changed, 51 insertions(+), 3 deletions(-) diff --git a/src/openai/_models.py b/src/openai/_models.py index dee5551948..d84d51d913 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -233,14 +233,18 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride] else: fields_values[name] = field_get_default(field) + extra_field_type = _get_extra_fields_type(__cls) + _extra = {} for key, value in values.items(): if key not in model_fields: + parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value + if PYDANTIC_V2: - _extra[key] = value + _extra[key] = parsed else: _fields_set.add(key) - fields_values[key] = value + fields_values[key] = parsed object.__setattr__(m, "__dict__", fields_values) @@ -395,6 +399,23 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object: return construct_type(value=value, type_=type_, metadata=getattr(field, "metadata", None)) +def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None: + if not PYDANTIC_V2: + # TODO + return None + + schema = cls.__pydantic_core_schema__ + if schema["type"] == "model": + fields = schema["schema"] + if fields["type"] == "model-fields": + extras = fields.get("extras_schema") + if extras and "cls" in extras: + # mypy can't narrow the type + return extras["cls"] # type: ignore[no-any-return] + + return None + + def is_basemodel(type_: type) -> bool: """Returns whether or not the given type is either a `BaseModel` or a union of `BaseModel`""" if is_union(type_): diff --git a/tests/test_models.py b/tests/test_models.py index 7262f45006..54a3a32048 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -1,5 +1,5 @@ import json -from typing import Any, Dict, List, Union, Optional, cast +from typing import TYPE_CHECKING, Any, Dict, List, Union, Optional, cast from datetime import datetime, timezone from typing_extensions import Literal, Annotated, TypeAliasType @@ -934,3 +934,30 @@ class Type2(BaseModel): ) assert isinstance(model, Type1) assert isinstance(model.value, InnerType2) + + +@pytest.mark.skipif(not PYDANTIC_V2, reason="this is only supported in pydantic v2 for now") +def test_extra_properties() -> None: + class Item(BaseModel): + prop: int + + class Model(BaseModel): + __pydantic_extra__: Dict[str, Item] = Field(init=False) # pyright: ignore[reportIncompatibleVariableOverride] + + other: str + + if TYPE_CHECKING: + + def __getattr__(self, attr: str) -> Item: ... 
+ + model = construct_type( + type_=Model, + value={ + "a": {"prop": 1}, + "other": "foo", + }, + ) + assert isinstance(model, Model) + assert model.a.prop == 1 + assert isinstance(model.a, Item) + assert model.other == "foo" From e6c6757553bbdb777c31d0daf5916fb9e2b47ff8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 22 Jul 2025 12:04:35 +0000 Subject: [PATCH 37/90] release: 1.97.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 17 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 7b33636f46..9cdfd7b049 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.97.0" + ".": "1.97.1" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 2e603f06be..0c8d06cbb6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 1.97.1 (2025-07-22) + +Full Changelog: [v1.97.0...v1.97.1](https://github.com/openai/openai-python/compare/v1.97.0...v1.97.1) + +### Bug Fixes + +* **parsing:** ignore empty metadata ([58c359f](https://github.com/openai/openai-python/commit/58c359ff67fd6103268e4405600fd58844b6f27b)) +* **parsing:** parse extra field types ([d524b7e](https://github.com/openai/openai-python/commit/d524b7e201418ccc9b5c2206da06d1be011808e5)) + + +### Chores + +* **api:** event shapes more accurate ([f3a9a92](https://github.com/openai/openai-python/commit/f3a9a9229280ecb7e0b2779dd44290df6d9824ef)) + ## 1.97.0 (2025-07-16) Full Changelog: [v1.96.1...v1.97.0](https://github.com/openai/openai-python/compare/v1.96.1...v1.97.0) diff --git a/pyproject.toml b/pyproject.toml index 533379d52a..af1366b34e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.97.0" +version = "1.97.1" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 8e5ed5fa86..9073c643cc 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.97.0" # x-release-please-version +__version__ = "1.97.1" # x-release-please-version From 48188cc8d5af8c8c4359f84848ea9e436739819f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 30 Jul 2025 07:40:14 -0400 Subject: [PATCH 38/90] release: 1.97.2 (#2494) * codegen metadata * fix(parsing): ignore empty metadata * chore(internal): refactor stream event processing to be more future proof * fixup! * fixup! * fixup! 
* update comment * chore(project): add settings file for vscode * flip logic around * release: 1.97.2 --------- Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com> Co-authored-by: David Meadows --- .gitignore | 1 - .release-please-manifest.json | 2 +- .vscode/settings.json | 3 +++ CHANGELOG.md | 9 +++++++++ pyproject.toml | 2 +- src/openai/_streaming.py | 33 ++++++++++++++------------------- src/openai/_version.py | 2 +- 7 files changed, 29 insertions(+), 23 deletions(-) create mode 100644 .vscode/settings.json diff --git a/.gitignore b/.gitignore index 70815df7f6..55c6ca861f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,4 @@ .prism.log -.vscode _dev __pycache__ diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 9cdfd7b049..1137af1259 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.97.1" + ".": "1.97.2" } \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000000..5b01030785 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "python.analysis.importFormat": "relative", +} diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c8d06cbb6..945e224cf9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 1.97.2 (2025-07-30) + +Full Changelog: [v1.97.1...v1.97.2](https://github.com/openai/openai-python/compare/v1.97.1...v1.97.2) + +### Chores + +* **client:** refactor streaming slightly to better future proof it ([71c0c74](https://github.com/openai/openai-python/commit/71c0c747132221b798e419bc5a37baf67173d34e)) +* **project:** add settings file for vscode ([29c22c9](https://github.com/openai/openai-python/commit/29c22c90fd229983355089f95d0bba9de15efedb)) + ## 1.97.1 (2025-07-22) Full Changelog: [v1.97.0...v1.97.1](https://github.com/openai/openai-python/compare/v1.97.0...v1.97.1) diff --git a/pyproject.toml b/pyproject.toml index af1366b34e..5b59053d02 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.97.1" +version = "1.97.2" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_streaming.py b/src/openai/_streaming.py index fa0a30e183..f586de74ff 100644 --- a/src/openai/_streaming.py +++ b/src/openai/_streaming.py @@ -59,14 +59,11 @@ def __stream__(self) -> Iterator[_T]: if sse.data.startswith("[DONE]"): break - if sse.event is None or ( - sse.event.startswith("response.") or - sse.event.startswith("transcript.") or - sse.event.startswith("image_edit.") or - sse.event.startswith("image_generation.") - ): + # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data + if sse.event and sse.event.startswith("thread."): data = sse.json() - if is_mapping(data) and data.get("error"): + + if sse.event == "error" and is_mapping(data) and data.get("error"): message = None error = data.get("error") if is_mapping(error): @@ -80,12 +77,10 @@ def __stream__(self) -> Iterator[_T]: body=data["error"], ) - yield process_data(data=data, cast_to=cast_to, response=response) - + yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) else: data = sse.json() - - if sse.event == "error" and is_mapping(data) and data.get("error"): + if is_mapping(data) and data.get("error"): message = None error = data.get("error") if is_mapping(error): @@ -99,7 +94,7 @@ def __stream__(self) -> 
Iterator[_T]: body=data["error"], ) - yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) + yield process_data(data=data, cast_to=cast_to, response=response) # Ensure the entire stream is consumed for _sse in iterator: @@ -166,9 +161,11 @@ async def __stream__(self) -> AsyncIterator[_T]: if sse.data.startswith("[DONE]"): break - if sse.event is None or sse.event.startswith("response.") or sse.event.startswith("transcript."): + # we have to special case the Assistants `thread.` events since we won't have an "event" key in the data + if sse.event and sse.event.startswith("thread."): data = sse.json() - if is_mapping(data) and data.get("error"): + + if sse.event == "error" and is_mapping(data) and data.get("error"): message = None error = data.get("error") if is_mapping(error): @@ -182,12 +179,10 @@ async def __stream__(self) -> AsyncIterator[_T]: body=data["error"], ) - yield process_data(data=data, cast_to=cast_to, response=response) - + yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) else: data = sse.json() - - if sse.event == "error" and is_mapping(data) and data.get("error"): + if is_mapping(data) and data.get("error"): message = None error = data.get("error") if is_mapping(error): @@ -201,7 +196,7 @@ async def __stream__(self) -> AsyncIterator[_T]: body=data["error"], ) - yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) + yield process_data(data=data, cast_to=cast_to, response=response) # Ensure the entire stream is consumed async for _sse in iterator: diff --git a/src/openai/_version.py b/src/openai/_version.py index 9073c643cc..59fb46ac23 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.97.1" # x-release-please-version +__version__ = "1.97.2" # x-release-please-version From a3315d9fcc17d7583603476f088929fb2b9e71ca Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 30 Jul 2025 08:47:13 -0400 Subject: [PATCH 39/90] release: 1.98.0 (#2503) * feat(api): manual updates * release: 1.98.0 --------- Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com> --- .release-please-manifest.json | 2 +- .stats.yml | 6 +- CHANGELOG.md | 8 ++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- .../resources/chat/completions/completions.py | 128 +++++++++++++++--- src/openai/resources/responses/responses.py | 128 +++++++++++++++--- src/openai/types/chat/__init__.py | 2 + .../chat_completion_content_part_image.py | 27 ++++ .../chat/chat_completion_content_part_text.py | 15 ++ .../chat/chat_completion_store_message.py | 15 +- .../types/chat/completion_create_params.py | 25 +++- src/openai/types/responses/response.py | 25 +++- .../types/responses/response_create_params.py | 25 +++- tests/api_resources/chat/test_completions.py | 8 ++ tests/api_resources/test_responses.py | 8 ++ 16 files changed, 371 insertions(+), 55 deletions(-) create mode 100644 src/openai/types/chat/chat_completion_content_part_image.py create mode 100644 src/openai/types/chat/chat_completion_content_part_text.py diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 1137af1259..d12300ea76 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.97.2" + ".": "1.98.0" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index 2dc4f680a9..e7fb0bdf9b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b2a451656ca64d30d174391ebfd94806b4de3ab76dc55b92843cfb7f1a54ecb6.yml -openapi_spec_hash: 27d9691b400f28c17ef063a1374048b0 -config_hash: e822d0c9082c8b312264403949243179 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-721e6ccaa72205ee14c71f8163129920464fb814b95d3df9567a9476bbd9b7fb.yml +openapi_spec_hash: 2115413a21df8b5bf9e4552a74df4312 +config_hash: 9606bb315a193bfd8da0459040143242 diff --git a/CHANGELOG.md b/CHANGELOG.md index 945e224cf9..669d5a5792 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.98.0 (2025-07-30) + +Full Changelog: [v1.97.2...v1.98.0](https://github.com/openai/openai-python/compare/v1.97.2...v1.98.0) + +### Features + +* **api:** manual updates ([88a8036](https://github.com/openai/openai-python/commit/88a8036c5ea186f36c57029ef4501a0833596f56)) + ## 1.97.2 (2025-07-30) Full Changelog: [v1.97.1...v1.97.2](https://github.com/openai/openai-python/compare/v1.97.1...v1.97.2) diff --git a/pyproject.toml b/pyproject.toml index 5b59053d02..6765611fc2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.97.2" +version = "1.98.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 59fb46ac23..ca890665bc 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.97.2" # x-release-please-version +__version__ = "1.98.0" # x-release-please-version diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py index 739aa662d4..c851851418 100644 --- a/src/openai/resources/chat/completions/completions.py +++ b/src/openai/resources/chat/completions/completions.py @@ -248,8 +248,10 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -388,6 +390,10 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning_effort: **o-series models only** Constrains effort on reasoning for @@ -406,6 +412,12 @@ def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you @@ -481,9 +493,11 @@ def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). web_search_options: This tool searches the web for relevant results to use in a response. 
Learn more about the @@ -520,8 +534,10 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -668,6 +684,10 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning_effort: **o-series models only** Constrains effort on reasoning for @@ -686,6 +706,12 @@ def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you @@ -752,9 +778,11 @@ def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). web_search_options: This tool searches the web for relevant results to use in a response. 
Learn more about the @@ -791,8 +819,10 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -939,6 +969,10 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning_effort: **o-series models only** Constrains effort on reasoning for @@ -957,6 +991,12 @@ def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you @@ -1023,9 +1063,11 @@ def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). web_search_options: This tool searches the web for relevant results to use in a response. 
Learn more about the @@ -1061,8 +1103,10 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -1104,8 +1148,10 @@ def create( "parallel_tool_calls": parallel_tool_calls, "prediction": prediction, "presence_penalty": presence_penalty, + "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": response_format, + "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, "stop": stop, @@ -1615,8 +1661,10 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -1755,6 +1803,10 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning_effort: **o-series models only** Constrains effort on reasoning for @@ -1773,6 +1825,12 @@ async def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you @@ -1848,9 +1906,11 @@ async def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. 
A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). web_search_options: This tool searches the web for relevant results to use in a response. Learn more about the @@ -1887,8 +1947,10 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -2035,6 +2097,10 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning_effort: **o-series models only** Constrains effort on reasoning for @@ -2053,6 +2119,12 @@ async def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you @@ -2119,9 +2191,11 @@ async def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). web_search_options: This tool searches the web for relevant results to use in a response. 
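Because the deprecated `user` field often carried a raw username or email, the docstring's hashing advice is worth making concrete. A sketch under the assumption that SHA-256 is an acceptable stable hash (the docstring only says "hashing", not which algorithm):

import hashlib

def to_safety_identifier(email: str) -> str:
    # One-way hash so no identifying information leaves your system;
    # SHA-256 is an assumption -- any stable hash satisfies the docstring.
    return hashlib.sha256(email.strip().lower().encode("utf-8")).hexdigest()

# Before: create(..., user="jane@example.com")
# After:  create(..., safety_identifier=to_safety_identifier("jane@example.com"))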
Learn more about the @@ -2158,8 +2232,10 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -2306,6 +2382,10 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning_effort: **o-series models only** Constrains effort on reasoning for @@ -2324,6 +2404,12 @@ async def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you @@ -2390,9 +2476,11 @@ async def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). web_search_options: This tool searches the web for relevant results to use in a response. 
Learn more about the @@ -2428,8 +2516,10 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -2471,8 +2561,10 @@ async def create( "parallel_tool_calls": parallel_tool_calls, "prediction": prediction, "presence_penalty": presence_penalty, + "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": response_format, + "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, "stop": stop, diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index fe99aa851d..8de46dbab8 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -87,7 +87,9 @@ def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, @@ -188,11 +190,21 @@ def create( prompt: Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning: **o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + service_tier: Specifies the processing type used for serving the request. - If set to 'auto', then the request will be processed with the service tier @@ -267,9 +279,11 @@ def create( - `disabled` (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. 
Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). extra_headers: Send extra headers @@ -297,7 +311,9 @@ def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -404,11 +420,21 @@ def create( prompt: Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning: **o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + service_tier: Specifies the processing type used for serving the request. - If set to 'auto', then the request will be processed with the service tier @@ -476,9 +502,11 @@ def create( - `disabled` (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). extra_headers: Send extra headers @@ -506,7 +534,9 @@ def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -613,11 +643,21 @@ def create( prompt: Reference to a prompt template and its variables. 
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning: **o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + service_tier: Specifies the processing type used for serving the request. - If set to 'auto', then the request will be processed with the service tier @@ -685,9 +725,11 @@ def create( - `disabled` (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). extra_headers: Send extra headers @@ -713,7 +755,9 @@ def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, @@ -747,7 +791,9 @@ def create( "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, "prompt": prompt, + "prompt_cache_key": prompt_cache_key, "reasoning": reasoning, + "safety_identifier": safety_identifier, "service_tier": service_tier, "store": store, "stream": stream, @@ -1352,7 +1398,9 @@ async def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, @@ -1453,11 +1501,21 @@ async def create( prompt: Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). 
+ prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning: **o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + service_tier: Specifies the processing type used for serving the request. - If set to 'auto', then the request will be processed with the service tier @@ -1532,9 +1590,11 @@ async def create( - `disabled` (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). extra_headers: Send extra headers @@ -1562,7 +1622,9 @@ async def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1669,11 +1731,21 @@ async def create( prompt: Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning: **o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + service_tier: Specifies the processing type used for serving the request. 
- If set to 'auto', then the request will be processed with the service tier @@ -1741,9 +1813,11 @@ async def create( - `disabled` (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). extra_headers: Send extra headers @@ -1771,7 +1845,9 @@ async def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, @@ -1878,11 +1954,21 @@ async def create( prompt: Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning: **o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + service_tier: Specifies the processing type used for serving the request. - If set to 'auto', then the request will be processed with the service tier @@ -1950,9 +2036,11 @@ async def create( - `disabled` (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). 
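The Responses API gains the identical pair of fields, so the same call pattern applies there. A sketch, again with `gpt-4.1` as an assumed model name:

from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4.1",  # assumed model name
    input="Summarize the release notes in one sentence.",
    prompt_cache_key="release-notes-summary",
    safety_identifier="a1b2c3d4e5",  # same stable per-user hash as in chat completions
)
print(response.output_text)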
extra_headers: Send extra headers @@ -1978,7 +2066,9 @@ async def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, @@ -2012,7 +2102,9 @@ async def create( "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, "prompt": prompt, + "prompt_cache_key": prompt_cache_key, "reasoning": reasoning, + "safety_identifier": safety_identifier, "service_tier": service_tier, "store": store, "stream": stream, diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py index 0945bcad11..dc26198567 100644 --- a/src/openai/types/chat/__init__.py +++ b/src/openai/types/chat/__init__.py @@ -28,7 +28,9 @@ from .chat_completion_store_message import ChatCompletionStoreMessage as ChatCompletionStoreMessage from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort as ChatCompletionReasoningEffort +from .chat_completion_content_part_text import ChatCompletionContentPartText as ChatCompletionContentPartText from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall +from .chat_completion_content_part_image import ChatCompletionContentPartImage as ChatCompletionContentPartImage from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam from .chat_completion_user_message_param import ChatCompletionUserMessageParam as ChatCompletionUserMessageParam diff --git a/src/openai/types/chat/chat_completion_content_part_image.py b/src/openai/types/chat/chat_completion_content_part_image.py new file mode 100644 index 0000000000..c1386b9dd3 --- /dev/null +++ b/src/openai/types/chat/chat_completion_content_part_image.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ChatCompletionContentPartImage", "ImageURL"] + + +class ImageURL(BaseModel): + url: str + """Either a URL of the image or the base64 encoded image data.""" + + detail: Optional[Literal["auto", "low", "high"]] = None + """Specifies the detail level of the image. + + Learn more in the + [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). + """ + + +class ChatCompletionContentPartImage(BaseModel): + image_url: ImageURL + + type: Literal["image_url"] + """The type of the content part.""" diff --git a/src/openai/types/chat/chat_completion_content_part_text.py b/src/openai/types/chat/chat_completion_content_part_text.py new file mode 100644 index 0000000000..f09f35f708 --- /dev/null +++ b/src/openai/types/chat/chat_completion_content_part_text.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ChatCompletionContentPartText"] + + +class ChatCompletionContentPartText(BaseModel): + text: str + """The text content.""" + + type: Literal["text"] + """The type of the content part.""" diff --git a/src/openai/types/chat/chat_completion_store_message.py b/src/openai/types/chat/chat_completion_store_message.py index 8dc093f7b8..661342716b 100644 --- a/src/openai/types/chat/chat_completion_store_message.py +++ b/src/openai/types/chat/chat_completion_store_message.py @@ -1,10 +1,23 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import List, Union, Optional +from typing_extensions import TypeAlias + from .chat_completion_message import ChatCompletionMessage +from .chat_completion_content_part_text import ChatCompletionContentPartText +from .chat_completion_content_part_image import ChatCompletionContentPartImage + +__all__ = ["ChatCompletionStoreMessage", "ChatCompletionStoreMessageContentPart"] -__all__ = ["ChatCompletionStoreMessage"] +ChatCompletionStoreMessageContentPart: TypeAlias = Union[ChatCompletionContentPartText, ChatCompletionContentPartImage] class ChatCompletionStoreMessage(ChatCompletionMessage): id: str """The identifier of the chat message.""" + + content_parts: Optional[List[ChatCompletionStoreMessageContentPart]] = None + """ + If a content parts array was provided, this is an array of `text` and + `image_url` parts. Otherwise, null. + """ diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 191793c18f..20d7c187f8 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -177,6 +177,13 @@ class CompletionCreateParamsBase(TypedDict, total=False): far, increasing the model's likelihood to talk about new topics. """ + prompt_cache_key: str + """ + Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + """ + reasoning_effort: Optional[ReasoningEffort] """**o-series models only** @@ -199,6 +206,15 @@ class CompletionCreateParamsBase(TypedDict, total=False): preferred for models that support it. """ + safety_identifier: str + """ + A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + """ + seed: Optional[int] """ This feature is in Beta. If specified, our system will make a best effort to @@ -293,11 +309,12 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ user: str - """A stable identifier for your end-users. + """This field is being replaced by `safety_identifier` and `prompt_cache_key`. - Used to boost cache hit rates by better bucketing similar requests and to help - OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + Use `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. 
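The `content_parts` field added to `ChatCompletionStoreMessage` above mirrors the request-side content parts. A consumer that only wants the text might flatten it like this (a sketch, not code from the patch):

from openai.types.chat import ChatCompletionStoreMessage

def stored_message_text(msg: ChatCompletionStoreMessage) -> str:
    # content_parts is None unless the original request sent a parts array.
    if msg.content_parts is None:
        return msg.content or ""
    # Parts are a union of text and image_url parts; keep only the text ones.
    return "".join(part.text for part in msg.content_parts if part.type == "text")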
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). """ web_search_options: WebSearchOptions diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index 2af85d03fb..7db466dfe7 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -163,6 +163,13 @@ class Response(BaseModel): [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). """ + prompt_cache_key: Optional[str] = None + """ + Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + """ + reasoning: Optional[Reasoning] = None """**o-series models only** @@ -170,6 +177,15 @@ class Response(BaseModel): [reasoning models](https://platform.openai.com/docs/guides/reasoning). """ + safety_identifier: Optional[str] = None + """ + A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + """ + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = None """Specifies the processing type used for serving the request. @@ -229,11 +245,12 @@ class Response(BaseModel): """ user: Optional[str] = None - """A stable identifier for your end-users. + """This field is being replaced by `safety_identifier` and `prompt_cache_key`. - Used to boost cache hit rates by better bucketing similar requests and to help - OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + Use `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). """ @property diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index 08feefd081..4a78d7c028 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -123,6 +123,13 @@ class ResponseCreateParamsBase(TypedDict, total=False): [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). """ + prompt_cache_key: str + """ + Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + """ + reasoning: Optional[Reasoning] """**o-series models only** @@ -130,6 +137,15 @@ class ResponseCreateParamsBase(TypedDict, total=False): [reasoning models](https://platform.openai.com/docs/guides/reasoning). """ + safety_identifier: str + """ + A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. 
+ [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + """ + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] """Specifies the processing type used for serving the request. @@ -221,11 +237,12 @@ class ResponseCreateParamsBase(TypedDict, total=False): """ user: str - """A stable identifier for your end-users. + """This field is being replaced by `safety_identifier` and `prompt_cache_key`. - Used to boost cache hit rates by better bucketing similar requests and to help - OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + Use `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). """ diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index aa8f58f0e5..2758d980ed 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -72,8 +72,10 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "type": "content", }, presence_penalty=-2, + prompt_cache_key="prompt-cache-key-1234", reasoning_effort="low", response_format={"type": "text"}, + safety_identifier="safety-identifier-1234", seed=-9007199254740991, service_tier="auto", stop="\n", @@ -199,8 +201,10 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "type": "content", }, presence_penalty=-2, + prompt_cache_key="prompt-cache-key-1234", reasoning_effort="low", response_format={"type": "text"}, + safety_identifier="safety-identifier-1234", seed=-9007199254740991, service_tier="auto", stop="\n", @@ -501,8 +505,10 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "type": "content", }, presence_penalty=-2, + prompt_cache_key="prompt-cache-key-1234", reasoning_effort="low", response_format={"type": "text"}, + safety_identifier="safety-identifier-1234", seed=-9007199254740991, service_tier="auto", stop="\n", @@ -628,8 +634,10 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "type": "content", }, presence_penalty=-2, + prompt_cache_key="prompt-cache-key-1234", reasoning_effort="low", response_format={"type": "text"}, + safety_identifier="safety-identifier-1234", seed=-9007199254740991, service_tier="auto", stop="\n", diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py index 158654ee70..63e47d8a69 100644 --- a/tests/api_resources/test_responses.py +++ b/tests/api_resources/test_responses.py @@ -43,11 +43,13 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "variables": {"foo": "string"}, "version": "version", }, + prompt_cache_key="prompt-cache-key-1234", reasoning={ "effort": "low", "generate_summary": "auto", "summary": "auto", }, + safety_identifier="safety-identifier-1234", service_tier="auto", store=True, stream=False, @@ -116,11 +118,13 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "variables": {"foo": "string"}, "version": "version", }, + prompt_cache_key="prompt-cache-key-1234", reasoning={ "effort": "low", "generate_summary": "auto", "summary": "auto", }, + 
safety_identifier="safety-identifier-1234", service_tier="auto", store=True, temperature=1, @@ -380,11 +384,13 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "variables": {"foo": "string"}, "version": "version", }, + prompt_cache_key="prompt-cache-key-1234", reasoning={ "effort": "low", "generate_summary": "auto", "summary": "auto", }, + safety_identifier="safety-identifier-1234", service_tier="auto", store=True, stream=False, @@ -453,11 +459,13 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "variables": {"foo": "string"}, "version": "version", }, + prompt_cache_key="prompt-cache-key-1234", reasoning={ "effort": "low", "generate_summary": "auto", "summary": "auto", }, + safety_identifier="safety-identifier-1234", service_tier="auto", store=True, temperature=1, From b204d41e0f1430b23207bbc2b809fa39e17b3564 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 30 Jul 2025 16:49:47 +0100 Subject: [PATCH 40/90] fix: add missing prompt_cache_key & prompt_cache_key params --- .../resources/chat/completions/completions.py | 16 ++++++++++++++++ src/openai/resources/responses/responses.py | 8 ++++++++ 2 files changed, 24 insertions(+) diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py index c851851418..cd1cb2bd7f 100644 --- a/src/openai/resources/chat/completions/completions.py +++ b/src/openai/resources/chat/completions/completions.py @@ -101,7 +101,9 @@ def parse( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -197,8 +199,10 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "parallel_tool_calls": parallel_tool_calls, "prediction": prediction, "presence_penalty": presence_penalty, + "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": _type_to_response_format(response_format), + "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, "stop": stop, @@ -1378,7 +1382,9 @@ def stream( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -1445,7 +1451,9 @@ def stream( parallel_tool_calls=parallel_tool_calls, prediction=prediction, presence_penalty=presence_penalty, + prompt_cache_key=prompt_cache_key, reasoning_effort=reasoning_effort, + safety_identifier=safety_identifier, seed=seed, service_tier=service_tier, store=store, @@ -1514,7 +1522,9 @@ async def parse( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: 
Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -1610,8 +1620,10 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "parallel_tool_calls": parallel_tool_calls, "prediction": prediction, "presence_penalty": presence_penalty, + "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": _type_to_response_format(response_format), + "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, "store": store, @@ -2791,7 +2803,9 @@ def stream( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -2859,7 +2873,9 @@ def stream( parallel_tool_calls=parallel_tool_calls, prediction=prediction, presence_penalty=presence_penalty, + prompt_cache_key=prompt_cache_key, reasoning_effort=reasoning_effort, + safety_identifier=safety_identifier, seed=seed, service_tier=service_tier, stop=stop, diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index 8de46dbab8..6d2b133110 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -1001,7 +1001,9 @@ def parse( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, @@ -1053,7 +1055,9 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]: "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, "prompt": prompt, + "prompt_cache_key": prompt_cache_key, "reasoning": reasoning, + "safety_identifier": safety_identifier, "service_tier": service_tier, "store": store, "stream": stream, @@ -2316,7 +2320,9 @@ async def parse( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: 
Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, @@ -2368,7 +2374,9 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]: "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, "prompt": prompt, + "prompt_cache_key": prompt_cache_key, "reasoning": reasoning, + "safety_identifier": safety_identifier, "service_tier": service_tier, "store": store, "stream": stream, From b989e8c240533d7003d479a947bb1733df84fe71 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 30 Jul 2025 17:02:41 +0000 Subject: [PATCH 41/90] feat(client): support file upload requests --- src/openai/_base_client.py | 5 ++++- src/openai/_files.py | 8 ++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py index 3fe669259f..f71e00f51f 100644 --- a/src/openai/_base_client.py +++ b/src/openai/_base_client.py @@ -534,7 +534,10 @@ def _build_request( is_body_allowed = options.method.lower() != "get" if is_body_allowed: - kwargs["json"] = json_data if is_given(json_data) else None + if isinstance(json_data, bytes): + kwargs["content"] = json_data + else: + kwargs["json"] = json_data if is_given(json_data) else None kwargs["files"] = files else: headers.pop("Content-Type", None) diff --git a/src/openai/_files.py b/src/openai/_files.py index 801a0d2928..7b23ca084a 100644 --- a/src/openai/_files.py +++ b/src/openai/_files.py @@ -69,12 +69,12 @@ def _transform_file(file: FileTypes) -> HttpxFileTypes: return file if is_tuple_t(file): - return (file[0], _read_file_content(file[1]), *file[2:]) + return (file[0], read_file_content(file[1]), *file[2:]) raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple") -def _read_file_content(file: FileContent) -> HttpxFileContent: +def read_file_content(file: FileContent) -> HttpxFileContent: if isinstance(file, os.PathLike): return pathlib.Path(file).read_bytes() return file @@ -111,12 +111,12 @@ async def _async_transform_file(file: FileTypes) -> HttpxFileTypes: return file if is_tuple_t(file): - return (file[0], await _async_read_file_content(file[1]), *file[2:]) + return (file[0], await async_read_file_content(file[1]), *file[2:]) raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple") -async def _async_read_file_content(file: FileContent) -> HttpxFileContent: +async def async_read_file_content(file: FileContent) -> HttpxFileContent: if isinstance(file, os.PathLike): return await anyio.Path(file).read_bytes() From 29ce19fcf98027c4e17f449666f62f3e8fce3486 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 5 Aug 2025 11:23:42 +0000 Subject: [PATCH 42/90] chore(internal): fix ruff target version --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 6765611fc2..a495edc1a8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -177,7 +177,7 @@ reportPrivateUsage = false [tool.ruff] line-length = 120 output-format = "grouped" -target-version = "py37" +target-version = "py38" [tool.ruff.format] docstring-code-format = true From 2026d53339e61bfd5134e835bce6187baaca5b04 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 5 Aug 2025 16:50:43 +0000 Subject: [PATCH 43/90] feat(api): manual updates --- 
.stats.yml | 6 ++-- api.md | 4 +-- src/openai/types/responses/__init__.py | 8 ++--- .../responses/response_reasoning_item.py | 19 ++++++++---- .../response_reasoning_item_param.py | 19 ++++++++---- .../response_reasoning_summary_delta_event.py | 30 ------------------- .../response_reasoning_summary_done_event.py | 27 ----------------- .../response_reasoning_text_delta_event.py | 27 +++++++++++++++++ .../response_reasoning_text_done_event.py | 27 +++++++++++++++++ .../types/responses/response_stream_event.py | 8 ++--- .../types/vector_store_search_params.py | 3 +- tests/api_resources/test_vector_stores.py | 4 +-- 12 files changed, 97 insertions(+), 85 deletions(-) delete mode 100644 src/openai/types/responses/response_reasoning_summary_delta_event.py delete mode 100644 src/openai/types/responses/response_reasoning_summary_done_event.py create mode 100644 src/openai/types/responses/response_reasoning_text_delta_event.py create mode 100644 src/openai/types/responses/response_reasoning_text_done_event.py diff --git a/.stats.yml b/.stats.yml index e7fb0bdf9b..f86fa668b1 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-721e6ccaa72205ee14c71f8163129920464fb814b95d3df9567a9476bbd9b7fb.yml -openapi_spec_hash: 2115413a21df8b5bf9e4552a74df4312 -config_hash: 9606bb315a193bfd8da0459040143242 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d6a16b25b969c3e5382e7d413de15bf83d5f7534d5c3ecce64d3a7e847418f9e.yml +openapi_spec_hash: 0c0bcf4aee9ca2a948dd14b890dfe728 +config_hash: aeff9289bd7f8c8482e4d738c3c2fde1 diff --git a/api.md b/api.md index 0280b886d1..657ac0905a 100644 --- a/api.md +++ b/api.md @@ -792,12 +792,12 @@ from openai.types.responses import ( ResponsePrompt, ResponseQueuedEvent, ResponseReasoningItem, - ResponseReasoningSummaryDeltaEvent, - ResponseReasoningSummaryDoneEvent, ResponseReasoningSummaryPartAddedEvent, ResponseReasoningSummaryPartDoneEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseReasoningSummaryTextDoneEvent, + ResponseReasoningTextDeltaEvent, + ResponseReasoningTextDoneEvent, ResponseRefusalDeltaEvent, ResponseRefusalDoneEvent, ResponseStatus, diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py index b563035e78..2e502ed69f 100644 --- a/src/openai/types/responses/__init__.py +++ b/src/openai/types/responses/__init__.py @@ -94,24 +94,20 @@ from .response_function_tool_call_param import ResponseFunctionToolCallParam as ResponseFunctionToolCallParam from .response_mcp_call_completed_event import ResponseMcpCallCompletedEvent as ResponseMcpCallCompletedEvent from .response_function_web_search_param import ResponseFunctionWebSearchParam as ResponseFunctionWebSearchParam +from .response_reasoning_text_done_event import ResponseReasoningTextDoneEvent as ResponseReasoningTextDoneEvent from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall as ResponseCodeInterpreterToolCall from .response_input_message_content_list import ResponseInputMessageContentList as ResponseInputMessageContentList from .response_mcp_call_in_progress_event import ResponseMcpCallInProgressEvent as ResponseMcpCallInProgressEvent +from .response_reasoning_text_delta_event import ResponseReasoningTextDeltaEvent as ResponseReasoningTextDeltaEvent from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent from 
.response_file_search_tool_call_param import ResponseFileSearchToolCallParam as ResponseFileSearchToolCallParam from .response_mcp_list_tools_failed_event import ResponseMcpListToolsFailedEvent as ResponseMcpListToolsFailedEvent from .response_audio_transcript_delta_event import ( ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, ) -from .response_reasoning_summary_done_event import ( - ResponseReasoningSummaryDoneEvent as ResponseReasoningSummaryDoneEvent, -) from .response_mcp_call_arguments_done_event import ( ResponseMcpCallArgumentsDoneEvent as ResponseMcpCallArgumentsDoneEvent, ) -from .response_reasoning_summary_delta_event import ( - ResponseReasoningSummaryDeltaEvent as ResponseReasoningSummaryDeltaEvent, -) from .response_computer_tool_call_output_item import ( ResponseComputerToolCallOutputItem as ResponseComputerToolCallOutputItem, ) diff --git a/src/openai/types/responses/response_reasoning_item.py b/src/openai/types/responses/response_reasoning_item.py index f5da7802f8..e5cb094e62 100644 --- a/src/openai/types/responses/response_reasoning_item.py +++ b/src/openai/types/responses/response_reasoning_item.py @@ -5,29 +5,38 @@ from ..._models import BaseModel -__all__ = ["ResponseReasoningItem", "Summary"] +__all__ = ["ResponseReasoningItem", "Summary", "Content"] class Summary(BaseModel): text: str - """ - A short summary of the reasoning used by the model when generating the response. - """ + """A summary of the reasoning output from the model so far.""" type: Literal["summary_text"] """The type of the object. Always `summary_text`.""" +class Content(BaseModel): + text: str + """Reasoning text output from the model.""" + + type: Literal["reasoning_text"] + """The type of the object. Always `reasoning_text`.""" + + class ResponseReasoningItem(BaseModel): id: str """The unique identifier of the reasoning content.""" summary: List[Summary] - """Reasoning text contents.""" + """Reasoning summary content.""" type: Literal["reasoning"] """The type of the object. Always `reasoning`.""" + content: Optional[List[Content]] = None + """Reasoning text content.""" + encrypted_content: Optional[str] = None """ The encrypted content of the reasoning item - populated when a response is diff --git a/src/openai/types/responses/response_reasoning_item_param.py b/src/openai/types/responses/response_reasoning_item_param.py index 2cfa5312ed..042b6c05db 100644 --- a/src/openai/types/responses/response_reasoning_item_param.py +++ b/src/openai/types/responses/response_reasoning_item_param.py @@ -5,29 +5,38 @@ from typing import Iterable, Optional from typing_extensions import Literal, Required, TypedDict -__all__ = ["ResponseReasoningItemParam", "Summary"] +__all__ = ["ResponseReasoningItemParam", "Summary", "Content"] class Summary(TypedDict, total=False): text: Required[str] - """ - A short summary of the reasoning used by the model when generating the response. - """ + """A summary of the reasoning output from the model so far.""" type: Required[Literal["summary_text"]] """The type of the object. Always `summary_text`.""" +class Content(TypedDict, total=False): + text: Required[str] + """Reasoning text output from the model.""" + + type: Required[Literal["reasoning_text"]] + """The type of the object. 
Always `reasoning_text`.""" + + class ResponseReasoningItemParam(TypedDict, total=False): id: Required[str] """The unique identifier of the reasoning content.""" summary: Required[Iterable[Summary]] - """Reasoning text contents.""" + """Reasoning summary content.""" type: Required[Literal["reasoning"]] """The type of the object. Always `reasoning`.""" + content: Iterable[Content] + """Reasoning text content.""" + encrypted_content: Optional[str] """ The encrypted content of the reasoning item - populated when a response is diff --git a/src/openai/types/responses/response_reasoning_summary_delta_event.py b/src/openai/types/responses/response_reasoning_summary_delta_event.py deleted file mode 100644 index 519a4f24ac..0000000000 --- a/src/openai/types/responses/response_reasoning_summary_delta_event.py +++ /dev/null @@ -1,30 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ResponseReasoningSummaryDeltaEvent"] - - -class ResponseReasoningSummaryDeltaEvent(BaseModel): - delta: object - """The partial update to the reasoning summary content.""" - - item_id: str - """ - The unique identifier of the item for which the reasoning summary is being - updated. - """ - - output_index: int - """The index of the output item in the response's output array.""" - - sequence_number: int - """The sequence number of this event.""" - - summary_index: int - """The index of the summary part within the output item.""" - - type: Literal["response.reasoning_summary.delta"] - """The type of the event. Always 'response.reasoning_summary.delta'.""" diff --git a/src/openai/types/responses/response_reasoning_summary_done_event.py b/src/openai/types/responses/response_reasoning_summary_done_event.py deleted file mode 100644 index 98bcf9cb9d..0000000000 --- a/src/openai/types/responses/response_reasoning_summary_done_event.py +++ /dev/null @@ -1,27 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["ResponseReasoningSummaryDoneEvent"] - - -class ResponseReasoningSummaryDoneEvent(BaseModel): - item_id: str - """The unique identifier of the item for which the reasoning summary is finalized.""" - - output_index: int - """The index of the output item in the response's output array.""" - - sequence_number: int - """The sequence number of this event.""" - - summary_index: int - """The index of the summary part within the output item.""" - - text: str - """The finalized reasoning summary text.""" - - type: Literal["response.reasoning_summary.done"] - """The type of the event. Always 'response.reasoning_summary.done'.""" diff --git a/src/openai/types/responses/response_reasoning_text_delta_event.py b/src/openai/types/responses/response_reasoning_text_delta_event.py new file mode 100644 index 0000000000..e1df893bac --- /dev/null +++ b/src/openai/types/responses/response_reasoning_text_delta_event.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseReasoningTextDeltaEvent"] + + +class ResponseReasoningTextDeltaEvent(BaseModel): + content_index: int + """The index of the reasoning content part this delta is associated with.""" + + delta: str + """The text delta that was added to the reasoning content.""" + + item_id: str + """The ID of the item this reasoning text delta is associated with.""" + + output_index: int + """The index of the output item this reasoning text delta is associated with.""" + + sequence_number: int + """The sequence number of this event.""" + + type: Literal["response.reasoning_text.delta"] + """The type of the event. Always `response.reasoning_text.delta`.""" diff --git a/src/openai/types/responses/response_reasoning_text_done_event.py b/src/openai/types/responses/response_reasoning_text_done_event.py new file mode 100644 index 0000000000..d22d984e47 --- /dev/null +++ b/src/openai/types/responses/response_reasoning_text_done_event.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseReasoningTextDoneEvent"] + + +class ResponseReasoningTextDoneEvent(BaseModel): + content_index: int + """The index of the reasoning content part.""" + + item_id: str + """The ID of the item this reasoning text is associated with.""" + + output_index: int + """The index of the output item this reasoning text is associated with.""" + + sequence_number: int + """The sequence number of this event.""" + + text: str + """The full text of the completed reasoning content.""" + + type: Literal["response.reasoning_text.done"] + """The type of the event. 
Always `response.reasoning_text.done`.""" diff --git a/src/openai/types/responses/response_stream_event.py b/src/openai/types/responses/response_stream_event.py index 98e1d6c34d..d62cf8969b 100644 --- a/src/openai/types/responses/response_stream_event.py +++ b/src/openai/types/responses/response_stream_event.py @@ -23,13 +23,13 @@ from .response_output_item_added_event import ResponseOutputItemAddedEvent from .response_content_part_added_event import ResponseContentPartAddedEvent from .response_mcp_call_completed_event import ResponseMcpCallCompletedEvent +from .response_reasoning_text_done_event import ResponseReasoningTextDoneEvent from .response_mcp_call_in_progress_event import ResponseMcpCallInProgressEvent +from .response_reasoning_text_delta_event import ResponseReasoningTextDeltaEvent from .response_audio_transcript_done_event import ResponseAudioTranscriptDoneEvent from .response_mcp_list_tools_failed_event import ResponseMcpListToolsFailedEvent from .response_audio_transcript_delta_event import ResponseAudioTranscriptDeltaEvent -from .response_reasoning_summary_done_event import ResponseReasoningSummaryDoneEvent from .response_mcp_call_arguments_done_event import ResponseMcpCallArgumentsDoneEvent -from .response_reasoning_summary_delta_event import ResponseReasoningSummaryDeltaEvent from .response_image_gen_call_completed_event import ResponseImageGenCallCompletedEvent from .response_mcp_call_arguments_delta_event import ResponseMcpCallArgumentsDeltaEvent from .response_mcp_list_tools_completed_event import ResponseMcpListToolsCompletedEvent @@ -88,6 +88,8 @@ ResponseReasoningSummaryPartDoneEvent, ResponseReasoningSummaryTextDeltaEvent, ResponseReasoningSummaryTextDoneEvent, + ResponseReasoningTextDeltaEvent, + ResponseReasoningTextDoneEvent, ResponseRefusalDeltaEvent, ResponseRefusalDoneEvent, ResponseTextDeltaEvent, @@ -109,8 +111,6 @@ ResponseMcpListToolsInProgressEvent, ResponseOutputTextAnnotationAddedEvent, ResponseQueuedEvent, - ResponseReasoningSummaryDeltaEvent, - ResponseReasoningSummaryDoneEvent, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/vector_store_search_params.py b/src/openai/types/vector_store_search_params.py index 17573d0f61..973c49ff5a 100644 --- a/src/openai/types/vector_store_search_params.py +++ b/src/openai/types/vector_store_search_params.py @@ -35,6 +35,7 @@ class VectorStoreSearchParams(TypedDict, total=False): class RankingOptions(TypedDict, total=False): - ranker: Literal["auto", "default-2024-11-15"] + ranker: Literal["none", "auto", "default-2024-11-15"] + """Enable re-ranking; set to `none` to disable, which can help reduce latency.""" score_threshold: float diff --git a/tests/api_resources/test_vector_stores.py b/tests/api_resources/test_vector_stores.py index 5af95fec41..dffd2b1d07 100644 --- a/tests/api_resources/test_vector_stores.py +++ b/tests/api_resources/test_vector_stores.py @@ -243,7 +243,7 @@ def test_method_search_with_all_params(self, client: OpenAI) -> None: }, max_num_results=1, ranking_options={ - "ranker": "auto", + "ranker": "none", "score_threshold": 0, }, rewrite_query=True, @@ -511,7 +511,7 @@ async def test_method_search_with_all_params(self, async_client: AsyncOpenAI) -> }, max_num_results=1, ranking_options={ - "ranker": "auto", + "ranker": "none", "score_threshold": 0, }, rewrite_query=True, From b0ad27a67681f1b6fb473cc75c642efa1f4941d5 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 5 Aug 2025 16:51:15 +0000 Subject: [PATCH 
44/90] release: 1.99.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 22 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d12300ea76..5c9b107c0d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.98.0" + ".": "1.99.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 669d5a5792..e7a49bcc9a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 1.99.0 (2025-08-05) + +Full Changelog: [v1.98.0...v1.99.0](https://github.com/openai/openai-python/compare/v1.98.0...v1.99.0) + +### Features + +* **api:** manual updates ([d4aa726](https://github.com/openai/openai-python/commit/d4aa72602bf489ef270154b881b3967d497d4220)) +* **client:** support file upload requests ([0772e6e](https://github.com/openai/openai-python/commit/0772e6ed8310e15539610b003dd73f72f474ec0c)) + + +### Bug Fixes + +* add missing prompt_cache_key & safety_identifier params ([00b49ae](https://github.com/openai/openai-python/commit/00b49ae8d44ea396ac0536fc3ce4658fc669e2f5)) + + +### Chores + +* **internal:** fix ruff target version ([aa6b252](https://github.com/openai/openai-python/commit/aa6b252ae0f25f195dede15755e05dd2f542f42d)) + ## 1.98.0 (2025-07-30) Full Changelog: [v1.97.2...v1.98.0](https://github.com/openai/openai-python/compare/v1.97.2...v1.98.0) diff --git a/pyproject.toml b/pyproject.toml index a495edc1a8..5e0f1fe3ea 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.98.0" +version = "1.99.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index ca890665bc..a5c9b3df71 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "openai" -__version__ = "1.98.0" # x-release-please-version +__version__ = "1.99.0" # x-release-please-version From fd2c3f12cf3574f92aab2877f2903e6756018867 Mon Sep 17 00:00:00 2001 From: David Meadows Date: Tue, 5 Aug 2025 14:08:26 -0400 Subject: [PATCH 45/90] fix(internal): correct event imports --- examples/image_stream.py | 2 +- src/openai/lib/streaming/responses/_events.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/image_stream.py b/examples/image_stream.py index c188e68717..eab5932534 100644 --- a/examples/image_stream.py +++ b/examples/image_stream.py @@ -50,4 +50,4 @@ def main() -> None: try: main() except Exception as error: - print(f"Error generating image: {error}") \ No newline at end of file + print(f"Error generating image: {error}") diff --git a/src/openai/lib/streaming/responses/_events.py b/src/openai/lib/streaming/responses/_events.py index 4c8a588944..de3342ec9d 100644 --- a/src/openai/lib/streaming/responses/_events.py +++ b/src/openai/lib/streaming/responses/_events.py @@ -31,11 +31,9 @@ ResponseAudioTranscriptDoneEvent, ResponseAudioTranscriptDeltaEvent, ResponseMcpCallArgumentsDoneEvent, - ResponseReasoningSummaryDoneEvent, ResponseImageGenCallCompletedEvent, ResponseMcpCallArgumentsDeltaEvent, ResponseMcpListToolsCompletedEvent, - ResponseReasoningSummaryDeltaEvent, ResponseImageGenCallGeneratingEvent, ResponseImageGenCallInProgressEvent, ResponseMcpListToolsInProgressEvent, @@ -59,6 +57,8 @@ ResponseCodeInterpreterCallInProgressEvent, ResponseCodeInterpreterCallInterpretingEvent, ) +from ....types.responses.response_reasoning_text_done_event import ResponseReasoningTextDoneEvent +from ....types.responses.response_reasoning_text_delta_event import ResponseReasoningTextDeltaEvent TextFormatT = TypeVar( "TextFormatT", @@ -137,8 +137,8 @@ class ResponseCompletedEvent(RawResponseCompletedEvent, GenericModel, Generic[Te ResponseMcpListToolsInProgressEvent, ResponseOutputTextAnnotationAddedEvent, ResponseQueuedEvent, - ResponseReasoningSummaryDeltaEvent, - ResponseReasoningSummaryDoneEvent, + ResponseReasoningTextDeltaEvent, + ResponseReasoningTextDoneEvent, ], PropertyInfo(discriminator="type"), ] From a8258744cbecf51321587fc870e8920bd2c07809 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 5 Aug 2025 18:08:59 +0000 Subject: [PATCH 46/90] release: 1.99.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 5c9b107c0d..41be9f1017 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.99.0" + ".": "1.99.1" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index e7a49bcc9a..4585135511 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.99.1 (2025-08-05) + +Full Changelog: [v1.99.0...v1.99.1](https://github.com/openai/openai-python/compare/v1.99.0...v1.99.1) + +### Bug Fixes + +* **internal:** correct event imports ([2a6d143](https://github.com/openai/openai-python/commit/2a6d1436288a07f67f6afefe5c0b5d6ae32d7e70)) + ## 1.99.0 (2025-08-05) Full Changelog: [v1.98.0...v1.99.0](https://github.com/openai/openai-python/compare/v1.98.0...v1.99.0) diff --git a/pyproject.toml b/pyproject.toml index 5e0f1fe3ea..c71e8c135b 100644 --- a/pyproject.toml +++ b/pyproject.toml 
@@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.99.0" +version = "1.99.1" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index a5c9b3df71..3fa80adba0 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.99.0" # x-release-please-version +__version__ = "1.99.1" # x-release-please-version From 936b2f0db2812c74c966a657d45acd972d2fd088 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 7 Aug 2025 10:58:11 +0100 Subject: [PATCH 47/90] chore(tests): bump inline-snapshot dependency --- requirements-dev.lock | 25 +++++--------------- tests/lib/chat/_utils.py | 12 ++++++++++ tests/lib/chat/test_completions.py | 6 ++--- tests/lib/chat/test_completions_streaming.py | 12 ++++++---- 4 files changed, 29 insertions(+), 26 deletions(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index 1a7500d569..b1886e036f 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -35,8 +35,6 @@ attrs==24.2.0 azure-core==1.31.0 # via azure-identity azure-identity==1.19.0 -black==24.10.0 - # via inline-snapshot certifi==2023.7.22 # via httpcore # via httpx @@ -46,9 +44,6 @@ cffi==1.16.0 # via sounddevice charset-normalizer==3.3.2 # via requests -click==8.1.7 - # via black - # via inline-snapshot colorlog==6.7.0 # via nox cryptography==42.0.7 @@ -66,7 +61,7 @@ exceptiongroup==1.2.2 # via trio execnet==2.1.1 # via pytest-xdist -executing==2.1.0 +executing==2.2.0 # via inline-snapshot filelock==3.12.4 # via virtualenv @@ -92,7 +87,7 @@ idna==3.4 importlib-metadata==7.0.0 iniconfig==2.0.0 # via pytest -inline-snapshot==0.10.2 +inline-snapshot==0.27.0 jiter==0.5.0 # via openai markdown-it-py==3.0.0 @@ -109,7 +104,6 @@ multidict==6.5.0 # via yarl mypy==1.14.1 mypy-extensions==1.0.0 - # via black # via mypy nest-asyncio==1.6.0 nodeenv==1.8.0 @@ -122,17 +116,13 @@ numpy==2.0.2 outcome==1.3.0.post0 # via trio packaging==23.2 - # via black # via nox # via pytest pandas==2.2.3 # via openai pandas-stubs==2.1.4.231227 # via openai -pathspec==0.12.1 - # via black platformdirs==3.11.0 - # via black # via virtualenv pluggy==1.5.0 # via pytest @@ -148,11 +138,13 @@ pydantic==2.10.3 pydantic-core==2.27.1 # via pydantic pygments==2.18.0 + # via pytest # via rich pyjwt==2.8.0 # via msal pyright==1.1.399 -pytest==8.3.3 +pytest==8.4.1 + # via inline-snapshot # via pytest-asyncio # via pytest-xdist pytest-asyncio==0.24.0 @@ -185,10 +177,8 @@ sortedcontainers==2.4.0 sounddevice==0.5.1 # via openai time-machine==2.9.0 -toml==0.10.2 - # via inline-snapshot tomli==2.0.2 - # via black + # via inline-snapshot # via mypy # via pytest tqdm==4.66.5 @@ -197,13 +187,10 @@ trio==0.27.0 types-pyaudio==0.2.16.20240516 types-pytz==2024.2.0.20241003 # via pandas-stubs -types-toml==0.10.8.20240310 - # via inline-snapshot types-tqdm==4.66.0.20240417 typing-extensions==4.12.2 # via azure-core # via azure-identity - # via black # via multidict # via mypy # via openai diff --git a/tests/lib/chat/_utils.py b/tests/lib/chat/_utils.py index f3982278f3..0cc1c99952 100644 --- a/tests/lib/chat/_utils.py +++ b/tests/lib/chat/_utils.py @@ -52,3 +52,15 @@ def get_caller_name(*, stacklevel: int = 1) -> str: def clear_locals(string: str, *, stacklevel: int) -> str: caller = get_caller_name(stacklevel=stacklevel + 1) return string.replace(f"{caller}..", "") + + +def 
get_snapshot_value(snapshot: Any) -> Any: + if not hasattr(snapshot, "_old_value"): + return snapshot + + old = snapshot._old_value + if not hasattr(old, "value"): + return old + + loader = getattr(old.value, "_load_value", None) + return loader() if loader else old.value diff --git a/tests/lib/chat/test_completions.py b/tests/lib/chat/test_completions.py index e7143bbb68..d0bd14ce9e 100644 --- a/tests/lib/chat/test_completions.py +++ b/tests/lib/chat/test_completions.py @@ -17,7 +17,7 @@ from openai._utils import assert_signatures_in_sync from openai._compat import PYDANTIC_V2 -from ._utils import print_obj +from ._utils import print_obj, get_snapshot_value from ...conftest import base_url from ..schema_types.query import Query @@ -1010,7 +1010,7 @@ def _on_response(response: httpx.Response) -> None: respx_mock.post("/chat/completions").mock( return_value=httpx.Response( 200, - content=content_snapshot._old_value, + content=get_snapshot_value(content_snapshot), headers={"content-type": "application/json"}, ) ) @@ -1052,7 +1052,7 @@ async def _on_response(response: httpx.Response) -> None: respx_mock.post("/chat/completions").mock( return_value=httpx.Response( 200, - content=content_snapshot._old_value, + content=get_snapshot_value(content_snapshot), headers={"content-type": "application/json"}, ) ) diff --git a/tests/lib/chat/test_completions_streaming.py b/tests/lib/chat/test_completions_streaming.py index 4680a73e3a..1daa98c6a0 100644 --- a/tests/lib/chat/test_completions_streaming.py +++ b/tests/lib/chat/test_completions_streaming.py @@ -9,7 +9,11 @@ import pytest from respx import MockRouter from pydantic import BaseModel -from inline_snapshot import external, snapshot, outsource +from inline_snapshot import ( + external, + snapshot, + outsource, # pyright: ignore[reportUnknownVariableType] +) import openai from openai import OpenAI, AsyncOpenAI @@ -26,7 +30,7 @@ ) from openai.lib._parsing._completions import ResponseFormatT -from ._utils import print_obj +from ._utils import print_obj, get_snapshot_value from ...conftest import base_url _T = TypeVar("_T") @@ -1123,7 +1127,7 @@ def _on_response(response: httpx.Response) -> None: respx_mock.post("/chat/completions").mock( return_value=httpx.Response( 200, - content=content_snapshot._old_value._load_value(), + content=get_snapshot_value(content_snapshot), headers={"content-type": "text/event-stream"}, ) ) @@ -1170,7 +1174,7 @@ def _on_response(response: httpx.Response) -> None: respx_mock.post("/chat/completions").mock( return_value=httpx.Response( 200, - content=content_snapshot._old_value._load_value(), + content=get_snapshot_value(content_snapshot), headers={"content-type": "text/event-stream"}, ) ) From caf837bb89a107e3658e56190b03f246ee23b917 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 7 Aug 2025 17:02:31 +0000 Subject: [PATCH 48/90] feat(api): adds GPT-5 and new API features: platform.openai.com/docs/guides/gpt-5 --- .stats.yml | 6 +- api.md | 17 ++ src/openai/lib/_parsing/_completions.py | 57 ++++++- src/openai/lib/_parsing/_responses.py | 1 + src/openai/lib/_tools.py | 4 +- src/openai/lib/streaming/chat/_completions.py | 3 +- src/openai/resources/beta/assistants.py | 48 +++--- .../resources/beta/threads/runs/runs.py | 54 +++---- .../resources/chat/completions/completions.py | 138 ++++++++++------ src/openai/resources/responses/responses.py | 148 +++++++++++++++++- src/openai/types/__init__.py | 3 + .../types/beta/assistant_create_params.py | 8 +- 
.../types/beta/assistant_update_params.py | 14 +- .../types/beta/threads/run_create_params.py | 8 +- src/openai/types/chat/__init__.py | 23 ++- ...at_completion_allowed_tool_choice_param.py | 17 ++ .../chat_completion_allowed_tools_param.py | 32 ++++ .../chat/chat_completion_custom_tool_param.py | 58 +++++++ ...ol.py => chat_completion_function_tool.py} | 4 +- .../chat_completion_function_tool_param.py | 16 ++ ...hat_completion_message_custom_tool_call.py | 26 +++ ...mpletion_message_custom_tool_call_param.py | 26 +++ ...t_completion_message_function_tool_call.py | 31 ++++ ...letion_message_function_tool_call_param.py | 31 ++++ .../chat/chat_completion_message_tool_call.py | 36 ++--- ...chat_completion_message_tool_call_param.py | 32 +--- ...mpletion_named_tool_choice_custom_param.py | 19 +++ ...chat_completion_named_tool_choice_param.py | 2 +- .../chat_completion_stream_options_param.py | 11 ++ ...hat_completion_tool_choice_option_param.py | 7 +- .../types/chat/chat_completion_tool_param.py | 13 +- .../types/chat/completion_create_params.py | 22 ++- .../types/chat/parsed_function_tool_call.py | 4 +- ...create_eval_completions_run_data_source.py | 4 +- ..._eval_completions_run_data_source_param.py | 4 +- src/openai/types/responses/__init__.py | 18 +++ src/openai/types/responses/custom_tool.py | 23 +++ .../types/responses/custom_tool_param.py | 23 +++ src/openai/types/responses/parsed_response.py | 2 + src/openai/types/responses/response.py | 29 ++-- .../types/responses/response_create_params.py | 44 +++++- .../responses/response_custom_tool_call.py | 25 +++ ...onse_custom_tool_call_input_delta_event.py | 24 +++ ...ponse_custom_tool_call_input_done_event.py | 24 +++ .../response_custom_tool_call_output.py | 22 +++ .../response_custom_tool_call_output_param.py | 21 +++ .../response_custom_tool_call_param.py | 24 +++ .../types/responses/response_input_item.py | 4 + .../responses/response_input_item_param.py | 4 + .../types/responses/response_input_param.py | 4 + .../types/responses/response_output_item.py | 2 + .../responses/response_retrieve_params.py | 11 ++ .../types/responses/response_stream_event.py | 4 + src/openai/types/responses/tool.py | 13 +- .../types/responses/tool_choice_allowed.py | 36 +++++ .../responses/tool_choice_allowed_param.py | 36 +++++ .../types/responses/tool_choice_custom.py | 15 ++ .../responses/tool_choice_custom_param.py | 15 ++ src/openai/types/responses/tool_param.py | 2 + src/openai/types/shared/__init__.py | 3 + src/openai/types/shared/chat_model.py | 7 + .../types/shared/custom_tool_input_format.py | 28 ++++ src/openai/types/shared/reasoning.py | 8 +- src/openai/types/shared/reasoning_effort.py | 2 +- .../shared/response_format_text_grammar.py | 15 ++ .../shared/response_format_text_python.py | 12 ++ src/openai/types/shared_params/__init__.py | 1 + src/openai/types/shared_params/chat_model.py | 7 + .../shared_params/custom_tool_input_format.py | 27 ++++ src/openai/types/shared_params/reasoning.py | 8 +- .../types/shared_params/reasoning_effort.py | 2 +- tests/api_resources/beta/test_assistants.py | 8 +- tests/api_resources/beta/threads/test_runs.py | 8 +- tests/api_resources/chat/test_completions.py | 32 +++- tests/api_resources/test_completions.py | 20 ++- tests/api_resources/test_responses.py | 20 ++- 76 files changed, 1293 insertions(+), 267 deletions(-) create mode 100644 src/openai/types/chat/chat_completion_allowed_tool_choice_param.py create mode 100644 src/openai/types/chat/chat_completion_allowed_tools_param.py create mode 100644 
src/openai/types/chat/chat_completion_custom_tool_param.py rename src/openai/types/chat/{chat_completion_tool.py => chat_completion_function_tool.py} (80%) create mode 100644 src/openai/types/chat/chat_completion_function_tool_param.py create mode 100644 src/openai/types/chat/chat_completion_message_custom_tool_call.py create mode 100644 src/openai/types/chat/chat_completion_message_custom_tool_call_param.py create mode 100644 src/openai/types/chat/chat_completion_message_function_tool_call.py create mode 100644 src/openai/types/chat/chat_completion_message_function_tool_call_param.py create mode 100644 src/openai/types/chat/chat_completion_named_tool_choice_custom_param.py create mode 100644 src/openai/types/responses/custom_tool.py create mode 100644 src/openai/types/responses/custom_tool_param.py create mode 100644 src/openai/types/responses/response_custom_tool_call.py create mode 100644 src/openai/types/responses/response_custom_tool_call_input_delta_event.py create mode 100644 src/openai/types/responses/response_custom_tool_call_input_done_event.py create mode 100644 src/openai/types/responses/response_custom_tool_call_output.py create mode 100644 src/openai/types/responses/response_custom_tool_call_output_param.py create mode 100644 src/openai/types/responses/response_custom_tool_call_param.py create mode 100644 src/openai/types/responses/tool_choice_allowed.py create mode 100644 src/openai/types/responses/tool_choice_allowed_param.py create mode 100644 src/openai/types/responses/tool_choice_custom.py create mode 100644 src/openai/types/responses/tool_choice_custom_param.py create mode 100644 src/openai/types/shared/custom_tool_input_format.py create mode 100644 src/openai/types/shared/response_format_text_grammar.py create mode 100644 src/openai/types/shared/response_format_text_python.py create mode 100644 src/openai/types/shared_params/custom_tool_input_format.py diff --git a/.stats.yml b/.stats.yml index f86fa668b1..9c1b4e4c54 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d6a16b25b969c3e5382e7d413de15bf83d5f7534d5c3ecce64d3a7e847418f9e.yml -openapi_spec_hash: 0c0bcf4aee9ca2a948dd14b890dfe728 -config_hash: aeff9289bd7f8c8482e4d738c3c2fde1 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f5c45f4ae5c2075cbc603d6910bba3da31c23714c209fbd3fd82a94f634a126b.yml +openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba +config_hash: 9a64321968e21ed72f5c0e02164ea00d diff --git a/api.md b/api.md index 657ac0905a..f05b3f61ee 100644 --- a/api.md +++ b/api.md @@ -6,6 +6,7 @@ from openai.types import ( ChatModel, ComparisonFilter, CompoundFilter, + CustomToolInputFormat, ErrorObject, FunctionDefinition, FunctionParameters, @@ -15,6 +16,8 @@ from openai.types import ( ResponseFormatJSONObject, ResponseFormatJSONSchema, ResponseFormatText, + ResponseFormatTextGrammar, + ResponseFormatTextPython, ResponsesModel, ) ``` @@ -46,6 +49,7 @@ Types: ```python from openai.types.chat import ( ChatCompletion, + ChatCompletionAllowedToolChoice, ChatCompletionAssistantMessageParam, ChatCompletionAudio, ChatCompletionAudioParam, @@ -55,15 +59,20 @@ from openai.types.chat import ( ChatCompletionContentPartInputAudio, ChatCompletionContentPartRefusal, ChatCompletionContentPartText, + ChatCompletionCustomTool, ChatCompletionDeleted, ChatCompletionDeveloperMessageParam, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, + 
ChatCompletionFunctionTool, ChatCompletionMessage, + ChatCompletionMessageCustomToolCall, + ChatCompletionMessageFunctionToolCall, ChatCompletionMessageParam, ChatCompletionMessageToolCall, ChatCompletionModality, ChatCompletionNamedToolChoice, + ChatCompletionNamedToolChoiceCustom, ChatCompletionPredictionContent, ChatCompletionRole, ChatCompletionStoreMessage, @@ -74,6 +83,7 @@ from openai.types.chat import ( ChatCompletionToolChoiceOption, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam, + ChatCompletionAllowedTools, ChatCompletionReasoningEffort, ) ``` @@ -719,6 +729,7 @@ Types: ```python from openai.types.responses import ( ComputerTool, + CustomTool, EasyInputMessage, FileSearchTool, FunctionTool, @@ -741,6 +752,10 @@ from openai.types.responses import ( ResponseContentPartAddedEvent, ResponseContentPartDoneEvent, ResponseCreatedEvent, + ResponseCustomToolCall, + ResponseCustomToolCallInputDeltaEvent, + ResponseCustomToolCallInputDoneEvent, + ResponseCustomToolCallOutput, ResponseError, ResponseErrorEvent, ResponseFailedEvent, @@ -810,6 +825,8 @@ from openai.types.responses import ( ResponseWebSearchCallInProgressEvent, ResponseWebSearchCallSearchingEvent, Tool, + ToolChoiceAllowed, + ToolChoiceCustom, ToolChoiceFunction, ToolChoiceMcp, ToolChoiceOptions, diff --git a/src/openai/lib/_parsing/_completions.py b/src/openai/lib/_parsing/_completions.py index c160070b66..e14c33864d 100644 --- a/src/openai/lib/_parsing/_completions.py +++ b/src/openai/lib/_parsing/_completions.py @@ -1,6 +1,7 @@ from __future__ import annotations import json +import logging from typing import TYPE_CHECKING, Any, Iterable, cast from typing_extensions import TypeVar, TypeGuard, assert_never @@ -19,14 +20,15 @@ ParsedChatCompletion, ChatCompletionMessage, ParsedFunctionToolCall, - ChatCompletionToolParam, ParsedChatCompletionMessage, + ChatCompletionFunctionToolParam, completion_create_params, ) from ..._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError from ...types.shared_params import FunctionDefinition from ...types.chat.completion_create_params import ResponseFormat as ResponseFormatParam -from ...types.chat.chat_completion_message_tool_call import Function +from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam +from ...types.chat.chat_completion_message_function_tool_call import Function ResponseFormatT = TypeVar( "ResponseFormatT", @@ -35,12 +37,36 @@ ) _default_response_format: None = None +log: logging.Logger = logging.getLogger("openai.lib.parsing") + + +def is_strict_chat_completion_tool_param( + tool: ChatCompletionToolParam, +) -> TypeGuard[ChatCompletionFunctionToolParam]: + """Check if the given tool is a strict ChatCompletionFunctionToolParam.""" + if not tool["type"] == "function": + return False + if tool["function"].get("strict") is not True: + return False + + return True + + +def select_strict_chat_completion_tools( + tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, +) -> Iterable[ChatCompletionFunctionToolParam] | NotGiven: + """Select only the strict ChatCompletionFunctionToolParams from the given tools.""" + if not is_given(tools): + return NOT_GIVEN + + return [t for t in tools if is_strict_chat_completion_tool_param(t)] + def validate_input_tools( tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, -) -> None: +) -> Iterable[ChatCompletionFunctionToolParam] | NotGiven: if not is_given(tools): - return + return NOT_GIVEN for tool in tools: if tool["type"] != "function": @@ -54,6 +80,8 @@ def 
validate_input_tools( f"`{tool['function']['name']}` is not strict. Only `strict` function tools can be auto-parsed" ) + return cast(Iterable[ChatCompletionFunctionToolParam], tools) + def parse_chat_completion( *, @@ -95,6 +123,14 @@ def parse_chat_completion( type_=ParsedFunctionToolCall, ) ) + elif tool_call.type == "custom": + # warn user that custom tool calls are not callable here + log.warning( + "Custom tool calls are not callable. Ignoring tool call: %s - %s", + tool_call.id, + tool_call.custom.name, + stacklevel=2, + ) elif TYPE_CHECKING: # type: ignore[unreachable] assert_never(tool_call) else: @@ -129,13 +165,15 @@ def parse_chat_completion( ) -def get_input_tool_by_name(*, input_tools: list[ChatCompletionToolParam], name: str) -> ChatCompletionToolParam | None: - return next((t for t in input_tools if t.get("function", {}).get("name") == name), None) +def get_input_tool_by_name( + *, input_tools: list[ChatCompletionToolParam], name: str +) -> ChatCompletionFunctionToolParam | None: + return next((t for t in input_tools if t["type"] == "function" and t.get("function", {}).get("name") == name), None) def parse_function_tool_arguments( *, input_tools: list[ChatCompletionToolParam], function: Function | ParsedFunction -) -> object: +) -> object | None: input_tool = get_input_tool_by_name(input_tools=input_tools, name=function.name) if not input_tool: return None @@ -149,7 +187,7 @@ def parse_function_tool_arguments( if not input_fn.get("strict"): return None - return json.loads(function.arguments) + return json.loads(function.arguments) # type: ignore[no-any-return] def maybe_parse_content( @@ -209,6 +247,9 @@ def is_response_format_param(response_format: object) -> TypeGuard[ResponseForma def is_parseable_tool(input_tool: ChatCompletionToolParam) -> bool: + if input_tool["type"] != "function": + return False + input_fn = cast(object, input_tool.get("function")) if isinstance(input_fn, PydanticFunctionTool): return True diff --git a/src/openai/lib/_parsing/_responses.py b/src/openai/lib/_parsing/_responses.py index 41be1d37b0..2a30ac836c 100644 --- a/src/openai/lib/_parsing/_responses.py +++ b/src/openai/lib/_parsing/_responses.py @@ -110,6 +110,7 @@ def parse_response( or output.type == "local_shell_call" or output.type == "mcp_list_tools" or output.type == "exec" + or output.type == "custom_tool_call" ): output_list.append(output) elif TYPE_CHECKING: # type: ignore diff --git a/src/openai/lib/_tools.py b/src/openai/lib/_tools.py index 415d750074..4070ad63bb 100644 --- a/src/openai/lib/_tools.py +++ b/src/openai/lib/_tools.py @@ -5,7 +5,7 @@ import pydantic from ._pydantic import to_strict_json_schema -from ..types.chat import ChatCompletionToolParam +from ..types.chat import ChatCompletionFunctionToolParam from ..types.shared_params import FunctionDefinition from ..types.responses.function_tool_param import FunctionToolParam as ResponsesFunctionToolParam @@ -42,7 +42,7 @@ def pydantic_function_tool( *, name: str | None = None, # inferred from class name by default description: str | None = None, # inferred from class docstring by default -) -> ChatCompletionToolParam: +) -> ChatCompletionFunctionToolParam: if description is None: # note: we intentionally don't use `.getdoc()` to avoid # including pydantic's docstrings diff --git a/src/openai/lib/streaming/chat/_completions.py b/src/openai/lib/streaming/chat/_completions.py index 2cf37efeae..1dff628a20 100644 --- a/src/openai/lib/streaming/chat/_completions.py +++ b/src/openai/lib/streaming/chat/_completions.py @@ -37,11 +37,12 
@@ parse_function_tool_arguments, ) from ...._streaming import Stream, AsyncStream -from ....types.chat import ChatCompletionChunk, ParsedChatCompletion, ChatCompletionToolParam +from ....types.chat import ChatCompletionChunk, ParsedChatCompletion from ...._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError from ....types.chat.chat_completion import ChoiceLogprobs from ....types.chat.chat_completion_chunk import Choice as ChoiceChunk from ....types.chat.completion_create_params import ResponseFormat as ResponseFormatParam +from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam class ChatCompletionStream(Generic[ResponseFormatT]): diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py index 9059d93616..fe0c99c88a 100644 --- a/src/openai/resources/beta/assistants.py +++ b/src/openai/resources/beta/assistants.py @@ -96,12 +96,11 @@ def create( name: The name of the assistant. The maximum length is 256 characters. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -220,6 +219,12 @@ def update( model: Union[ str, Literal[ + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-2025-08-07", + "gpt-5-mini-2025-08-07", + "gpt-5-nano-2025-08-07", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", @@ -298,12 +303,11 @@ def update( name: The name of the assistant. The maximum length is 256 characters. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -545,12 +549,11 @@ async def create( name: The name of the assistant. The maximum length is 256 characters. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -669,6 +672,12 @@ async def update( model: Union[ str, Literal[ + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-2025-08-07", + "gpt-5-mini-2025-08-07", + "gpt-5-nano-2025-08-07", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", @@ -747,12 +756,11 @@ async def update( name: The name of the assistant. The maximum length is 256 characters. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index 3d9ae9759e..01246d7c12 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -167,12 +167,11 @@ def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -322,12 +321,11 @@ def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -473,12 +471,11 @@ def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. 
Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -1600,12 +1597,11 @@ async def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -1755,12 +1751,11 @@ async def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), @@ -1906,12 +1901,11 @@ async def create( [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: Specifies the format that the model must output. 
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py index cd1cb2bd7f..65f91396bd 100644 --- a/src/openai/resources/chat/completions/completions.py +++ b/src/openai/resources/chat/completions/completions.py @@ -115,6 +115,7 @@ def parse( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -165,7 +166,7 @@ class MathResponse(BaseModel): print("answer: ", message.parsed.final_answer) ``` """ - _validate_input_tools(tools) + chat_completion_tools = _validate_input_tools(tools) extra_headers = { "X-Stainless-Helper-Method": "chat.completions.parse", @@ -176,7 +177,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma return _parse_chat_completion( response_format=response_format, chat_completion=raw_completion, - input_tools=tools, + input_tools=chat_completion_tools, ) return self._post( @@ -215,6 +216,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "top_logprobs": top_logprobs, "top_p": top_p, "user": user, + "verbosity": verbosity, "web_search_options": web_search_options, }, completion_create_params.CompletionCreateParams, @@ -268,6 +270,7 @@ def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -398,12 +401,11 @@ def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: An object specifying the format that the model must output. @@ -483,9 +485,9 @@ def create( `none` is the default when no tools are present. `auto` is the default if tools are present. - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. Use this to provide a list of functions the model may generate JSON inputs - for. A max of 128 functions are supported. + tools: A list of tools the model may call. 
You can provide either + [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + or [function tools](https://platform.openai.com/docs/guides/function-calling). top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -503,6 +505,10 @@ def create( similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. + web_search_options: This tool searches the web for relevant results to use in a response. Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). @@ -553,6 +559,7 @@ def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -692,12 +699,11 @@ def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: An object specifying the format that the model must output. @@ -768,9 +774,9 @@ def create( `none` is the default when no tools are present. `auto` is the default if tools are present. - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. Use this to provide a list of functions the model may generate JSON inputs - for. A max of 128 functions are supported. + tools: A list of tools the model may call. You can provide either + [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + or [function tools](https://platform.openai.com/docs/guides/function-calling). top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -788,6 +794,10 @@ def create( similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. 
+ web_search_options: This tool searches the web for relevant results to use in a response. Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). @@ -838,6 +848,7 @@ def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -977,12 +988,11 @@ def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: An object specifying the format that the model must output. @@ -1053,9 +1063,9 @@ def create( `none` is the default when no tools are present. `auto` is the default if tools are present. - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. Use this to provide a list of functions the model may generate JSON inputs - for. A max of 128 functions are supported. + tools: A list of tools the model may call. You can provide either + [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + or [function tools](https://platform.openai.com/docs/guides/function-calling). top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -1073,6 +1083,10 @@ def create( similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. + web_search_options: This tool searches the web for relevant results to use in a response. Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). @@ -1123,6 +1137,7 @@ def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
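The hunks above thread a new `verbosity` parameter through `chat.completions.create` and widen `reasoning_effort` to accept `minimal`. As a minimal sketch of how a caller might exercise both once this patch lands (the model name and prompt are illustrative placeholders, not part of the diff):

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Sketch only: `verbosity` and the `minimal` reasoning effort value are the
# parameters documented in this patch; model and prompt are illustrative.
completion = client.chat.completions.create(
    model="gpt-5",
    messages=[{"role": "user", "content": "Give me a one-line status summary."}],
    reasoning_effort="minimal",  # now: minimal | low | medium | high
    verbosity="low",             # new: low | medium | high
)
print(completion.choices[0].message.content)
```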
@@ -1168,6 +1183,7 @@ def create( "top_logprobs": top_logprobs, "top_p": top_p, "user": user, + "verbosity": verbosity, "web_search_options": web_search_options, }, completion_create_params.CompletionCreateParamsStreaming @@ -1396,6 +1412,7 @@ def stream( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -1465,6 +1482,7 @@ def stream( top_logprobs=top_logprobs, top_p=top_p, user=user, + verbosity=verbosity, web_search_options=web_search_options, extra_headers=extra_headers, extra_query=extra_query, @@ -1536,6 +1554,7 @@ async def parse( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -1636,6 +1655,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "top_logprobs": top_logprobs, "top_p": top_p, "user": user, + "verbosity": verbosity, "web_search_options": web_search_options, }, completion_create_params.CompletionCreateParams, @@ -1689,6 +1709,7 @@ async def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -1819,12 +1840,11 @@ async def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: An object specifying the format that the model must output. @@ -1904,9 +1924,9 @@ async def create( `none` is the default when no tools are present. `auto` is the default if tools are present. - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. Use this to provide a list of functions the model may generate JSON inputs - for. A max of 128 functions are supported. 
+ tools: A list of tools the model may call. You can provide either + [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + or [function tools](https://platform.openai.com/docs/guides/function-calling). top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -1924,6 +1944,10 @@ async def create( similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. + web_search_options: This tool searches the web for relevant results to use in a response. Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). @@ -1974,6 +1998,7 @@ async def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -2113,12 +2138,11 @@ async def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: An object specifying the format that the model must output. @@ -2189,9 +2213,9 @@ async def create( `none` is the default when no tools are present. `auto` is the default if tools are present. - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. Use this to provide a list of functions the model may generate JSON inputs - for. A max of 128 functions are supported. + tools: A list of tools the model may call. You can provide either + [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + or [function tools](https://platform.openai.com/docs/guides/function-calling). top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -2209,6 +2233,10 @@ async def create( similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. 
Currently supported values are `low`, `medium`, and `high`. + web_search_options: This tool searches the web for relevant results to use in a response. Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). @@ -2259,6 +2287,7 @@ async def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -2398,12 +2427,11 @@ async def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning_effort: **o-series models only** - - Constrains effort on reasoning for + reasoning_effort: Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. response_format: An object specifying the format that the model must output. @@ -2474,9 +2502,9 @@ async def create( `none` is the default when no tools are present. `auto` is the default if tools are present. - tools: A list of tools the model may call. Currently, only functions are supported as a - tool. Use this to provide a list of functions the model may generate JSON inputs - for. A max of 128 functions are supported. + tools: A list of tools the model may call. You can provide either + [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + or [function tools](https://platform.openai.com/docs/guides/function-calling). top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -2494,6 +2522,10 @@ async def create( similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. + web_search_options: This tool searches the web for relevant results to use in a response. Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat). @@ -2544,6 +2576,7 @@ async def create( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. @@ -2589,6 +2622,7 @@ async def create( "top_logprobs": top_logprobs, "top_p": top_p, "user": user, + "verbosity": verbosity, "web_search_options": web_search_options, }, completion_create_params.CompletionCreateParamsStreaming @@ -2817,6 +2851,7 @@ def stream( top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -2887,11 +2922,12 @@ def stream( top_logprobs=top_logprobs, top_p=top_p, user=user, + verbosity=verbosity, + web_search_options=web_search_options, extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, - web_search_options=web_search_options, ) return AsyncChatCompletionStreamManager( api_request, diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index 6d2b133110..5ba22418ed 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -93,6 +93,7 @@ def create( service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -101,6 +102,7 @@ def create( top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -232,6 +234,8 @@ def create( [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) for more information. + stream_options: Options for streaming responses. Only set this when you set `stream: true`. + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but @@ -259,8 +263,10 @@ def create( Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools). - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about + the model to call your own code with strongly typed arguments and outputs. + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. 
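The reworked `tools` documentation above distinguishes built-in tools, function tools, and the new custom tools. As a sketch only: a request mixing a function tool and a custom tool might look like the following, assuming the Responses API accepts flat `{"type": "function", ...}` and `{"type": "custom", ...}` tool shapes (the Responses-side tool params are not part of this diff; names like `get_weather` and `run_query` are placeholders).

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

response = client.responses.create(
    model="gpt-5",  # one of the GPT-5 aliases introduced later in this series
    input="What's the weather in Paris?",
    tools=[
        {
            # Function tool: strongly typed arguments and outputs.
            "type": "function",
            "name": "get_weather",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
            "strict": True,
        },
        {
            # Custom tool: the model produces free-form text input for it.
            "type": "custom",
            "name": "run_query",
            "description": "Run a raw SQL query against the analytics DB.",
        },
    ],
)
print(response.output)
```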
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -285,6 +291,10 @@ def create( similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -316,6 +326,7 @@ def create( safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -324,6 +335,7 @@ def create( top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -455,6 +467,8 @@ def create( store: Whether to store the generated model response for later retrieval via API. + stream_options: Options for streaming responses. Only set this when you set `stream: true`. + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but @@ -482,8 +496,10 @@ def create( Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools). - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about + the model to call your own code with strongly typed arguments and outputs. + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -508,6 +524,10 @@ def create( similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. 
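Since `verbosity` is a plain request parameter, trying it out is a one-liner per call. A minimal sketch, assuming the chosen model honors the parameter:

```python
from openai import OpenAI

client = OpenAI()

# Same prompt at two verbosity levels; "low" should come back terser.
for level in ("low", "high"):
    response = client.responses.create(
        model="gpt-5",  # placeholder; verbosity support varies by model
        input="Explain HTTP caching.",
        verbosity=level,
    )
    print(f"{level}: {len(response.output_text)} characters")
```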
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -539,6 +559,7 @@ def create( safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -547,6 +568,7 @@ def create( top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -678,6 +700,8 @@ def create( store: Whether to store the generated model response for later retrieval via API. + stream_options: Options for streaming responses. Only set this when you set `stream: true`. + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but @@ -705,8 +729,10 @@ def create( Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools). - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about + the model to call your own code with strongly typed arguments and outputs. + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -731,6 +757,10 @@ def create( similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. 
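`stream_options` is only meaningful together with `stream=True`. A sketch of disabling stream obfuscation on a trusted network, assuming `response_create_params.StreamOptions` exposes the same `include_obfuscation` flag this series adds to the Chat Completions stream options:

```python
from openai import OpenAI

client = OpenAI()

stream = client.responses.create(
    model="gpt-5",
    input="Write a haiku about code review.",
    stream=True,
    # Dropping the obfuscation padding trades the side-channel mitigation
    # for slightly smaller streaming payloads.
    stream_options={"include_obfuscation": False},
)
for event in stream:
    if event.type == "response.output_text.delta":
        print(event.delta, end="", flush=True)
```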
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -761,6 +791,7 @@ def create( service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -769,6 +800,7 @@ def create( top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -797,6 +829,7 @@ def create( "service_tier": service_tier, "store": store, "stream": stream, + "stream_options": stream_options, "temperature": temperature, "text": text, "tool_choice": tool_choice, @@ -805,6 +838,7 @@ def create( "top_p": top_p, "truncation": truncation, "user": user, + "verbosity": verbosity, }, response_create_params.ResponseCreateParamsStreaming if stream @@ -850,6 +884,7 @@ def stream( previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -881,6 +916,7 @@ def stream( previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -906,6 +942,7 @@ def stream( "previous_response_id": previous_response_id, "reasoning": reasoning, "store": store, + "stream_options": stream_options, "temperature": temperature, "text": text, "tool_choice": tool_choice, @@ -950,6 +987,7 @@ def stream( parallel_tool_calls=parallel_tool_calls, previous_response_id=previous_response_id, store=store, + stream_options=stream_options, stream=True, temperature=temperature, text=text, @@ -1007,6 +1045,7 @@ def parse( service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -1015,6 +1054,7 @@ def parse( top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = 
NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1061,6 +1101,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]: "service_tier": service_tier, "store": store, "stream": stream, + "stream_options": stream_options, "temperature": temperature, "text": text, "tool_choice": tool_choice, @@ -1069,6 +1110,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]: "top_p": top_p, "truncation": truncation, "user": user, + "verbosity": verbosity, }, response_create_params.ResponseCreateParams, ), @@ -1090,6 +1132,7 @@ def retrieve( response_id: str, *, include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, stream: Literal[False] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -1154,6 +1197,13 @@ def retrieve( include: Additional fields to include in the response. See the `include` parameter for Response creation above for more information. + include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random + characters to an `obfuscation` field on streaming delta events to normalize + payload sizes as a mitigation to certain side-channel attacks. These obfuscation + fields are included by default, but add a small amount of overhead to the data + stream. You can set `include_obfuscation` to false to optimize for bandwidth if + you trust the network links between your application and the OpenAI API. + starting_after: The sequence number of the event after which to start streaming. stream: If set to true, the model response data will be streamed to the client as it is @@ -1180,6 +1230,7 @@ def retrieve( *, stream: Literal[True], include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -1202,6 +1253,13 @@ def retrieve( include: Additional fields to include in the response. See the `include` parameter for Response creation above for more information. + include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random + characters to an `obfuscation` field on streaming delta events to normalize + payload sizes as a mitigation to certain side-channel attacks. These obfuscation + fields are included by default, but add a small amount of overhead to the data + stream. You can set `include_obfuscation` to false to optimize for bandwidth if + you trust the network links between your application and the OpenAI API. + starting_after: The sequence number of the event after which to start streaming. 
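Note that `retrieve` takes `include_obfuscation` directly rather than through `stream_options`. A sketch of re-streaming a stored response from a known position (`resp_123` is a placeholder ID):

```python
from openai import OpenAI

client = OpenAI()

stream = client.responses.retrieve(
    "resp_123",                 # placeholder ID of a stored response
    stream=True,
    include_obfuscation=False,  # skip obfuscation padding on trusted links
    starting_after=10,          # resume after sequence number 10
)
for event in stream:
    print(event.type)
```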
extra_headers: Send extra headers @@ -1221,6 +1279,7 @@ def retrieve( *, stream: bool, include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -1243,6 +1302,13 @@ def retrieve( include: Additional fields to include in the response. See the `include` parameter for Response creation above for more information. + include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random + characters to an `obfuscation` field on streaming delta events to normalize + payload sizes as a mitigation to certain side-channel attacks. These obfuscation + fields are included by default, but add a small amount of overhead to the data + stream. You can set `include_obfuscation` to false to optimize for bandwidth if + you trust the network links between your application and the OpenAI API. + starting_after: The sequence number of the event after which to start streaming. extra_headers: Send extra headers @@ -1260,6 +1326,7 @@ def retrieve( response_id: str, *, include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -1281,6 +1348,7 @@ def retrieve( query=maybe_transform( { "include": include, + "include_obfuscation": include_obfuscation, "starting_after": starting_after, "stream": stream, }, @@ -1408,6 +1476,7 @@ async def create( service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -1416,6 +1485,7 @@ async def create( top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1547,6 +1617,8 @@ async def create( [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming) for more information. + stream_options: Options for streaming responses. Only set this when you set `stream: true`. + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but @@ -1574,8 +1646,10 @@ async def create( Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools). 
- **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about + the model to call your own code with strongly typed arguments and outputs. + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -1600,6 +1674,10 @@ async def create( similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -1631,6 +1709,7 @@ async def create( safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -1639,6 +1718,7 @@ async def create( top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1770,6 +1850,8 @@ async def create( store: Whether to store the generated model response for later retrieval via API. + stream_options: Options for streaming responses. Only set this when you set `stream: true`. + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but @@ -1797,8 +1879,10 @@ async def create( Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools). - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about + the model to call your own code with strongly typed arguments and outputs. + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -1823,6 +1907,10 @@ async def create( similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + verbosity: Constrains the verbosity of the model's response. 
Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -1854,6 +1942,7 @@ async def create( safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -1862,6 +1951,7 @@ async def create( top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1993,6 +2083,8 @@ async def create( store: Whether to store the generated model response for later retrieval via API. + stream_options: Options for streaming responses. Only set this when you set `stream: true`. + temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but @@ -2020,8 +2112,10 @@ async def create( Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools). - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about + the model to call your own code with strongly typed arguments and outputs. + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. @@ -2046,6 +2140,10 @@ async def create( similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + verbosity: Constrains the verbosity of the model's response. Lower values will result in + more concise responses, while higher values will result in more verbose + responses. Currently supported values are `low`, `medium`, and `high`. 
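The async client mirrors the sync additions. A sketch combining `verbosity` with the newly documented `minimal` reasoning effort (whether a given model accepts `minimal` is an assumption here):

```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    response = await client.responses.create(
        model="gpt-5",  # placeholder model name
        input="Summarize the tradeoffs of streaming APIs in two sentences.",
        verbosity="low",
        reasoning={"effort": "minimal"},  # new lowest effort level
    )
    print(response.output_text)


asyncio.run(main())
```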
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -2076,6 +2174,7 @@ async def create( service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -2084,6 +2183,7 @@ async def create( top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -2112,6 +2212,7 @@ async def create( "service_tier": service_tier, "store": store, "stream": stream, + "stream_options": stream_options, "temperature": temperature, "text": text, "tool_choice": tool_choice, @@ -2120,6 +2221,7 @@ async def create( "top_p": top_p, "truncation": truncation, "user": user, + "verbosity": verbosity, }, response_create_params.ResponseCreateParamsStreaming if stream @@ -2165,6 +2267,7 @@ def stream( previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -2196,6 +2299,7 @@ def stream( previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -2221,6 +2325,7 @@ def stream( "previous_response_id": previous_response_id, "reasoning": reasoning, "store": store, + "stream_options": stream_options, "temperature": temperature, "text": text, "tool_choice": tool_choice, @@ -2266,6 +2371,7 @@ def stream( parallel_tool_calls=parallel_tool_calls, previous_response_id=previous_response_id, store=store, + stream_options=stream_options, temperature=temperature, text=text, tool_choice=tool_choice, @@ -2326,6 +2432,7 @@ async def parse( service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, + stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, @@ -2334,6 +2441,7 @@ async def parse( top_p: Optional[float] | NotGiven = NOT_GIVEN, 
truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, + verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -2380,6 +2488,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]: "service_tier": service_tier, "store": store, "stream": stream, + "stream_options": stream_options, "temperature": temperature, "text": text, "tool_choice": tool_choice, @@ -2388,6 +2497,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]: "top_p": top_p, "truncation": truncation, "user": user, + "verbosity": verbosity, }, response_create_params.ResponseCreateParams, ), @@ -2409,6 +2519,7 @@ async def retrieve( response_id: str, *, include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, stream: Literal[False] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -2473,6 +2584,13 @@ async def retrieve( include: Additional fields to include in the response. See the `include` parameter for Response creation above for more information. + include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random + characters to an `obfuscation` field on streaming delta events to normalize + payload sizes as a mitigation to certain side-channel attacks. These obfuscation + fields are included by default, but add a small amount of overhead to the data + stream. You can set `include_obfuscation` to false to optimize for bandwidth if + you trust the network links between your application and the OpenAI API. + starting_after: The sequence number of the event after which to start streaming. stream: If set to true, the model response data will be streamed to the client as it is @@ -2499,6 +2617,7 @@ async def retrieve( *, stream: Literal[True], include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -2521,6 +2640,13 @@ async def retrieve( include: Additional fields to include in the response. See the `include` parameter for Response creation above for more information. + include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random + characters to an `obfuscation` field on streaming delta events to normalize + payload sizes as a mitigation to certain side-channel attacks. These obfuscation + fields are included by default, but add a small amount of overhead to the data + stream. You can set `include_obfuscation` to false to optimize for bandwidth if + you trust the network links between your application and the OpenAI API. + starting_after: The sequence number of the event after which to start streaming. 
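And the async counterpart for resuming a stream, again with a placeholder response ID:

```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    stream = await client.responses.retrieve(
        "resp_123",  # placeholder ID of a stored, streamed response
        stream=True,
        include_obfuscation=False,
    )
    async for event in stream:
        if event.type == "response.output_text.delta":
            print(event.delta, end="", flush=True)


asyncio.run(main())
```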
extra_headers: Send extra headers @@ -2540,6 +2666,7 @@ async def retrieve( *, stream: bool, include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. @@ -2562,6 +2689,13 @@ async def retrieve( include: Additional fields to include in the response. See the `include` parameter for Response creation above for more information. + include_obfuscation: When true, stream obfuscation will be enabled. Stream obfuscation adds random + characters to an `obfuscation` field on streaming delta events to normalize + payload sizes as a mitigation to certain side-channel attacks. These obfuscation + fields are included by default, but add a small amount of overhead to the data + stream. You can set `include_obfuscation` to false to optimize for bandwidth if + you trust the network links between your application and the OpenAI API. + starting_after: The sequence number of the event after which to start streaming. extra_headers: Send extra headers @@ -2579,6 +2713,7 @@ async def retrieve( response_id: str, *, include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + include_obfuscation: bool | NotGiven = NOT_GIVEN, starting_after: int | NotGiven = NOT_GIVEN, stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -2600,6 +2735,7 @@ async def retrieve( query=await async_maybe_transform( { "include": include, + "include_obfuscation": include_obfuscation, "starting_after": starting_after, "stream": stream, }, diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index 51f3ee5c9b..1844f71ba7 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -18,8 +18,11 @@ FunctionDefinition as FunctionDefinition, FunctionParameters as FunctionParameters, ResponseFormatText as ResponseFormatText, + CustomToolInputFormat as CustomToolInputFormat, ResponseFormatJSONObject as ResponseFormatJSONObject, ResponseFormatJSONSchema as ResponseFormatJSONSchema, + ResponseFormatTextPython as ResponseFormatTextPython, + ResponseFormatTextGrammar as ResponseFormatTextGrammar, ) from .upload import Upload as Upload from .embedding import Embedding as Embedding diff --git a/src/openai/types/beta/assistant_create_params.py b/src/openai/types/beta/assistant_create_params.py index 8b3c331850..4b03dc0ea6 100644 --- a/src/openai/types/beta/assistant_create_params.py +++ b/src/openai/types/beta/assistant_create_params.py @@ -58,12 +58,12 @@ class AssistantCreateParams(TypedDict, total=False): """The name of the assistant. The maximum length is 256 characters.""" reasoning_effort: Optional[ReasoningEffort] - """**o-series models only** - + """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. 
""" response_format: Optional[AssistantResponseFormatOptionParam] diff --git a/src/openai/types/beta/assistant_update_params.py b/src/openai/types/beta/assistant_update_params.py index b28094a6a5..e032554db8 100644 --- a/src/openai/types/beta/assistant_update_params.py +++ b/src/openai/types/beta/assistant_update_params.py @@ -36,6 +36,12 @@ class AssistantUpdateParams(TypedDict, total=False): model: Union[ str, Literal[ + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-2025-08-07", + "gpt-5-mini-2025-08-07", + "gpt-5-nano-2025-08-07", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", @@ -87,12 +93,12 @@ class AssistantUpdateParams(TypedDict, total=False): """The name of the assistant. The maximum length is 256 characters.""" reasoning_effort: Optional[ReasoningEffort] - """**o-series models only** - + """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. """ response_format: Optional[AssistantResponseFormatOptionParam] diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index fc70227862..f9defcb19c 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -108,12 +108,12 @@ class RunCreateParamsBase(TypedDict, total=False): """ reasoning_effort: Optional[ReasoningEffort] - """**o-series models only** - + """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. 
""" response_format: Optional[AssistantResponseFormatOptionParam] diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py index dc26198567..ce1cf4522a 100644 --- a/src/openai/types/chat/__init__.py +++ b/src/openai/types/chat/__init__.py @@ -4,7 +4,6 @@ from .chat_completion import ChatCompletion as ChatCompletion from .chat_completion_role import ChatCompletionRole as ChatCompletionRole -from .chat_completion_tool import ChatCompletionTool as ChatCompletionTool from .chat_completion_audio import ChatCompletionAudio as ChatCompletionAudio from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk from .completion_list_params import CompletionListParams as CompletionListParams @@ -24,16 +23,20 @@ ) from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam from .chat_completion_audio_param import ChatCompletionAudioParam as ChatCompletionAudioParam +from .chat_completion_function_tool import ChatCompletionFunctionTool as ChatCompletionFunctionTool from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam from .chat_completion_store_message import ChatCompletionStoreMessage as ChatCompletionStoreMessage from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort as ChatCompletionReasoningEffort from .chat_completion_content_part_text import ChatCompletionContentPartText as ChatCompletionContentPartText +from .chat_completion_custom_tool_param import ChatCompletionCustomToolParam as ChatCompletionCustomToolParam from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall from .chat_completion_content_part_image import ChatCompletionContentPartImage as ChatCompletionContentPartImage from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam from .chat_completion_user_message_param import ChatCompletionUserMessageParam as ChatCompletionUserMessageParam +from .chat_completion_allowed_tools_param import ChatCompletionAllowedToolsParam as ChatCompletionAllowedToolsParam +from .chat_completion_function_tool_param import ChatCompletionFunctionToolParam as ChatCompletionFunctionToolParam from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam as ChatCompletionStreamOptionsParam from .chat_completion_system_message_param import ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam from .chat_completion_function_message_param import ( @@ -57,18 +60,36 @@ from .chat_completion_content_part_image_param import ( ChatCompletionContentPartImageParam as ChatCompletionContentPartImageParam, ) +from .chat_completion_message_custom_tool_call import ( + ChatCompletionMessageCustomToolCall as ChatCompletionMessageCustomToolCall, +) from .chat_completion_prediction_content_param import ( ChatCompletionPredictionContentParam as ChatCompletionPredictionContentParam, ) from .chat_completion_tool_choice_option_param import ( ChatCompletionToolChoiceOptionParam as ChatCompletionToolChoiceOptionParam, ) +from .chat_completion_allowed_tool_choice_param import ( + ChatCompletionAllowedToolChoiceParam as ChatCompletionAllowedToolChoiceParam, +) from .chat_completion_content_part_refusal_param import ( 
ChatCompletionContentPartRefusalParam as ChatCompletionContentPartRefusalParam, ) from .chat_completion_function_call_option_param import ( ChatCompletionFunctionCallOptionParam as ChatCompletionFunctionCallOptionParam, ) +from .chat_completion_message_function_tool_call import ( + ChatCompletionMessageFunctionToolCall as ChatCompletionMessageFunctionToolCall, +) from .chat_completion_content_part_input_audio_param import ( ChatCompletionContentPartInputAudioParam as ChatCompletionContentPartInputAudioParam, ) +from .chat_completion_message_custom_tool_call_param import ( + ChatCompletionMessageCustomToolCallParam as ChatCompletionMessageCustomToolCallParam, +) +from .chat_completion_named_tool_choice_custom_param import ( + ChatCompletionNamedToolChoiceCustomParam as ChatCompletionNamedToolChoiceCustomParam, +) +from .chat_completion_message_function_tool_call_param import ( + ChatCompletionMessageFunctionToolCallParam as ChatCompletionMessageFunctionToolCallParam, +) diff --git a/src/openai/types/chat/chat_completion_allowed_tool_choice_param.py b/src/openai/types/chat/chat_completion_allowed_tool_choice_param.py new file mode 100644 index 0000000000..813e6293f9 --- /dev/null +++ b/src/openai/types/chat/chat_completion_allowed_tool_choice_param.py @@ -0,0 +1,17 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from .chat_completion_allowed_tools_param import ChatCompletionAllowedToolsParam + +__all__ = ["ChatCompletionAllowedToolChoiceParam"] + + +class ChatCompletionAllowedToolChoiceParam(TypedDict, total=False): + allowed_tools: Required[ChatCompletionAllowedToolsParam] + """Constrains the tools available to the model to a pre-defined set.""" + + type: Required[Literal["allowed_tools"]] + """Allowed tool configuration type. Always `allowed_tools`.""" diff --git a/src/openai/types/chat/chat_completion_allowed_tools_param.py b/src/openai/types/chat/chat_completion_allowed_tools_param.py new file mode 100644 index 0000000000..d9b72d8f34 --- /dev/null +++ b/src/openai/types/chat/chat_completion_allowed_tools_param.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ChatCompletionAllowedToolsParam"] + + +class ChatCompletionAllowedToolsParam(TypedDict, total=False): + mode: Required[Literal["auto", "required"]] + """Constrains the tools available to the model to a pre-defined set. + + `auto` allows the model to pick from among the allowed tools and generate a + message. + + `required` requires the model to call one or more of the allowed tools. + """ + + tools: Required[Iterable[Dict[str, object]]] + """A list of tool definitions that the model should be allowed to call. + + For the Chat Completions API, the list of tool definitions might look like: + + ```json + [ + { "type": "function", "function": { "name": "get_weather" } }, + { "type": "function", "function": { "name": "get_time" } } + ] + ``` + """ diff --git a/src/openai/types/chat/chat_completion_custom_tool_param.py b/src/openai/types/chat/chat_completion_custom_tool_param.py new file mode 100644 index 0000000000..14959ee449 --- /dev/null +++ b/src/openai/types/chat/chat_completion_custom_tool_param.py @@ -0,0 +1,58 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = [ + "ChatCompletionCustomToolParam", + "Custom", + "CustomFormat", + "CustomFormatText", + "CustomFormatGrammar", + "CustomFormatGrammarGrammar", +] + + +class CustomFormatText(TypedDict, total=False): + type: Required[Literal["text"]] + """Unconstrained text format. Always `text`.""" + + +class CustomFormatGrammarGrammar(TypedDict, total=False): + definition: Required[str] + """The grammar definition.""" + + syntax: Required[Literal["lark", "regex"]] + """The syntax of the grammar definition. One of `lark` or `regex`.""" + + +class CustomFormatGrammar(TypedDict, total=False): + grammar: Required[CustomFormatGrammarGrammar] + """Your chosen grammar.""" + + type: Required[Literal["grammar"]] + """Grammar format. Always `grammar`.""" + + +CustomFormat: TypeAlias = Union[CustomFormatText, CustomFormatGrammar] + + +class Custom(TypedDict, total=False): + name: Required[str] + """The name of the custom tool, used to identify it in tool calls.""" + + description: str + """Optional description of the custom tool, used to provide more context.""" + + format: CustomFormat + """The input format for the custom tool. Default is unconstrained text.""" + + +class ChatCompletionCustomToolParam(TypedDict, total=False): + custom: Required[Custom] + """Properties of the custom tool.""" + + type: Required[Literal["custom"]] + """The type of the custom tool. Always `custom`.""" diff --git a/src/openai/types/chat/chat_completion_tool.py b/src/openai/types/chat/chat_completion_function_tool.py similarity index 80% rename from src/openai/types/chat/chat_completion_tool.py rename to src/openai/types/chat/chat_completion_function_tool.py index ae9126f906..641568acf1 100644 --- a/src/openai/types/chat/chat_completion_tool.py +++ b/src/openai/types/chat/chat_completion_function_tool.py @@ -5,10 +5,10 @@ from ..._models import BaseModel from ..shared.function_definition import FunctionDefinition -__all__ = ["ChatCompletionTool"] +__all__ = ["ChatCompletionFunctionTool"] -class ChatCompletionTool(BaseModel): +class ChatCompletionFunctionTool(BaseModel): function: FunctionDefinition type: Literal["function"] diff --git a/src/openai/types/chat/chat_completion_function_tool_param.py b/src/openai/types/chat/chat_completion_function_tool_param.py new file mode 100644 index 0000000000..a39feea542 --- /dev/null +++ b/src/openai/types/chat/chat_completion_function_tool_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from ..shared_params.function_definition import FunctionDefinition + +__all__ = ["ChatCompletionFunctionToolParam"] + + +class ChatCompletionFunctionToolParam(TypedDict, total=False): + function: Required[FunctionDefinition] + + type: Required[Literal["function"]] + """The type of the tool. Currently, only `function` is supported.""" diff --git a/src/openai/types/chat/chat_completion_message_custom_tool_call.py b/src/openai/types/chat/chat_completion_message_custom_tool_call.py new file mode 100644 index 0000000000..b13c176afe --- /dev/null +++ b/src/openai/types/chat/chat_completion_message_custom_tool_call.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ChatCompletionMessageCustomToolCall", "Custom"] + + +class Custom(BaseModel): + input: str + """The input for the custom tool call generated by the model.""" + + name: str + """The name of the custom tool to call.""" + + +class ChatCompletionMessageCustomToolCall(BaseModel): + id: str + """The ID of the tool call.""" + + custom: Custom + """The custom tool that the model called.""" + + type: Literal["custom"] + """The type of the tool. Always `custom`.""" diff --git a/src/openai/types/chat/chat_completion_message_custom_tool_call_param.py b/src/openai/types/chat/chat_completion_message_custom_tool_call_param.py new file mode 100644 index 0000000000..3753e0f200 --- /dev/null +++ b/src/openai/types/chat/chat_completion_message_custom_tool_call_param.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ChatCompletionMessageCustomToolCallParam", "Custom"] + + +class Custom(TypedDict, total=False): + input: Required[str] + """The input for the custom tool call generated by the model.""" + + name: Required[str] + """The name of the custom tool to call.""" + + +class ChatCompletionMessageCustomToolCallParam(TypedDict, total=False): + id: Required[str] + """The ID of the tool call.""" + + custom: Required[Custom] + """The custom tool that the model called.""" + + type: Required[Literal["custom"]] + """The type of the tool. Always `custom`.""" diff --git a/src/openai/types/chat/chat_completion_message_function_tool_call.py b/src/openai/types/chat/chat_completion_message_function_tool_call.py new file mode 100644 index 0000000000..d056d9aff6 --- /dev/null +++ b/src/openai/types/chat/chat_completion_message_function_tool_call.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ChatCompletionMessageFunctionToolCall", "Function"] + + +class Function(BaseModel): + arguments: str + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: str + """The name of the function to call.""" + + +class ChatCompletionMessageFunctionToolCall(BaseModel): + id: str + """The ID of the tool call.""" + + function: Function + """The function that the model called.""" + + type: Literal["function"] + """The type of the tool. Currently, only `function` is supported.""" diff --git a/src/openai/types/chat/chat_completion_message_function_tool_call_param.py b/src/openai/types/chat/chat_completion_message_function_tool_call_param.py new file mode 100644 index 0000000000..7c827edd2c --- /dev/null +++ b/src/openai/types/chat/chat_completion_message_function_tool_call_param.py @@ -0,0 +1,31 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ChatCompletionMessageFunctionToolCallParam", "Function"] + + +class Function(TypedDict, total=False): + arguments: Required[str] + """ + The arguments to call the function with, as generated by the model in JSON + format. Note that the model does not always generate valid JSON, and may + hallucinate parameters not defined by your function schema. Validate the + arguments in your code before calling your function. + """ + + name: Required[str] + """The name of the function to call.""" + + +class ChatCompletionMessageFunctionToolCallParam(TypedDict, total=False): + id: Required[str] + """The ID of the tool call.""" + + function: Required[Function] + """The function that the model called.""" + + type: Required[Literal["function"]] + """The type of the tool. Currently, only `function` is supported.""" diff --git a/src/openai/types/chat/chat_completion_message_tool_call.py b/src/openai/types/chat/chat_completion_message_tool_call.py index 4fec667096..c254774626 100644 --- a/src/openai/types/chat/chat_completion_message_tool_call.py +++ b/src/openai/types/chat/chat_completion_message_tool_call.py @@ -1,31 +1,15 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing_extensions import Literal +from typing import Union +from typing_extensions import Annotated, TypeAlias -from ..._models import BaseModel +from ..._utils import PropertyInfo +from .chat_completion_message_custom_tool_call import ChatCompletionMessageCustomToolCall +from .chat_completion_message_function_tool_call import ChatCompletionMessageFunctionToolCall -__all__ = ["ChatCompletionMessageToolCall", "Function"] +__all__ = ["ChatCompletionMessageToolCall"] - -class Function(BaseModel): - arguments: str - """ - The arguments to call the function with, as generated by the model in JSON - format. Note that the model does not always generate valid JSON, and may - hallucinate parameters not defined by your function schema. Validate the - arguments in your code before calling your function. - """ - - name: str - """The name of the function to call.""" - - -class ChatCompletionMessageToolCall(BaseModel): - id: str - """The ID of the tool call.""" - - function: Function - """The function that the model called.""" - - type: Literal["function"] - """The type of the tool. 
Currently, only `function` is supported.""" +ChatCompletionMessageToolCall: TypeAlias = Annotated[ + Union[ChatCompletionMessageFunctionToolCall, ChatCompletionMessageCustomToolCall], + PropertyInfo(discriminator="type"), +] diff --git a/src/openai/types/chat/chat_completion_message_tool_call_param.py b/src/openai/types/chat/chat_completion_message_tool_call_param.py index f616c363d0..96ba6521f0 100644 --- a/src/openai/types/chat/chat_completion_message_tool_call_param.py +++ b/src/openai/types/chat/chat_completion_message_tool_call_param.py @@ -2,30 +2,14 @@ from __future__ import annotations -from typing_extensions import Literal, Required, TypedDict +from typing import Union +from typing_extensions import TypeAlias -__all__ = ["ChatCompletionMessageToolCallParam", "Function"] +from .chat_completion_message_custom_tool_call_param import ChatCompletionMessageCustomToolCallParam +from .chat_completion_message_function_tool_call_param import ChatCompletionMessageFunctionToolCallParam +__all__ = ["ChatCompletionMessageToolCallParam"] -class Function(TypedDict, total=False): - arguments: Required[str] - """ - The arguments to call the function with, as generated by the model in JSON - format. Note that the model does not always generate valid JSON, and may - hallucinate parameters not defined by your function schema. Validate the - arguments in your code before calling your function. - """ - - name: Required[str] - """The name of the function to call.""" - - -class ChatCompletionMessageToolCallParam(TypedDict, total=False): - id: Required[str] - """The ID of the tool call.""" - - function: Required[Function] - """The function that the model called.""" - - type: Required[Literal["function"]] - """The type of the tool. Currently, only `function` is supported.""" +ChatCompletionMessageToolCallParam: TypeAlias = Union[ + ChatCompletionMessageFunctionToolCallParam, ChatCompletionMessageCustomToolCallParam +] diff --git a/src/openai/types/chat/chat_completion_named_tool_choice_custom_param.py b/src/openai/types/chat/chat_completion_named_tool_choice_custom_param.py new file mode 100644 index 0000000000..1c123c0acb --- /dev/null +++ b/src/openai/types/chat/chat_completion_named_tool_choice_custom_param.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ChatCompletionNamedToolChoiceCustomParam", "Custom"] + + +class Custom(TypedDict, total=False): + name: Required[str] + """The name of the custom tool to call.""" + + +class ChatCompletionNamedToolChoiceCustomParam(TypedDict, total=False): + custom: Required[Custom] + + type: Required[Literal["custom"]] + """For custom tool calling, the type is always `custom`.""" diff --git a/src/openai/types/chat/chat_completion_named_tool_choice_param.py b/src/openai/types/chat/chat_completion_named_tool_choice_param.py index 369f8b42dd..ae1acfb909 100644 --- a/src/openai/types/chat/chat_completion_named_tool_choice_param.py +++ b/src/openai/types/chat/chat_completion_named_tool_choice_param.py @@ -16,4 +16,4 @@ class ChatCompletionNamedToolChoiceParam(TypedDict, total=False): function: Required[Function] type: Required[Literal["function"]] - """The type of the tool. 
Currently, only `function` is supported.""" + """For function calling, the type is always `function`.""" diff --git a/src/openai/types/chat/chat_completion_stream_options_param.py b/src/openai/types/chat/chat_completion_stream_options_param.py index 471e0eba98..fc3191d2d1 100644 --- a/src/openai/types/chat/chat_completion_stream_options_param.py +++ b/src/openai/types/chat/chat_completion_stream_options_param.py @@ -8,6 +8,17 @@ class ChatCompletionStreamOptionsParam(TypedDict, total=False): + include_obfuscation: bool + """When true, stream obfuscation will be enabled. + + Stream obfuscation adds random characters to an `obfuscation` field on streaming + delta events to normalize payload sizes as a mitigation to certain side-channel + attacks. These obfuscation fields are included by default, but add a small + amount of overhead to the data stream. You can set `include_obfuscation` to + false to optimize for bandwidth if you trust the network links between your + application and the OpenAI API. + """ + include_usage: bool """If set, an additional chunk will be streamed before the `data: [DONE]` message. diff --git a/src/openai/types/chat/chat_completion_tool_choice_option_param.py b/src/openai/types/chat/chat_completion_tool_choice_option_param.py index 7dedf041b7..f3bb0a46df 100644 --- a/src/openai/types/chat/chat_completion_tool_choice_option_param.py +++ b/src/openai/types/chat/chat_completion_tool_choice_option_param.py @@ -6,9 +6,14 @@ from typing_extensions import Literal, TypeAlias from .chat_completion_named_tool_choice_param import ChatCompletionNamedToolChoiceParam +from .chat_completion_allowed_tool_choice_param import ChatCompletionAllowedToolChoiceParam +from .chat_completion_named_tool_choice_custom_param import ChatCompletionNamedToolChoiceCustomParam __all__ = ["ChatCompletionToolChoiceOptionParam"] ChatCompletionToolChoiceOptionParam: TypeAlias = Union[ - Literal["none", "auto", "required"], ChatCompletionNamedToolChoiceParam + Literal["none", "auto", "required"], + ChatCompletionAllowedToolChoiceParam, + ChatCompletionNamedToolChoiceParam, + ChatCompletionNamedToolChoiceCustomParam, ] diff --git a/src/openai/types/chat/chat_completion_tool_param.py b/src/openai/types/chat/chat_completion_tool_param.py index 6c2b1a36f0..7cd9743ea3 100644 --- a/src/openai/types/chat/chat_completion_tool_param.py +++ b/src/openai/types/chat/chat_completion_tool_param.py @@ -2,15 +2,12 @@ from __future__ import annotations -from typing_extensions import Literal, Required, TypedDict +from typing import Union +from typing_extensions import TypeAlias -from ..shared_params.function_definition import FunctionDefinition +from .chat_completion_custom_tool_param import ChatCompletionCustomToolParam +from .chat_completion_function_tool_param import ChatCompletionFunctionToolParam __all__ = ["ChatCompletionToolParam"] - -class ChatCompletionToolParam(TypedDict, total=False): - function: Required[FunctionDefinition] - - type: Required[Literal["function"]] - """The type of the tool. 
Currently, only `function` is supported.""" +ChatCompletionToolParam: TypeAlias = Union[ChatCompletionFunctionToolParam, ChatCompletionCustomToolParam] diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 20d7c187f8..011067af1a 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -185,12 +185,12 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ reasoning_effort: Optional[ReasoningEffort] - """**o-series models only** - + """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. """ response_format: ResponseFormat @@ -287,9 +287,9 @@ class CompletionCreateParamsBase(TypedDict, total=False): tools: Iterable[ChatCompletionToolParam] """A list of tools the model may call. - Currently, only functions are supported as a tool. Use this to provide a list of - functions the model may generate JSON inputs for. A max of 128 functions are - supported. + You can provide either + [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) + or [function tools](https://platform.openai.com/docs/guides/function-calling). """ top_logprobs: Optional[int] @@ -317,6 +317,14 @@ class CompletionCreateParamsBase(TypedDict, total=False): [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). """ + verbosity: Optional[Literal["low", "medium", "high"]] + """Constrains the verbosity of the model's response. + + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. + """ + web_search_options: WebSearchOptions """ This tool searches the web for relevant results to use in a response. 
Learn more diff --git a/src/openai/types/chat/parsed_function_tool_call.py b/src/openai/types/chat/parsed_function_tool_call.py index 3e90789f85..e06b3546cb 100644 --- a/src/openai/types/chat/parsed_function_tool_call.py +++ b/src/openai/types/chat/parsed_function_tool_call.py @@ -2,7 +2,7 @@ from typing import Optional -from .chat_completion_message_tool_call import Function, ChatCompletionMessageToolCall +from .chat_completion_message_function_tool_call import Function, ChatCompletionMessageFunctionToolCall __all__ = ["ParsedFunctionToolCall", "ParsedFunction"] @@ -24,6 +24,6 @@ class ParsedFunction(Function): """ -class ParsedFunctionToolCall(ChatCompletionMessageToolCall): +class ParsedFunctionToolCall(ChatCompletionMessageFunctionToolCall): function: ParsedFunction """The function that the model called.""" diff --git a/src/openai/types/evals/create_eval_completions_run_data_source.py b/src/openai/types/evals/create_eval_completions_run_data_source.py index a0eaa5addb..bb39d1d3e5 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source.py @@ -6,10 +6,10 @@ from ..._utils import PropertyInfo from ..._models import BaseModel from ..shared.metadata import Metadata -from ..chat.chat_completion_tool import ChatCompletionTool from ..shared.response_format_text import ResponseFormatText from ..responses.easy_input_message import EasyInputMessage from ..responses.response_input_text import ResponseInputText +from ..chat.chat_completion_function_tool import ChatCompletionFunctionTool from ..shared.response_format_json_object import ResponseFormatJSONObject from ..shared.response_format_json_schema import ResponseFormatJSONSchema @@ -186,7 +186,7 @@ class SamplingParams(BaseModel): temperature: Optional[float] = None """A higher temperature increases randomness in the outputs.""" - tools: Optional[List[ChatCompletionTool]] = None + tools: Optional[List[ChatCompletionFunctionTool]] = None """A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of diff --git a/src/openai/types/evals/create_eval_completions_run_data_source_param.py b/src/openai/types/evals/create_eval_completions_run_data_source_param.py index 8892b68b17..7c71ecbe88 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source_param.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source_param.py @@ -6,10 +6,10 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..shared_params.metadata import Metadata -from ..chat.chat_completion_tool_param import ChatCompletionToolParam from ..responses.easy_input_message_param import EasyInputMessageParam from ..shared_params.response_format_text import ResponseFormatText from ..responses.response_input_text_param import ResponseInputTextParam +from ..chat.chat_completion_function_tool_param import ChatCompletionFunctionToolParam from ..shared_params.response_format_json_object import ResponseFormatJSONObject from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema @@ -180,7 +180,7 @@ class SamplingParams(TypedDict, total=False): temperature: float """A higher temperature increases randomness in the outputs.""" - tools: Iterable[ChatCompletionToolParam] + tools: Iterable[ChatCompletionFunctionToolParam] """A list of tools the model may call. Currently, only functions are supported as a tool. 
Use this to provide a list of diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py index 2e502ed69f..74d8688081 100644 --- a/src/openai/types/responses/__init__.py +++ b/src/openai/types/responses/__init__.py @@ -5,6 +5,7 @@ from .tool import Tool as Tool from .response import Response as Response from .tool_param import ToolParam as ToolParam +from .custom_tool import CustomTool as CustomTool from .computer_tool import ComputerTool as ComputerTool from .function_tool import FunctionTool as FunctionTool from .response_item import ResponseItem as ResponseItem @@ -23,15 +24,18 @@ from .tool_choice_mcp import ToolChoiceMcp as ToolChoiceMcp from .web_search_tool import WebSearchTool as WebSearchTool from .file_search_tool import FileSearchTool as FileSearchTool +from .custom_tool_param import CustomToolParam as CustomToolParam from .tool_choice_types import ToolChoiceTypes as ToolChoiceTypes from .easy_input_message import EasyInputMessage as EasyInputMessage from .response_item_list import ResponseItemList as ResponseItemList +from .tool_choice_custom import ToolChoiceCustom as ToolChoiceCustom from .computer_tool_param import ComputerToolParam as ComputerToolParam from .function_tool_param import FunctionToolParam as FunctionToolParam from .response_includable import ResponseIncludable as ResponseIncludable from .response_input_file import ResponseInputFile as ResponseInputFile from .response_input_item import ResponseInputItem as ResponseInputItem from .response_input_text import ResponseInputText as ResponseInputText +from .tool_choice_allowed import ToolChoiceAllowed as ToolChoiceAllowed from .tool_choice_options import ToolChoiceOptions as ToolChoiceOptions from .response_error_event import ResponseErrorEvent as ResponseErrorEvent from .response_input_image import ResponseInputImage as ResponseInputImage @@ -59,12 +63,15 @@ from .response_completed_event import ResponseCompletedEvent as ResponseCompletedEvent from .response_retrieve_params import ResponseRetrieveParams as ResponseRetrieveParams from .response_text_done_event import ResponseTextDoneEvent as ResponseTextDoneEvent +from .tool_choice_custom_param import ToolChoiceCustomParam as ToolChoiceCustomParam from .response_audio_done_event import ResponseAudioDoneEvent as ResponseAudioDoneEvent +from .response_custom_tool_call import ResponseCustomToolCall as ResponseCustomToolCall from .response_incomplete_event import ResponseIncompleteEvent as ResponseIncompleteEvent from .response_input_file_param import ResponseInputFileParam as ResponseInputFileParam from .response_input_item_param import ResponseInputItemParam as ResponseInputItemParam from .response_input_text_param import ResponseInputTextParam as ResponseInputTextParam from .response_text_delta_event import ResponseTextDeltaEvent as ResponseTextDeltaEvent +from .tool_choice_allowed_param import ToolChoiceAllowedParam as ToolChoiceAllowedParam from .response_audio_delta_event import ResponseAudioDeltaEvent as ResponseAudioDeltaEvent from .response_in_progress_event import ResponseInProgressEvent as ResponseInProgressEvent from .response_input_image_param import ResponseInputImageParam as ResponseInputImageParam @@ -84,8 +91,10 @@ from .response_reasoning_item_param import ResponseReasoningItemParam as ResponseReasoningItemParam from .response_file_search_tool_call import ResponseFileSearchToolCall as ResponseFileSearchToolCall from .response_mcp_call_failed_event import ResponseMcpCallFailedEvent as ResponseMcpCallFailedEvent +from 
.response_custom_tool_call_param import ResponseCustomToolCallParam as ResponseCustomToolCallParam from .response_output_item_done_event import ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent from .response_content_part_done_event import ResponseContentPartDoneEvent as ResponseContentPartDoneEvent +from .response_custom_tool_call_output import ResponseCustomToolCallOutput as ResponseCustomToolCallOutput from .response_function_tool_call_item import ResponseFunctionToolCallItem as ResponseFunctionToolCallItem from .response_output_item_added_event import ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent from .response_computer_tool_call_param import ResponseComputerToolCallParam as ResponseComputerToolCallParam @@ -105,6 +114,9 @@ from .response_audio_transcript_delta_event import ( ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, ) +from .response_custom_tool_call_output_param import ( + ResponseCustomToolCallOutputParam as ResponseCustomToolCallOutputParam, +) from .response_mcp_call_arguments_done_event import ( ResponseMcpCallArgumentsDoneEvent as ResponseMcpCallArgumentsDoneEvent, ) @@ -153,6 +165,9 @@ from .response_mcp_list_tools_in_progress_event import ( ResponseMcpListToolsInProgressEvent as ResponseMcpListToolsInProgressEvent, ) +from .response_custom_tool_call_input_done_event import ( + ResponseCustomToolCallInputDoneEvent as ResponseCustomToolCallInputDoneEvent, +) from .response_reasoning_summary_part_done_event import ( ResponseReasoningSummaryPartDoneEvent as ResponseReasoningSummaryPartDoneEvent, ) @@ -162,6 +177,9 @@ from .response_web_search_call_in_progress_event import ( ResponseWebSearchCallInProgressEvent as ResponseWebSearchCallInProgressEvent, ) +from .response_custom_tool_call_input_delta_event import ( + ResponseCustomToolCallInputDeltaEvent as ResponseCustomToolCallInputDeltaEvent, +) from .response_file_search_call_in_progress_event import ( ResponseFileSearchCallInProgressEvent as ResponseFileSearchCallInProgressEvent, ) diff --git a/src/openai/types/responses/custom_tool.py b/src/openai/types/responses/custom_tool.py new file mode 100644 index 0000000000..c16ae715eb --- /dev/null +++ b/src/openai/types/responses/custom_tool.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from ..shared.custom_tool_input_format import CustomToolInputFormat + +__all__ = ["CustomTool"] + + +class CustomTool(BaseModel): + name: str + """The name of the custom tool, used to identify it in tool calls.""" + + type: Literal["custom"] + """The type of the custom tool. Always `custom`.""" + + description: Optional[str] = None + """Optional description of the custom tool, used to provide more context.""" + + format: Optional[CustomToolInputFormat] = None + """The input format for the custom tool. Default is unconstrained text.""" diff --git a/src/openai/types/responses/custom_tool_param.py b/src/openai/types/responses/custom_tool_param.py new file mode 100644 index 0000000000..2afc8b19b8 --- /dev/null +++ b/src/openai/types/responses/custom_tool_param.py @@ -0,0 +1,23 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
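[Editor's note] Taken together with the `CustomToolInputFormat` union added later in this patch (`text` or `grammar`), the new `CustomTool` type lets a request constrain the model's raw-text tool input. A hedged request sketch built from the `CustomToolParam` shape below; the model name and the regex are illustrative:

```python
from openai import OpenAI

client = OpenAI()

# Sketch: a custom tool whose input must match a regex grammar.
response = client.responses.create(
    model="gpt-5",  # illustrative
    input="Pick a primary color.",
    tools=[
        {
            "type": "custom",
            "name": "pick_color",
            "description": "Choose exactly one primary color.",
            "format": {
                "type": "grammar",
                "syntax": "regex",
                "definition": r"^(red|blue|yellow)$",
            },
        }
    ],
)
print(response.output)
```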
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from ..shared_params.custom_tool_input_format import CustomToolInputFormat + +__all__ = ["CustomToolParam"] + + +class CustomToolParam(TypedDict, total=False): + name: Required[str] + """The name of the custom tool, used to identify it in tool calls.""" + + type: Required[Literal["custom"]] + """The type of the custom tool. Always `custom`.""" + + description: str + """Optional description of the custom tool, used to provide more context.""" + + format: CustomToolInputFormat + """The input format for the custom tool. Default is unconstrained text.""" diff --git a/src/openai/types/responses/parsed_response.py b/src/openai/types/responses/parsed_response.py index e59e86d2b7..1d9db361dd 100644 --- a/src/openai/types/responses/parsed_response.py +++ b/src/openai/types/responses/parsed_response.py @@ -19,6 +19,7 @@ from .response_output_message import ResponseOutputMessage from .response_output_refusal import ResponseOutputRefusal from .response_reasoning_item import ResponseReasoningItem +from .response_custom_tool_call import ResponseCustomToolCall from .response_computer_tool_call import ResponseComputerToolCall from .response_function_tool_call import ResponseFunctionToolCall from .response_function_web_search import ResponseFunctionWebSearch @@ -73,6 +74,7 @@ class ParsedResponseFunctionToolCall(ResponseFunctionToolCall): LocalShellCallAction, McpListTools, ResponseCodeInterpreterToolCall, + ResponseCustomToolCall, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index 7db466dfe7..07a82cb4ac 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -13,7 +13,9 @@ from ..shared.metadata import Metadata from ..shared.reasoning import Reasoning from .tool_choice_types import ToolChoiceTypes +from .tool_choice_custom import ToolChoiceCustom from .response_input_item import ResponseInputItem +from .tool_choice_allowed import ToolChoiceAllowed from .tool_choice_options import ToolChoiceOptions from .response_output_item import ResponseOutputItem from .response_text_config import ResponseTextConfig @@ -28,7 +30,9 @@ class IncompleteDetails(BaseModel): """The reason why the response is incomplete.""" -ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceTypes, ToolChoiceFunction, ToolChoiceMcp] +ToolChoice: TypeAlias = Union[ + ToolChoiceOptions, ToolChoiceAllowed, ToolChoiceTypes, ToolChoiceFunction, ToolChoiceMcp, ToolChoiceCustom +] class Response(BaseModel): @@ -116,8 +120,10 @@ class Response(BaseModel): Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools). - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about + the model to call your own code with strongly typed arguments and outputs. + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. """ top_p: Optional[float] = None @@ -130,8 +136,8 @@ class Response(BaseModel): """ background: Optional[bool] = None - """Whether to run the model response in the background. - + """ + Whether to run the model response in the background. [Learn more](https://platform.openai.com/docs/guides/background). 
""" @@ -253,18 +259,3 @@ class Response(BaseModel): [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). """ - @property - def output_text(self) -> str: - """Convenience property that aggregates all `output_text` items from the `output` - list. - - If no `output_text` content blocks exist, then an empty string is returned. - """ - texts: List[str] = [] - for output in self.output: - if output.type == "message": - for content in output.content: - if content.type == "output_text": - texts.append(content.text) - - return "".join(texts) diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index 4a78d7c028..53af325328 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -14,12 +14,15 @@ from ..shared_params.metadata import Metadata from .tool_choice_types_param import ToolChoiceTypesParam from ..shared_params.reasoning import Reasoning +from .tool_choice_custom_param import ToolChoiceCustomParam +from .tool_choice_allowed_param import ToolChoiceAllowedParam from .response_text_config_param import ResponseTextConfigParam from .tool_choice_function_param import ToolChoiceFunctionParam from ..shared_params.responses_model import ResponsesModel __all__ = [ "ResponseCreateParamsBase", + "StreamOptions", "ToolChoice", "ResponseCreateParamsNonStreaming", "ResponseCreateParamsStreaming", @@ -28,8 +31,8 @@ class ResponseCreateParamsBase(TypedDict, total=False): background: Optional[bool] - """Whether to run the model response in the background. - + """ + Whether to run the model response in the background. [Learn more](https://platform.openai.com/docs/guides/background). """ @@ -169,6 +172,9 @@ class ResponseCreateParamsBase(TypedDict, total=False): store: Optional[bool] """Whether to store the generated model response for later retrieval via API.""" + stream_options: Optional[StreamOptions] + """Options for streaming responses. Only set this when you set `stream: true`.""" + temperature: Optional[float] """What sampling temperature to use, between 0 and 2. @@ -207,8 +213,10 @@ class ResponseCreateParamsBase(TypedDict, total=False): Learn more about [built-in tools](https://platform.openai.com/docs/guides/tools). - **Function calls (custom tools)**: Functions that are defined by you, enabling - the model to call your own code. Learn more about + the model to call your own code with strongly typed arguments and outputs. + Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling). + You can also use custom tools to call your own code. """ top_logprobs: Optional[int] @@ -245,8 +253,36 @@ class ResponseCreateParamsBase(TypedDict, total=False): [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). """ + verbosity: Optional[Literal["low", "medium", "high"]] + """Constrains the verbosity of the model's response. + + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. + """ + + +class StreamOptions(TypedDict, total=False): + include_obfuscation: bool + """When true, stream obfuscation will be enabled. + + Stream obfuscation adds random characters to an `obfuscation` field on streaming + delta events to normalize payload sizes as a mitigation to certain side-channel + attacks. 
These obfuscation fields are included by default, but add a small + amount of overhead to the data stream. You can set `include_obfuscation` to + false to optimize for bandwidth if you trust the network links between your + application and the OpenAI API. + """ + -ToolChoice: TypeAlias = Union[ToolChoiceOptions, ToolChoiceTypesParam, ToolChoiceFunctionParam, ToolChoiceMcpParam] +ToolChoice: TypeAlias = Union[ + ToolChoiceOptions, + ToolChoiceAllowedParam, + ToolChoiceTypesParam, + ToolChoiceFunctionParam, + ToolChoiceMcpParam, + ToolChoiceCustomParam, +] class ResponseCreateParamsNonStreaming(ResponseCreateParamsBase, total=False): diff --git a/src/openai/types/responses/response_custom_tool_call.py b/src/openai/types/responses/response_custom_tool_call.py new file mode 100644 index 0000000000..38c650e662 --- /dev/null +++ b/src/openai/types/responses/response_custom_tool_call.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseCustomToolCall"] + + +class ResponseCustomToolCall(BaseModel): + call_id: str + """An identifier used to map this custom tool call to a tool call output.""" + + input: str + """The input for the custom tool call generated by the model.""" + + name: str + """The name of the custom tool being called.""" + + type: Literal["custom_tool_call"] + """The type of the custom tool call. Always `custom_tool_call`.""" + + id: Optional[str] = None + """The unique ID of the custom tool call in the OpenAI platform.""" diff --git a/src/openai/types/responses/response_custom_tool_call_input_delta_event.py b/src/openai/types/responses/response_custom_tool_call_input_delta_event.py new file mode 100644 index 0000000000..6c33102d75 --- /dev/null +++ b/src/openai/types/responses/response_custom_tool_call_input_delta_event.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseCustomToolCallInputDeltaEvent"] + + +class ResponseCustomToolCallInputDeltaEvent(BaseModel): + delta: str + """The incremental input data (delta) for the custom tool call.""" + + item_id: str + """Unique identifier for the API item associated with this event.""" + + output_index: int + """The index of the output this delta applies to.""" + + sequence_number: int + """The sequence number of this event.""" + + type: Literal["response.custom_tool_call_input.delta"] + """The event type identifier.""" diff --git a/src/openai/types/responses/response_custom_tool_call_input_done_event.py b/src/openai/types/responses/response_custom_tool_call_input_done_event.py new file mode 100644 index 0000000000..35a2fee22b --- /dev/null +++ b/src/openai/types/responses/response_custom_tool_call_input_done_event.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
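[Editor's note] The new `stream_options` and `verbosity` parameters mirror their Chat Completions counterparts (the test changes later in this patch exercise both against `client.responses.create`). A small streaming sketch; per the docstring above, disabling obfuscation is only advisable on trusted network links, and the model name is illustrative:

```python
from openai import OpenAI

client = OpenAI()

stream = client.responses.create(
    model="gpt-5",  # illustrative
    input="Write a haiku about unified diffs.",
    stream=True,
    stream_options={"include_obfuscation": False},  # trusted network link
    verbosity="low",
)
for event in stream:
    if event.type == "response.output_text.delta":
        print(event.delta, end="", flush=True)
```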
+ +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseCustomToolCallInputDoneEvent"] + + +class ResponseCustomToolCallInputDoneEvent(BaseModel): + input: str + """The complete input data for the custom tool call.""" + + item_id: str + """Unique identifier for the API item associated with this event.""" + + output_index: int + """The index of the output this event applies to.""" + + sequence_number: int + """The sequence number of this event.""" + + type: Literal["response.custom_tool_call_input.done"] + """The event type identifier.""" diff --git a/src/openai/types/responses/response_custom_tool_call_output.py b/src/openai/types/responses/response_custom_tool_call_output.py new file mode 100644 index 0000000000..a2b4cc3000 --- /dev/null +++ b/src/openai/types/responses/response_custom_tool_call_output.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseCustomToolCallOutput"] + + +class ResponseCustomToolCallOutput(BaseModel): + call_id: str + """The call ID, used to map this custom tool call output to a custom tool call.""" + + output: str + """The output from the custom tool call generated by your code.""" + + type: Literal["custom_tool_call_output"] + """The type of the custom tool call output. Always `custom_tool_call_output`.""" + + id: Optional[str] = None + """The unique ID of the custom tool call output in the OpenAI platform.""" diff --git a/src/openai/types/responses/response_custom_tool_call_output_param.py b/src/openai/types/responses/response_custom_tool_call_output_param.py new file mode 100644 index 0000000000..d52c525467 --- /dev/null +++ b/src/openai/types/responses/response_custom_tool_call_output_param.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseCustomToolCallOutputParam"] + + +class ResponseCustomToolCallOutputParam(TypedDict, total=False): + call_id: Required[str] + """The call ID, used to map this custom tool call output to a custom tool call.""" + + output: Required[str] + """The output from the custom tool call generated by your code.""" + + type: Required[Literal["custom_tool_call_output"]] + """The type of the custom tool call output. Always `custom_tool_call_output`.""" + + id: str + """The unique ID of the custom tool call output in the OpenAI platform.""" diff --git a/src/openai/types/responses/response_custom_tool_call_param.py b/src/openai/types/responses/response_custom_tool_call_param.py new file mode 100644 index 0000000000..e15beac29f --- /dev/null +++ b/src/openai/types/responses/response_custom_tool_call_param.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
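[Editor's note] `ResponseCustomToolCall` and `ResponseCustomToolCallOutput` pair up via `call_id`, much as function calls pair with function call outputs. A sketch of the round trip: `run_tool` is a hypothetical stand-in for your own code, and threading the outputs back via `previous_response_id` is an assumption of this sketch, not something the patch itself demonstrates:

```python
from openai import OpenAI

client = OpenAI()


def run_tool(name: str, input_text: str) -> str:
    # Hypothetical handler: execute the tool, return its raw-text output.
    return f"{name} received: {input_text}"


response = client.responses.create(
    model="gpt-5",  # illustrative
    input="Use the echo tool on 'hello'.",
    tools=[{"type": "custom", "name": "echo"}],
)

tool_outputs = []
for item in response.output:
    if item.type == "custom_tool_call":
        tool_outputs.append(
            {
                "type": "custom_tool_call_output",
                "call_id": item.call_id,
                "output": run_tool(item.name, item.input),
            }
        )

if tool_outputs:
    follow_up = client.responses.create(
        model="gpt-5",
        previous_response_id=response.id,
        input=tool_outputs,
    )
```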
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseCustomToolCallParam"] + + +class ResponseCustomToolCallParam(TypedDict, total=False): + call_id: Required[str] + """An identifier used to map this custom tool call to a tool call output.""" + + input: Required[str] + """The input for the custom tool call generated by the model.""" + + name: Required[str] + """The name of the custom tool being called.""" + + type: Required[Literal["custom_tool_call"]] + """The type of the custom tool call. Always `custom_tool_call`.""" + + id: str + """The unique ID of the custom tool call in the OpenAI platform.""" diff --git a/src/openai/types/responses/response_input_item.py b/src/openai/types/responses/response_input_item.py index 5fbd7c274b..d2b454fd2c 100644 --- a/src/openai/types/responses/response_input_item.py +++ b/src/openai/types/responses/response_input_item.py @@ -8,10 +8,12 @@ from .easy_input_message import EasyInputMessage from .response_output_message import ResponseOutputMessage from .response_reasoning_item import ResponseReasoningItem +from .response_custom_tool_call import ResponseCustomToolCall from .response_computer_tool_call import ResponseComputerToolCall from .response_function_tool_call import ResponseFunctionToolCall from .response_function_web_search import ResponseFunctionWebSearch from .response_file_search_tool_call import ResponseFileSearchToolCall +from .response_custom_tool_call_output import ResponseCustomToolCallOutput from .response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall from .response_input_message_content_list import ResponseInputMessageContentList from .response_computer_tool_call_output_screenshot import ResponseComputerToolCallOutputScreenshot @@ -299,6 +301,8 @@ class ItemReference(BaseModel): McpApprovalRequest, McpApprovalResponse, McpCall, + ResponseCustomToolCallOutput, + ResponseCustomToolCall, ItemReference, ], PropertyInfo(discriminator="type"), diff --git a/src/openai/types/responses/response_input_item_param.py b/src/openai/types/responses/response_input_item_param.py index 70cd9116a9..0d5dbda85c 100644 --- a/src/openai/types/responses/response_input_item_param.py +++ b/src/openai/types/responses/response_input_item_param.py @@ -8,10 +8,12 @@ from .easy_input_message_param import EasyInputMessageParam from .response_output_message_param import ResponseOutputMessageParam from .response_reasoning_item_param import ResponseReasoningItemParam +from .response_custom_tool_call_param import ResponseCustomToolCallParam from .response_computer_tool_call_param import ResponseComputerToolCallParam from .response_function_tool_call_param import ResponseFunctionToolCallParam from .response_function_web_search_param import ResponseFunctionWebSearchParam from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam +from .response_custom_tool_call_output_param import ResponseCustomToolCallOutputParam from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam from .response_input_message_content_list_param import ResponseInputMessageContentListParam from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam @@ -298,5 +300,7 @@ class ItemReference(TypedDict, total=False): McpApprovalRequest, McpApprovalResponse, McpCall, + ResponseCustomToolCallOutputParam, + ResponseCustomToolCallParam, ItemReference, ] diff --git a/src/openai/types/responses/response_input_param.py 
b/src/openai/types/responses/response_input_param.py index 024998671f..6ff36a4238 100644 --- a/src/openai/types/responses/response_input_param.py +++ b/src/openai/types/responses/response_input_param.py @@ -8,10 +8,12 @@ from .easy_input_message_param import EasyInputMessageParam from .response_output_message_param import ResponseOutputMessageParam from .response_reasoning_item_param import ResponseReasoningItemParam +from .response_custom_tool_call_param import ResponseCustomToolCallParam from .response_computer_tool_call_param import ResponseComputerToolCallParam from .response_function_tool_call_param import ResponseFunctionToolCallParam from .response_function_web_search_param import ResponseFunctionWebSearchParam from .response_file_search_tool_call_param import ResponseFileSearchToolCallParam +from .response_custom_tool_call_output_param import ResponseCustomToolCallOutputParam from .response_code_interpreter_tool_call_param import ResponseCodeInterpreterToolCallParam from .response_input_message_content_list_param import ResponseInputMessageContentListParam from .response_computer_tool_call_output_screenshot_param import ResponseComputerToolCallOutputScreenshotParam @@ -299,6 +301,8 @@ class ItemReference(TypedDict, total=False): McpApprovalRequest, McpApprovalResponse, McpCall, + ResponseCustomToolCallOutputParam, + ResponseCustomToolCallParam, ItemReference, ] diff --git a/src/openai/types/responses/response_output_item.py b/src/openai/types/responses/response_output_item.py index 62f8f6fb3f..2d3ee7b64e 100644 --- a/src/openai/types/responses/response_output_item.py +++ b/src/openai/types/responses/response_output_item.py @@ -7,6 +7,7 @@ from ..._models import BaseModel from .response_output_message import ResponseOutputMessage from .response_reasoning_item import ResponseReasoningItem +from .response_custom_tool_call import ResponseCustomToolCall from .response_computer_tool_call import ResponseComputerToolCall from .response_function_tool_call import ResponseFunctionToolCall from .response_function_web_search import ResponseFunctionWebSearch @@ -161,6 +162,7 @@ class McpApprovalRequest(BaseModel): McpCall, McpListTools, McpApprovalRequest, + ResponseCustomToolCall, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/responses/response_retrieve_params.py b/src/openai/types/responses/response_retrieve_params.py index a092bd7fb8..4013db85ce 100644 --- a/src/openai/types/responses/response_retrieve_params.py +++ b/src/openai/types/responses/response_retrieve_params.py @@ -17,6 +17,17 @@ class ResponseRetrieveParamsBase(TypedDict, total=False): See the `include` parameter for Response creation above for more information. """ + include_obfuscation: bool + """When true, stream obfuscation will be enabled. + + Stream obfuscation adds random characters to an `obfuscation` field on streaming + delta events to normalize payload sizes as a mitigation to certain side-channel + attacks. These obfuscation fields are included by default, but add a small + amount of overhead to the data stream. You can set `include_obfuscation` to + false to optimize for bandwidth if you trust the network links between your + application and the OpenAI API. 
+ """ + starting_after: int """The sequence number of the event after which to start streaming.""" diff --git a/src/openai/types/responses/response_stream_event.py b/src/openai/types/responses/response_stream_event.py index d62cf8969b..c0a317cd9d 100644 --- a/src/openai/types/responses/response_stream_event.py +++ b/src/openai/types/responses/response_stream_event.py @@ -40,9 +40,11 @@ from .response_file_search_call_searching_event import ResponseFileSearchCallSearchingEvent from .response_image_gen_call_in_progress_event import ResponseImageGenCallInProgressEvent from .response_mcp_list_tools_in_progress_event import ResponseMcpListToolsInProgressEvent +from .response_custom_tool_call_input_done_event import ResponseCustomToolCallInputDoneEvent from .response_reasoning_summary_part_done_event import ResponseReasoningSummaryPartDoneEvent from .response_reasoning_summary_text_done_event import ResponseReasoningSummaryTextDoneEvent from .response_web_search_call_in_progress_event import ResponseWebSearchCallInProgressEvent +from .response_custom_tool_call_input_delta_event import ResponseCustomToolCallInputDeltaEvent from .response_file_search_call_in_progress_event import ResponseFileSearchCallInProgressEvent from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent from .response_image_gen_call_partial_image_event import ResponseImageGenCallPartialImageEvent @@ -111,6 +113,8 @@ ResponseMcpListToolsInProgressEvent, ResponseOutputTextAnnotationAddedEvent, ResponseQueuedEvent, + ResponseCustomToolCallInputDeltaEvent, + ResponseCustomToolCallInputDoneEvent, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/responses/tool.py b/src/openai/types/responses/tool.py index 4399871e29..455ba01666 100644 --- a/src/openai/types/responses/tool.py +++ b/src/openai/types/responses/tool.py @@ -5,6 +5,7 @@ from ..._utils import PropertyInfo from ..._models import BaseModel +from .custom_tool import CustomTool from .computer_tool import ComputerTool from .function_tool import FunctionTool from .web_search_tool import WebSearchTool @@ -177,6 +178,16 @@ class LocalShell(BaseModel): Tool: TypeAlias = Annotated[ - Union[FunctionTool, FileSearchTool, WebSearchTool, ComputerTool, Mcp, CodeInterpreter, ImageGeneration, LocalShell], + Union[ + FunctionTool, + FileSearchTool, + WebSearchTool, + ComputerTool, + Mcp, + CodeInterpreter, + ImageGeneration, + LocalShell, + CustomTool, + ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/responses/tool_choice_allowed.py b/src/openai/types/responses/tool_choice_allowed.py new file mode 100644 index 0000000000..d7921dcb2a --- /dev/null +++ b/src/openai/types/responses/tool_choice_allowed.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ToolChoiceAllowed"] + + +class ToolChoiceAllowed(BaseModel): + mode: Literal["auto", "required"] + """Constrains the tools available to the model to a pre-defined set. + + `auto` allows the model to pick from among the allowed tools and generate a + message. + + `required` requires the model to call one or more of the allowed tools. + """ + + tools: List[Dict[str, object]] + """A list of tool definitions that the model should be allowed to call. 
+ + For the Responses API, the list of tool definitions might look like: + + ```json + [ + { "type": "function", "name": "get_weather" }, + { "type": "mcp", "server_label": "deepwiki" }, + { "type": "image_generation" } + ] + ``` + """ + + type: Literal["allowed_tools"] + """Allowed tool configuration type. Always `allowed_tools`.""" diff --git a/src/openai/types/responses/tool_choice_allowed_param.py b/src/openai/types/responses/tool_choice_allowed_param.py new file mode 100644 index 0000000000..0712cab43b --- /dev/null +++ b/src/openai/types/responses/tool_choice_allowed_param.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ToolChoiceAllowedParam"] + + +class ToolChoiceAllowedParam(TypedDict, total=False): + mode: Required[Literal["auto", "required"]] + """Constrains the tools available to the model to a pre-defined set. + + `auto` allows the model to pick from among the allowed tools and generate a + message. + + `required` requires the model to call one or more of the allowed tools. + """ + + tools: Required[Iterable[Dict[str, object]]] + """A list of tool definitions that the model should be allowed to call. + + For the Responses API, the list of tool definitions might look like: + + ```json + [ + { "type": "function", "name": "get_weather" }, + { "type": "mcp", "server_label": "deepwiki" }, + { "type": "image_generation" } + ] + ``` + """ + + type: Required[Literal["allowed_tools"]] + """Allowed tool configuration type. Always `allowed_tools`.""" diff --git a/src/openai/types/responses/tool_choice_custom.py b/src/openai/types/responses/tool_choice_custom.py new file mode 100644 index 0000000000..d600e53616 --- /dev/null +++ b/src/openai/types/responses/tool_choice_custom.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ToolChoiceCustom"] + + +class ToolChoiceCustom(BaseModel): + name: str + """The name of the custom tool to call.""" + + type: Literal["custom"] + """For custom tool calling, the type is always `custom`.""" diff --git a/src/openai/types/responses/tool_choice_custom_param.py b/src/openai/types/responses/tool_choice_custom_param.py new file mode 100644 index 0000000000..55bc53b730 --- /dev/null +++ b/src/openai/types/responses/tool_choice_custom_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
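[Editor's note] `ToolChoiceAllowed` narrows tool selection to a whitelist without removing the other tool definitions from the request. A sketch under the `ToolChoiceAllowedParam` shape defined below; the function tool definition and model name are illustrative:

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-5",  # illustrative
    input="What's the weather in Oslo?",
    tools=[
        {
            "type": "function",
            "name": "get_weather",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
            "strict": True,
        },
        {"type": "image_generation"},
    ],
    tool_choice={
        "type": "allowed_tools",
        "mode": "auto",  # or "required" to force a call
        "tools": [{"type": "function", "name": "get_weather"}],
    },
)
```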
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ToolChoiceCustomParam"] + + +class ToolChoiceCustomParam(TypedDict, total=False): + name: Required[str] + """The name of the custom tool to call.""" + + type: Required[Literal["custom"]] + """For custom tool calling, the type is always `custom`.""" diff --git a/src/openai/types/responses/tool_param.py b/src/openai/types/responses/tool_param.py index a977f06e3f..ef9ec2ae36 100644 --- a/src/openai/types/responses/tool_param.py +++ b/src/openai/types/responses/tool_param.py @@ -5,6 +5,7 @@ from typing import Dict, List, Union, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from .custom_tool_param import CustomToolParam from .computer_tool_param import ComputerToolParam from .function_tool_param import FunctionToolParam from .web_search_tool_param import WebSearchToolParam @@ -186,6 +187,7 @@ class LocalShell(TypedDict, total=False): CodeInterpreter, ImageGeneration, LocalShell, + CustomToolParam, ] diff --git a/src/openai/types/shared/__init__.py b/src/openai/types/shared/__init__.py index 6ad0ed5e01..2930b9ae3b 100644 --- a/src/openai/types/shared/__init__.py +++ b/src/openai/types/shared/__init__.py @@ -12,5 +12,8 @@ from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters from .response_format_text import ResponseFormatText as ResponseFormatText +from .custom_tool_input_format import CustomToolInputFormat as CustomToolInputFormat from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema +from .response_format_text_python import ResponseFormatTextPython as ResponseFormatTextPython +from .response_format_text_grammar import ResponseFormatTextGrammar as ResponseFormatTextGrammar diff --git a/src/openai/types/shared/chat_model.py b/src/openai/types/shared/chat_model.py index 309368a384..727c60c1c0 100644 --- a/src/openai/types/shared/chat_model.py +++ b/src/openai/types/shared/chat_model.py @@ -5,6 +5,13 @@ __all__ = ["ChatModel"] ChatModel: TypeAlias = Literal[ + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-2025-08-07", + "gpt-5-mini-2025-08-07", + "gpt-5-nano-2025-08-07", + "gpt-5-chat-latest", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", diff --git a/src/openai/types/shared/custom_tool_input_format.py b/src/openai/types/shared/custom_tool_input_format.py new file mode 100644 index 0000000000..53c8323ed2 --- /dev/null +++ b/src/openai/types/shared/custom_tool_input_format.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel + +__all__ = ["CustomToolInputFormat", "Text", "Grammar"] + + +class Text(BaseModel): + type: Literal["text"] + """Unconstrained text format. Always `text`.""" + + +class Grammar(BaseModel): + definition: str + """The grammar definition.""" + + syntax: Literal["lark", "regex"] + """The syntax of the grammar definition. One of `lark` or `regex`.""" + + type: Literal["grammar"] + """Grammar format. 
Always `grammar`.""" + + +CustomToolInputFormat: TypeAlias = Annotated[Union[Text, Grammar], PropertyInfo(discriminator="type")] diff --git a/src/openai/types/shared/reasoning.py b/src/openai/types/shared/reasoning.py index 107aab2e4a..24ce301526 100644 --- a/src/openai/types/shared/reasoning.py +++ b/src/openai/types/shared/reasoning.py @@ -11,12 +11,12 @@ class Reasoning(BaseModel): effort: Optional[ReasoningEffort] = None - """**o-series models only** - + """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. """ generate_summary: Optional[Literal["auto", "concise", "detailed"]] = None diff --git a/src/openai/types/shared/reasoning_effort.py b/src/openai/types/shared/reasoning_effort.py index ace21b67e4..4b960cd7e6 100644 --- a/src/openai/types/shared/reasoning_effort.py +++ b/src/openai/types/shared/reasoning_effort.py @@ -5,4 +5,4 @@ __all__ = ["ReasoningEffort"] -ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]] +ReasoningEffort: TypeAlias = Optional[Literal["minimal", "low", "medium", "high"]] diff --git a/src/openai/types/shared/response_format_text_grammar.py b/src/openai/types/shared/response_format_text_grammar.py new file mode 100644 index 0000000000..b02f99c1b8 --- /dev/null +++ b/src/openai/types/shared/response_format_text_grammar.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFormatTextGrammar"] + + +class ResponseFormatTextGrammar(BaseModel): + grammar: str + """The custom grammar for the model to follow.""" + + type: Literal["grammar"] + """The type of response format being defined. Always `grammar`.""" diff --git a/src/openai/types/shared/response_format_text_python.py b/src/openai/types/shared/response_format_text_python.py new file mode 100644 index 0000000000..4cd18d46fa --- /dev/null +++ b/src/openai/types/shared/response_format_text_python.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFormatTextPython"] + + +class ResponseFormatTextPython(BaseModel): + type: Literal["python"] + """The type of response format being defined. 
Always `python`.""" diff --git a/src/openai/types/shared_params/__init__.py b/src/openai/types/shared_params/__init__.py index 8894710807..b6c0912b0f 100644 --- a/src/openai/types/shared_params/__init__.py +++ b/src/openai/types/shared_params/__init__.py @@ -10,5 +10,6 @@ from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters from .response_format_text import ResponseFormatText as ResponseFormatText +from .custom_tool_input_format import CustomToolInputFormat as CustomToolInputFormat from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema diff --git a/src/openai/types/shared_params/chat_model.py b/src/openai/types/shared_params/chat_model.py index 6cd8e7f91f..a1e5ab9f30 100644 --- a/src/openai/types/shared_params/chat_model.py +++ b/src/openai/types/shared_params/chat_model.py @@ -7,6 +7,13 @@ __all__ = ["ChatModel"] ChatModel: TypeAlias = Literal[ + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-2025-08-07", + "gpt-5-mini-2025-08-07", + "gpt-5-nano-2025-08-07", + "gpt-5-chat-latest", "gpt-4.1", "gpt-4.1-mini", "gpt-4.1-nano", diff --git a/src/openai/types/shared_params/custom_tool_input_format.py b/src/openai/types/shared_params/custom_tool_input_format.py new file mode 100644 index 0000000000..37df393e39 --- /dev/null +++ b/src/openai/types/shared_params/custom_tool_input_format.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +__all__ = ["CustomToolInputFormat", "Text", "Grammar"] + + +class Text(TypedDict, total=False): + type: Required[Literal["text"]] + """Unconstrained text format. Always `text`.""" + + +class Grammar(TypedDict, total=False): + definition: Required[str] + """The grammar definition.""" + + syntax: Required[Literal["lark", "regex"]] + """The syntax of the grammar definition. One of `lark` or `regex`.""" + + type: Required[Literal["grammar"]] + """Grammar format. Always `grammar`.""" + + +CustomToolInputFormat: TypeAlias = Union[Text, Grammar] diff --git a/src/openai/types/shared_params/reasoning.py b/src/openai/types/shared_params/reasoning.py index 73e1a008df..7eab2c76f7 100644 --- a/src/openai/types/shared_params/reasoning.py +++ b/src/openai/types/shared_params/reasoning.py @@ -12,12 +12,12 @@ class Reasoning(TypedDict, total=False): effort: Optional[ReasoningEffort] - """**o-series models only** - + """ Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently - supported values are `low`, `medium`, and `high`. Reducing reasoning effort can - result in faster responses and fewer tokens used on reasoning in a response. + supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning + effort can result in faster responses and fewer tokens used on reasoning in a + response. 
""" generate_summary: Optional[Literal["auto", "concise", "detailed"]] diff --git a/src/openai/types/shared_params/reasoning_effort.py b/src/openai/types/shared_params/reasoning_effort.py index 6052c5ae15..4c095a28d7 100644 --- a/src/openai/types/shared_params/reasoning_effort.py +++ b/src/openai/types/shared_params/reasoning_effort.py @@ -7,4 +7,4 @@ __all__ = ["ReasoningEffort"] -ReasoningEffort: TypeAlias = Optional[Literal["low", "medium", "high"]] +ReasoningEffort: TypeAlias = Optional[Literal["minimal", "low", "medium", "high"]] diff --git a/tests/api_resources/beta/test_assistants.py b/tests/api_resources/beta/test_assistants.py index 8aeb654e38..875e024a51 100644 --- a/tests/api_resources/beta/test_assistants.py +++ b/tests/api_resources/beta/test_assistants.py @@ -36,7 +36,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: instructions="instructions", metadata={"foo": "string"}, name="name", - reasoning_effort="low", + reasoning_effort="minimal", response_format="auto", temperature=1, tool_resources={ @@ -135,7 +135,7 @@ def test_method_update_with_all_params(self, client: OpenAI) -> None: metadata={"foo": "string"}, model="string", name="name", - reasoning_effort="low", + reasoning_effort="minimal", response_format="auto", temperature=1, tool_resources={ @@ -272,7 +272,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> instructions="instructions", metadata={"foo": "string"}, name="name", - reasoning_effort="low", + reasoning_effort="minimal", response_format="auto", temperature=1, tool_resources={ @@ -371,7 +371,7 @@ async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> metadata={"foo": "string"}, model="string", name="name", - reasoning_effort="low", + reasoning_effort="minimal", response_format="auto", temperature=1, tool_resources={ diff --git a/tests/api_resources/beta/threads/test_runs.py b/tests/api_resources/beta/threads/test_runs.py index 86a296627e..440486bac5 100644 --- a/tests/api_resources/beta/threads/test_runs.py +++ b/tests/api_resources/beta/threads/test_runs.py @@ -59,7 +59,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: metadata={"foo": "string"}, model="string", parallel_tool_calls=True, - reasoning_effort="low", + reasoning_effort="minimal", response_format="auto", stream=False, temperature=1, @@ -150,7 +150,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: metadata={"foo": "string"}, model="string", parallel_tool_calls=True, - reasoning_effort="low", + reasoning_effort="minimal", response_format="auto", temperature=1, tool_choice="none", @@ -609,7 +609,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn metadata={"foo": "string"}, model="string", parallel_tool_calls=True, - reasoning_effort="low", + reasoning_effort="minimal", response_format="auto", stream=False, temperature=1, @@ -700,7 +700,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn metadata={"foo": "string"}, model="string", parallel_tool_calls=True, - reasoning_effort="low", + reasoning_effort="minimal", response_format="auto", temperature=1, tool_choice="none", diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 2758d980ed..358ea18cbb 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -73,7 +73,7 @@ def 
test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: }, presence_penalty=-2, prompt_cache_key="prompt-cache-key-1234", - reasoning_effort="low", + reasoning_effort="minimal", response_format={"type": "text"}, safety_identifier="safety-identifier-1234", seed=-9007199254740991, @@ -81,7 +81,10 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: stop="\n", store=True, stream=False, - stream_options={"include_usage": True}, + stream_options={ + "include_obfuscation": True, + "include_usage": True, + }, temperature=1, tool_choice="none", tools=[ @@ -98,6 +101,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: top_logprobs=0, top_p=1, user="user-1234", + verbosity="low", web_search_options={ "search_context_size": "low", "user_location": { @@ -202,14 +206,17 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: }, presence_penalty=-2, prompt_cache_key="prompt-cache-key-1234", - reasoning_effort="low", + reasoning_effort="minimal", response_format={"type": "text"}, safety_identifier="safety-identifier-1234", seed=-9007199254740991, service_tier="auto", stop="\n", store=True, - stream_options={"include_usage": True}, + stream_options={ + "include_obfuscation": True, + "include_usage": True, + }, temperature=1, tool_choice="none", tools=[ @@ -226,6 +233,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: top_logprobs=0, top_p=1, user="user-1234", + verbosity="low", web_search_options={ "search_context_size": "low", "user_location": { @@ -506,7 +514,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn }, presence_penalty=-2, prompt_cache_key="prompt-cache-key-1234", - reasoning_effort="low", + reasoning_effort="minimal", response_format={"type": "text"}, safety_identifier="safety-identifier-1234", seed=-9007199254740991, @@ -514,7 +522,10 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn stop="\n", store=True, stream=False, - stream_options={"include_usage": True}, + stream_options={ + "include_obfuscation": True, + "include_usage": True, + }, temperature=1, tool_choice="none", tools=[ @@ -531,6 +542,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn top_logprobs=0, top_p=1, user="user-1234", + verbosity="low", web_search_options={ "search_context_size": "low", "user_location": { @@ -635,14 +647,17 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn }, presence_penalty=-2, prompt_cache_key="prompt-cache-key-1234", - reasoning_effort="low", + reasoning_effort="minimal", response_format={"type": "text"}, safety_identifier="safety-identifier-1234", seed=-9007199254740991, service_tier="auto", stop="\n", store=True, - stream_options={"include_usage": True}, + stream_options={ + "include_obfuscation": True, + "include_usage": True, + }, temperature=1, tool_choice="none", tools=[ @@ -659,6 +674,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn top_logprobs=0, top_p=1, user="user-1234", + verbosity="low", web_search_options={ "search_context_size": "low", "user_location": { diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py index 1c5271df75..a8fb0e59eb 100644 --- a/tests/api_resources/test_completions.py +++ b/tests/api_resources/test_completions.py @@ -41,7 +41,10 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> 
None: seed=0, stop="\n", stream=False, - stream_options={"include_usage": True}, + stream_options={ + "include_obfuscation": True, + "include_usage": True, + }, suffix="test.", temperature=1, top_p=1, @@ -100,7 +103,10 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: presence_penalty=-2, seed=0, stop="\n", - stream_options={"include_usage": True}, + stream_options={ + "include_obfuscation": True, + "include_usage": True, + }, suffix="test.", temperature=1, top_p=1, @@ -165,7 +171,10 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn seed=0, stop="\n", stream=False, - stream_options={"include_usage": True}, + stream_options={ + "include_obfuscation": True, + "include_usage": True, + }, suffix="test.", temperature=1, top_p=1, @@ -224,7 +233,10 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn presence_penalty=-2, seed=0, stop="\n", - stream_options={"include_usage": True}, + stream_options={ + "include_obfuscation": True, + "include_usage": True, + }, suffix="test.", temperature=1, top_p=1, diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py index 63e47d8a69..4f8c88fa27 100644 --- a/tests/api_resources/test_responses.py +++ b/tests/api_resources/test_responses.py @@ -45,7 +45,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: }, prompt_cache_key="prompt-cache-key-1234", reasoning={ - "effort": "low", + "effort": "minimal", "generate_summary": "auto", "summary": "auto", }, @@ -53,6 +53,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: service_tier="auto", store=True, stream=False, + stream_options={"include_obfuscation": True}, temperature=1, text={"format": {"type": "text"}}, tool_choice="none", @@ -69,6 +70,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: top_p=1, truncation="auto", user="user-1234", + verbosity="low", ) assert_matches_type(Response, response, path=["response"]) @@ -120,13 +122,14 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: }, prompt_cache_key="prompt-cache-key-1234", reasoning={ - "effort": "low", + "effort": "minimal", "generate_summary": "auto", "summary": "auto", }, safety_identifier="safety-identifier-1234", service_tier="auto", store=True, + stream_options={"include_obfuscation": True}, temperature=1, text={"format": {"type": "text"}}, tool_choice="none", @@ -143,6 +146,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: top_p=1, truncation="auto", user="user-1234", + verbosity="low", ) response_stream.response.close() @@ -181,6 +185,7 @@ def test_method_retrieve_with_all_params_overload_1(self, client: OpenAI) -> Non response = client.responses.retrieve( response_id="resp_677efb5139a88190b512bc3fef8e535d", include=["code_interpreter_call.outputs"], + include_obfuscation=True, starting_after=0, stream=False, ) @@ -231,6 +236,7 @@ def test_method_retrieve_with_all_params_overload_2(self, client: OpenAI) -> Non response_id="resp_677efb5139a88190b512bc3fef8e535d", stream=True, include=["code_interpreter_call.outputs"], + include_obfuscation=True, starting_after=0, ) response_stream.response.close() @@ -386,7 +392,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn }, prompt_cache_key="prompt-cache-key-1234", reasoning={ - "effort": "low", + "effort": "minimal", "generate_summary": "auto", "summary": "auto", }, @@ -394,6 
+400,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn service_tier="auto", store=True, stream=False, + stream_options={"include_obfuscation": True}, temperature=1, text={"format": {"type": "text"}}, tool_choice="none", @@ -410,6 +417,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn top_p=1, truncation="auto", user="user-1234", + verbosity="low", ) assert_matches_type(Response, response, path=["response"]) @@ -461,13 +469,14 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn }, prompt_cache_key="prompt-cache-key-1234", reasoning={ - "effort": "low", + "effort": "minimal", "generate_summary": "auto", "summary": "auto", }, safety_identifier="safety-identifier-1234", service_tier="auto", store=True, + stream_options={"include_obfuscation": True}, temperature=1, text={"format": {"type": "text"}}, tool_choice="none", @@ -484,6 +493,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn top_p=1, truncation="auto", user="user-1234", + verbosity="low", ) await response_stream.response.aclose() @@ -522,6 +532,7 @@ async def test_method_retrieve_with_all_params_overload_1(self, async_client: As response = await async_client.responses.retrieve( response_id="resp_677efb5139a88190b512bc3fef8e535d", include=["code_interpreter_call.outputs"], + include_obfuscation=True, starting_after=0, stream=False, ) @@ -572,6 +583,7 @@ async def test_method_retrieve_with_all_params_overload_2(self, async_client: As response_id="resp_677efb5139a88190b512bc3fef8e535d", stream=True, include=["code_interpreter_call.outputs"], + include_obfuscation=True, starting_after=0, ) await response_stream.response.aclose() From 657f551dbe583ffb259d987dafae12c6211fba06 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 7 Aug 2025 18:11:34 +0100 Subject: [PATCH 49/90] fix(types): correct tool types --- src/openai/lib/streaming/responses/_events.py | 4 ++++ src/openai/types/responses/tool_param.py | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/openai/lib/streaming/responses/_events.py b/src/openai/lib/streaming/responses/_events.py index de3342ec9d..bdc47b834a 100644 --- a/src/openai/lib/streaming/responses/_events.py +++ b/src/openai/lib/streaming/responses/_events.py @@ -39,9 +39,11 @@ ResponseMcpListToolsInProgressEvent, ResponseWebSearchCallCompletedEvent, ResponseWebSearchCallSearchingEvent, + ResponseCustomToolCallInputDoneEvent, ResponseFileSearchCallCompletedEvent, ResponseFileSearchCallSearchingEvent, ResponseWebSearchCallInProgressEvent, + ResponseCustomToolCallInputDeltaEvent, ResponseFileSearchCallInProgressEvent, ResponseImageGenCallPartialImageEvent, ResponseReasoningSummaryPartDoneEvent, @@ -139,6 +141,8 @@ class ResponseCompletedEvent(RawResponseCompletedEvent, GenericModel, Generic[Te ResponseQueuedEvent, ResponseReasoningTextDeltaEvent, ResponseReasoningTextDoneEvent, + ResponseCustomToolCallInputDeltaEvent, + ResponseCustomToolCallInputDoneEvent, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/responses/tool_param.py b/src/openai/types/responses/tool_param.py index ef9ec2ae36..f91e758559 100644 --- a/src/openai/types/responses/tool_param.py +++ b/src/openai/types/responses/tool_param.py @@ -5,12 +5,12 @@ from typing import Dict, List, Union, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..chat import ChatCompletionFunctionToolParam from .custom_tool_param import CustomToolParam from 
.computer_tool_param import ComputerToolParam from .function_tool_param import FunctionToolParam from .web_search_tool_param import WebSearchToolParam from .file_search_tool_param import FileSearchToolParam -from ..chat.chat_completion_tool_param import ChatCompletionToolParam __all__ = [ "ToolParam", @@ -191,4 +191,4 @@ class LocalShell(TypedDict, total=False): ] -ParseableToolParam: TypeAlias = Union[ToolParam, ChatCompletionToolParam] +ParseableToolParam: TypeAlias = Union[ToolParam, ChatCompletionFunctionToolParam] From 445af1e3d07fcfe1d047ced2436318419b7c889c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 7 Aug 2025 17:12:09 +0000 Subject: [PATCH 50/90] release: 1.99.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 18 ++++++++++++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 21 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 41be9f1017..9472ef89a3 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.99.1" + ".": "1.99.2" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 4585135511..a6ac2ffb3f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## 1.99.2 (2025-08-07) + +Full Changelog: [v1.99.1...v1.99.2](https://github.com/openai/openai-python/compare/v1.99.1...v1.99.2) + +### Features + +* **api:** adds GPT-5 and new API features: platform.openai.com/docs/guides/gpt-5 ([ed370d8](https://github.com/openai/openai-python/commit/ed370d805e4d5d1ec14a136f5b2516751277059f)) + + +### Bug Fixes + +* **types:** correct tool types ([0c57bd7](https://github.com/openai/openai-python/commit/0c57bd7f2183a20b714d04edea380a4df0464a40)) + + +### Chores + +* **tests:** bump inline-snapshot dependency ([e236fde](https://github.com/openai/openai-python/commit/e236fde99a335fcaac9760f324e4807ce2cf7cba)) + ## 1.99.1 (2025-08-05) Full Changelog: [v1.99.0...v1.99.1](https://github.com/openai/openai-python/compare/v1.99.0...v1.99.1) diff --git a/pyproject.toml b/pyproject.toml index c71e8c135b..7ea0a63597 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.99.1" +version = "1.99.2" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 3fa80adba0..088935379f 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.99.1" # x-release-please-version +__version__ = "1.99.2" # x-release-please-version From e3c0612c2cf39e7289fa3d91116c6eae83e534e6 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 7 Aug 2025 18:27:13 +0000 Subject: [PATCH 51/90] codegen metadata --- .stats.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.stats.yml b/.stats.yml index 9c1b4e4c54..4d8b1f059e 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f5c45f4ae5c2075cbc603d6910bba3da31c23714c209fbd3fd82a94f634a126b.yml openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba -config_hash: 9a64321968e21ed72f5c0e02164ea00d +config_hash: e53ea2d984c4e05a57eb0227fa379b2b From e574c12f9e2e738451ac010bdc52f4ee59813cfb Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Thu, 7 Aug 2025 20:22:50 +0100 Subject: [PATCH 52/90] fix(responses): add output_text back --- src/openai/types/responses/response.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index 07a82cb4ac..5ebb18fda4 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -259,3 +259,17 @@ class Response(BaseModel): [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). """ + @property + def output_text(self) -> str: + """Convenience property that aggregates all `output_text` items from the `output` list. + + If no `output_text` content blocks exist, then an empty string is returned. + """ + texts: List[str] = [] + for output in self.output: + if output.type == "message": + for content in output.content: + if content.type == "output_text": + texts.append(content.text) + + return "".join(texts) From e4ec91e776d0155752ab004432dbcd1ad8a81d98 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 7 Aug 2025 19:23:26 +0000 Subject: [PATCH 53/90] release: 1.99.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 9472ef89a3..62255b70d8 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.99.2" + ".": "1.99.3" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index a6ac2ffb3f..6d06c6548e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.99.3 (2025-08-07) + +Full Changelog: [v1.99.2...v1.99.3](https://github.com/openai/openai-python/compare/v1.99.2...v1.99.3) + +### Bug Fixes + +* **responses:** add output_text back ([585a4f1](https://github.com/openai/openai-python/commit/585a4f15e5a088bf8afee745bc4a7803775ac283)) + ## 1.99.2 (2025-08-07) Full Changelog: [v1.99.1...v1.99.2](https://github.com/openai/openai-python/compare/v1.99.1...v1.99.2) diff --git a/pyproject.toml b/pyproject.toml index 7ea0a63597..b2fc253ae6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.99.2" +version = "1.99.3" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py 
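A usage sketch for the `output_text` property restored in PATCH 52 (model and prompt are illustrative): it concatenates every `output_text` content block across `message` items in `response.output`, returning an empty string when none exist.

    from openai import OpenAI

    client = OpenAI()
    response = client.responses.create(model="gpt-5", input="Write a haiku about diffs.")
    # Same result as manually iterating response.output and collecting
    # content blocks whose type is "output_text".
    print(response.output_text)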
index 088935379f..982cd9724f 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.99.2"  # x-release-please-version
+__version__ = "1.99.3"  # x-release-please-version

From c81195ea2c8e7cded4d6e6fe66d0062efbf3d744 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 7 Aug 2025 19:56:02 +0000
Subject: [PATCH 54/90] codegen metadata

---
 .stats.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.stats.yml b/.stats.yml
index 4d8b1f059e..b82ecf95fa 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-f5c45f4ae5c2075cbc603d6910bba3da31c23714c209fbd3fd82a94f634a126b.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d7e255da603b878e7e823135520211ce6a9e02890c9d549bbf3953a877ee5ef3.yml
 openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba
-config_hash: e53ea2d984c4e05a57eb0227fa379b2b
+config_hash: f0e0ce47bee61bd779ccaad22930f186

From 2ae42a399755828f74ced0f2fa41d9bd3a83a198 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 7 Aug 2025 20:09:45 +0000
Subject: [PATCH 55/90] codegen metadata

---
 .stats.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.stats.yml b/.stats.yml
index b82ecf95fa..a73b73fc2c 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 111
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d7e255da603b878e7e823135520211ce6a9e02890c9d549bbf3953a877ee5ef3.yml
 openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba
-config_hash: f0e0ce47bee61bd779ccaad22930f186
+config_hash: 2e7cf948f94e24f94c7d12ba2de2734a

From 458a542a5f08dcf481292dfb04879cab27629b0c Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 8 Aug 2025 11:24:58 +0000
Subject: [PATCH 56/90] fix(types): rename chat completion tool

---
 .stats.yml                                    |  4 +--
 api.md                                        |  2 +-
 src/openai/lib/_parsing/_completions.py       | 18 ++++++-------
 src/openai/lib/streaming/chat/_completions.py | 15 +++++------
 .../resources/chat/completions/completions.py | 26 +++++++++----------
 src/openai/types/chat/__init__.py             |  2 +-
 ...py => chat_completion_tool_union_param.py} |  4 +--
 .../types/chat/completion_create_params.py    |  4 +--
 8 files changed, 37 insertions(+), 38 deletions(-)
 rename src/openai/types/chat/{chat_completion_tool_param.py => chat_completion_tool_union_param.py} (69%)

diff --git a/.stats.yml b/.stats.yml
index a73b73fc2c..6a34d9da6e 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-d7e255da603b878e7e823135520211ce6a9e02890c9d549bbf3953a877ee5ef3.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-56d3a72a5caa187aebcf9de169a6a28a9dc3f70a79d7467a03a9e22595936066.yml
 openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba
-config_hash: 2e7cf948f94e24f94c7d12ba2de2734a
+config_hash: 7e18239879286d68a48ac5487a649aa6
diff --git a/api.md b/api.md
index f05b3f61ee..f58c401311 100644
--- a/api.md
+++ b/api.md
@@ -79,7 +79,7 @@ from openai.types.chat import (
     ChatCompletionStreamOptions,
     ChatCompletionSystemMessageParam,
     ChatCompletionTokenLogprob,
-    ChatCompletionTool,
+    ChatCompletionToolUnion,
     ChatCompletionToolChoiceOption,
     ChatCompletionToolMessageParam,
     ChatCompletionUserMessageParam,
diff --git a/src/openai/lib/_parsing/_completions.py b/src/openai/lib/_parsing/_completions.py
index e14c33864d..fc0bd05e4d 100644
--- a/src/openai/lib/_parsing/_completions.py
+++ b/src/openai/lib/_parsing/_completions.py
@@ -21,13 +21,13 @@
     ChatCompletionMessage,
     ParsedFunctionToolCall,
     ParsedChatCompletionMessage,
+    ChatCompletionToolUnionParam,
    ChatCompletionFunctionToolParam,
     completion_create_params,
 )
 from ..._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError
 from ...types.shared_params import FunctionDefinition
 from ...types.chat.completion_create_params import ResponseFormat as ResponseFormatParam
-from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam
 from ...types.chat.chat_completion_message_function_tool_call import Function
 
 ResponseFormatT = TypeVar(
@@ -41,7 +41,7 @@
 
 
 def is_strict_chat_completion_tool_param(
-    tool: ChatCompletionToolParam,
+    tool: ChatCompletionToolUnionParam,
 ) -> TypeGuard[ChatCompletionFunctionToolParam]:
     """Check if the given tool is a strict ChatCompletionFunctionToolParam."""
     if not tool["type"] == "function":
@@ -53,7 +53,7 @@ def is_strict_chat_completion_tool_param(
 
 
 def select_strict_chat_completion_tools(
-    tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+    tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
 ) -> Iterable[ChatCompletionFunctionToolParam] | NotGiven:
     """Select only the strict ChatCompletionFunctionToolParams from the given tools."""
     if not is_given(tools):
@@ -63,7 +63,7 @@ def select_strict_chat_completion_tools(
 
 
 def validate_input_tools(
-    tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+    tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
 ) -> Iterable[ChatCompletionFunctionToolParam] | NotGiven:
     if not is_given(tools):
         return NOT_GIVEN
@@ -86,7 +86,7 @@ def validate_input_tools(
 def parse_chat_completion(
     *,
     response_format: type[ResponseFormatT] | completion_create_params.ResponseFormat | NotGiven,
-    input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+    input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven,
     chat_completion: ChatCompletion | ParsedChatCompletion[object],
 ) -> ParsedChatCompletion[ResponseFormatT]:
     if is_given(input_tools):
@@ -166,13 +166,13 @@ def parse_chat_completion(
 
 
 def get_input_tool_by_name(
-    *, input_tools: list[ChatCompletionToolParam], name: str
+    *, input_tools: list[ChatCompletionToolUnionParam], name: str
 ) -> ChatCompletionFunctionToolParam | None:
     return next((t for t in input_tools if t["type"] == "function" and t.get("function", {}).get("name") == name), None)
 
 
 def parse_function_tool_arguments(
-    *, input_tools: list[ChatCompletionToolParam], function: Function | ParsedFunction
+    *, input_tools: list[ChatCompletionToolUnionParam], function: Function | ParsedFunction
 ) -> object | None:
     input_tool = get_input_tool_by_name(input_tools=input_tools, name=function.name)
     if not input_tool:
@@ -218,7 +218,7 @@ def solve_response_format_t(
 def has_parseable_input(
     *,
     response_format: type | ResponseFormatParam | NotGiven,
-    input_tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+    input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
 ) -> bool:
     if has_rich_response_format(response_format):
         return True
@@ -246,7 +246,7 @@ def is_response_format_param(response_format: object) -> TypeGuard[ResponseForma
     return is_dict(response_format)
 
 
-def is_parseable_tool(input_tool: ChatCompletionToolParam) -> bool:
+def is_parseable_tool(input_tool: ChatCompletionToolUnionParam) -> bool:
     if input_tool["type"] != "function":
         return False
diff --git a/src/openai/lib/streaming/chat/_completions.py b/src/openai/lib/streaming/chat/_completions.py
index 1dff628a20..52a6a550b2 100644
--- a/src/openai/lib/streaming/chat/_completions.py
+++ b/src/openai/lib/streaming/chat/_completions.py
@@ -37,12 +37,11 @@
     parse_function_tool_arguments,
 )
 from ...._streaming import Stream, AsyncStream
-from ....types.chat import ChatCompletionChunk, ParsedChatCompletion
+from ....types.chat import ChatCompletionChunk, ParsedChatCompletion, ChatCompletionToolUnionParam
 from ...._exceptions import LengthFinishReasonError, ContentFilterFinishReasonError
 from ....types.chat.chat_completion import ChoiceLogprobs
 from ....types.chat.chat_completion_chunk import Choice as ChoiceChunk
 from ....types.chat.completion_create_params import ResponseFormat as ResponseFormatParam
-from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
 
 
 class ChatCompletionStream(Generic[ResponseFormatT]):
@@ -59,7 +58,7 @@ def __init__(
         self,
         *,
         raw_stream: Stream[ChatCompletionChunk],
         response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
-        input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+        input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven,
     ) -> None:
         self._raw_stream = raw_stream
         self._response = raw_stream.response
@@ -140,7 +139,7 @@ def __init__(
         api_request: Callable[[], Stream[ChatCompletionChunk]],
         *,
         response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
-        input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+        input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven,
     ) -> None:
         self.__stream: ChatCompletionStream[ResponseFormatT] | None = None
         self.__api_request = api_request
@@ -182,7 +181,7 @@ def __init__(
         *,
         raw_stream: AsyncStream[ChatCompletionChunk],
         response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
-        input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+        input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven,
     ) -> None:
         self._raw_stream = raw_stream
         self._response = raw_stream.response
@@ -263,7 +262,7 @@ def __init__(
         api_request: Awaitable[AsyncStream[ChatCompletionChunk]],
         *,
         response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
-        input_tools: Iterable[ChatCompletionToolParam] | NotGiven,
+        input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven,
     ) -> None:
         self.__stream: AsyncChatCompletionStream[ResponseFormatT] | None = None
         self.__api_request = api_request
@@ -315,7 +314,7 @@ class ChatCompletionStreamState(Generic[ResponseFormatT]):
     def __init__(
         self,
         *,
-        input_tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        input_tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven = NOT_GIVEN,
     ) -> None:
         self.__current_completion_snapshot: ParsedChatCompletionSnapshot | None = None
@@ -585,7 +584,7 @@ def _build_events(
 
 
 class ChoiceEventState:
-    def __init__(self, *, input_tools: list[ChatCompletionToolParam]) -> None:
+    def __init__(self, *, input_tools: list[ChatCompletionToolUnionParam]) -> None:
         self._input_tools = input_tools
 
         self._content_done = False
diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py
index 65f91396bd..9404d85192 100644
--- a/src/openai/resources/chat/completions/completions.py
+++ b/src/openai/resources/chat/completions/completions.py
@@ -47,9 +47,9 @@
 from ....types.chat.chat_completion_chunk import ChatCompletionChunk
 from ....types.chat.parsed_chat_completion import ParsedChatCompletion
 from ....types.chat.chat_completion_deleted import ChatCompletionDeleted
-from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
 from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
 from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
+from ....types.chat.chat_completion_tool_union_param import ChatCompletionToolUnionParam
 from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
 from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
 from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
@@ -111,7 +111,7 @@ def parse(
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -266,7 +266,7 @@ def create(
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -555,7 +555,7 @@ def create(
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -844,7 +844,7 @@ def create(
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -1133,7 +1133,7 @@ def create(
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -1408,7 +1408,7 @@ def stream(
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -1550,7 +1550,7 @@ async def parse(
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -1705,7 +1705,7 @@ async def create(
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -1994,7 +1994,7 @@ async def create(
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -2283,7 +2283,7 @@ async def create(
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -2572,7 +2572,7 @@ async def create(
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -2847,7 +2847,7 @@ def stream(
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
-        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
+        tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py
index ce1cf4522a..1a814816cf 100644
--- a/src/openai/types/chat/__init__.py
+++ b/src/openai/types/chat/__init__.py
@@ -21,13 +21,13 @@
     ParsedFunction as ParsedFunction,
     ParsedFunctionToolCall as ParsedFunctionToolCall,
 )
-from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam
 from .chat_completion_audio_param import ChatCompletionAudioParam as ChatCompletionAudioParam
 from .chat_completion_function_tool import ChatCompletionFunctionTool as ChatCompletionFunctionTool
 from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam
 from .chat_completion_store_message import ChatCompletionStoreMessage as ChatCompletionStoreMessage
 from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
 from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort as ChatCompletionReasoningEffort
+from .chat_completion_tool_union_param import ChatCompletionToolUnionParam as ChatCompletionToolUnionParam
 from .chat_completion_content_part_text import ChatCompletionContentPartText as ChatCompletionContentPartText
 from .chat_completion_custom_tool_param import ChatCompletionCustomToolParam as ChatCompletionCustomToolParam
 from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall
diff --git a/src/openai/types/chat/chat_completion_tool_param.py b/src/openai/types/chat/chat_completion_tool_union_param.py
similarity index 69%
rename from src/openai/types/chat/chat_completion_tool_param.py
rename to src/openai/types/chat/chat_completion_tool_union_param.py
index 7cd9743ea3..0f8bf7b0e7 100644
--- a/src/openai/types/chat/chat_completion_tool_param.py
+++ b/src/openai/types/chat/chat_completion_tool_union_param.py
@@ -8,6 +8,6 @@
 from .chat_completion_custom_tool_param import ChatCompletionCustomToolParam
 from .chat_completion_function_tool_param import ChatCompletionFunctionToolParam
 
-__all__ = ["ChatCompletionToolParam"]
+__all__ = ["ChatCompletionToolUnionParam"]
 
-ChatCompletionToolParam: TypeAlias = Union[ChatCompletionFunctionToolParam, ChatCompletionCustomToolParam]
+ChatCompletionToolUnionParam: TypeAlias = Union[ChatCompletionFunctionToolParam, ChatCompletionCustomToolParam]
diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py
index 011067af1a..a3bc90b0a2 100644
--- a/src/openai/types/chat/completion_create_params.py
+++ b/src/openai/types/chat/completion_create_params.py
@@ -8,9 +8,9 @@
 from ..shared.chat_model import ChatModel
 from ..shared_params.metadata import Metadata
 from ..shared.reasoning_effort import ReasoningEffort
-from .chat_completion_tool_param import ChatCompletionToolParam
 from .chat_completion_audio_param import ChatCompletionAudioParam
 from .chat_completion_message_param import ChatCompletionMessageParam
+from .chat_completion_tool_union_param import ChatCompletionToolUnionParam
 from ..shared_params.function_parameters import FunctionParameters
 from ..shared_params.response_format_text import ResponseFormatText
 from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
@@ -284,7 +284,7 @@ class CompletionCreateParamsBase(TypedDict, total=False):
     are present.
     """
 
-    tools: Iterable[ChatCompletionToolParam]
+    tools: Iterable[ChatCompletionToolUnionParam]
     """A list of tools the model may call.
 
     You can provide either

From 05a35a57b2fc39acd9132e9a7b9f25d4a59be698 Mon Sep 17 00:00:00 2001
From: Robert Craigie
Date: Fri, 8 Aug 2025 12:28:58 +0100
Subject: [PATCH 57/90] fix(types): revert ChatCompletionToolParam to a TypedDict

---
 src/openai/types/chat/__init__.py                   |  1 +
 src/openai/types/chat/chat_completion_tool_param.py | 11 +++++++++++
 tests/compat/test_tool_param.py                     |  8 ++++++++
 3 files changed, 20 insertions(+)
 create mode 100644 src/openai/types/chat/chat_completion_tool_param.py
 create mode 100644 tests/compat/test_tool_param.py

diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py
index 1a814816cf..c9e77ff41c 100644
--- a/src/openai/types/chat/__init__.py
+++ b/src/openai/types/chat/__init__.py
@@ -21,6 +21,7 @@
     ParsedFunction as ParsedFunction,
     ParsedFunctionToolCall as ParsedFunctionToolCall,
 )
+from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam
 from .chat_completion_audio_param import ChatCompletionAudioParam as ChatCompletionAudioParam
 from .chat_completion_function_tool import ChatCompletionFunctionTool as ChatCompletionFunctionTool
 from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam
diff --git a/src/openai/types/chat/chat_completion_tool_param.py b/src/openai/types/chat/chat_completion_tool_param.py
new file mode 100644
index 0000000000..ef3b6d07c6
--- /dev/null
+++ b/src/openai/types/chat/chat_completion_tool_param.py
@@ -0,0 +1,11 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypeAlias
+
+from .chat_completion_function_tool_param import ChatCompletionFunctionToolParam
+
+__all__ = ["ChatCompletionToolParam"]
+
+ChatCompletionToolParam: TypeAlias = ChatCompletionFunctionToolParam
diff --git a/tests/compat/test_tool_param.py b/tests/compat/test_tool_param.py
new file mode 100644
index 0000000000..f2f84c6e94
--- /dev/null
+++ b/tests/compat/test_tool_param.py
@@ -0,0 +1,8 @@
+from openai.types.chat import ChatCompletionToolParam
+
+
+def test_tool_param_can_be_instantiated() -> None:
+    assert ChatCompletionToolParam(type="function", function={"name": "test"}) == {
+        "function": {"name": "test"},
+        "type": "function",
+    }

From 09f98acf6bf7b66e98a4b6c3e37433ccdee0e20e Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 8 Aug 2025 11:32:31 +0000
Subject: [PATCH 58/90] release: 1.99.4

---
 .release-please-manifest.json | 2 +-
 CHANGELOG.md                  | 9 +++++++++
 pyproject.toml                | 2 +-
 src/openai/_version.py        | 2 +-
 4 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 62255b70d8..cdb9c7d0d7 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.99.3"
+  ".": "1.99.4"
 }
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6d06c6548e..f8fdb7a268 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,14 @@
 # Changelog
 
+## 1.99.4 (2025-08-08)
+
+Full Changelog: [v1.99.3...v1.99.4](https://github.com/openai/openai-python/compare/v1.99.3...v1.99.4)
+
+### Bug Fixes
+
+* **types:** rename chat completion tool ([8d3bf88](https://github.com/openai/openai-python/commit/8d3bf88f5bc11cf30b8b050c24b2cc5a3807614f))
+* **types:** revert ChatCompletionToolParam to a TypedDict ([3f4ae72](https://github.com/openai/openai-python/commit/3f4ae725af53e631ddc128c1c6862ecf0b08e073))
+
 ## 1.99.3 (2025-08-07)
 
 Full Changelog: [v1.99.2...v1.99.3](https://github.com/openai/openai-python/compare/v1.99.2...v1.99.3)
diff --git a/pyproject.toml b/pyproject.toml
index b2fc253ae6..b041682135 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.99.3"
+version = "1.99.4"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/src/openai/_version.py b/src/openai/_version.py
index 982cd9724f..04f835f838 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
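A sketch of the compatibility the revert in PATCH 57 preserves (tool definitions are illustrative; the custom-tool dict shape is an assumption based on `ChatCompletionCustomToolParam`, whose fields are not shown in these patches):

    from openai.types.chat import ChatCompletionToolParam, ChatCompletionToolUnionParam

    # Pre-existing code that builds plain function-tool dicts keeps type-checking.
    fn_tool: ChatCompletionToolParam = {
        "type": "function",
        "function": {"name": "get_weather"},
    }

    # The union alias additionally admits the new custom tool shape.
    custom_tool: ChatCompletionToolUnionParam = {
        "type": "custom",
        "custom": {"name": "code_exec"},
    }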
 
 __title__ = "openai"
-__version__ = "1.99.3"  # x-release-please-version
+__version__ = "1.99.4"  # x-release-please-version

From f4e41b87f7bf5597dadb0e42e11d33c093e89b5c Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 8 Aug 2025 14:56:34 +0000
Subject: [PATCH 59/90] fix(client): fix verbosity parameter location in Responses

fixes error with unsupported `verbosity` parameter by correctly placing
it inside the `text` parameter
---
 .stats.yml                                    |  4 +--
 src/openai/resources/responses/responses.py   | 34 -------------------
 .../types/responses/response_create_params.py |  8 -----
 .../types/responses/response_text_config.py   |  9 +++++
 .../responses/response_text_config_param.py   | 11 +++++-
 tests/api_resources/test_responses.py         | 24 ++++++++-----
 6 files changed, 37 insertions(+), 53 deletions(-)

diff --git a/.stats.yml b/.stats.yml
index 6a34d9da6e..1c85ee4a0c 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 111
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-56d3a72a5caa187aebcf9de169a6a28a9dc3f70a79d7467a03a9e22595936066.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6a1bfd4738fff02ef5becc3fdb2bf0cd6c026f2c924d4147a2a515474477dd9a.yml
 openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba
-config_hash: 7e18239879286d68a48ac5487a649aa6
+config_hash: a67c5e195a59855fe8a5db0dc61a3e7f
diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py
index 5ba22418ed..8983daf278 100644
--- a/src/openai/resources/responses/responses.py
+++ b/src/openai/resources/responses/responses.py
@@ -102,7 +102,6 @@ def create(
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -291,10 +290,6 @@ def create(
             similar requests and to help OpenAI detect and prevent abuse.
             [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
-          verbosity: Constrains the verbosity of the model's response. Lower values will result in
-              more concise responses, while higher values will result in more verbose
-              responses. Currently supported values are `low`, `medium`, and `high`.
-
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -335,7 +330,6 @@ def create(
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -524,10 +518,6 @@ def create(
             similar requests and to help OpenAI detect and prevent abuse.
             [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
-          verbosity: Constrains the verbosity of the model's response. Lower values will result in
-              more concise responses, while higher values will result in more verbose
-              responses. Currently supported values are `low`, `medium`, and `high`.
-
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -568,7 +558,6 @@ def create(
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -757,10 +746,6 @@ def create(
             similar requests and to help OpenAI detect and prevent abuse.
             [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
-          verbosity: Constrains the verbosity of the model's response. Lower values will result in
-              more concise responses, while higher values will result in more verbose
-              responses. Currently supported values are `low`, `medium`, and `high`.
-
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -800,7 +785,6 @@ def create(
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -838,7 +822,6 @@ def create(
                     "top_p": top_p,
                     "truncation": truncation,
                     "user": user,
-                    "verbosity": verbosity,
                 },
                 response_create_params.ResponseCreateParamsStreaming
                 if stream
@@ -1485,7 +1468,6 @@ async def create(
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1674,10 +1656,6 @@ async def create(
             similar requests and to help OpenAI detect and prevent abuse.
             [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
-          verbosity: Constrains the verbosity of the model's response. Lower values will result in
-              more concise responses, while higher values will result in more verbose
-              responses. Currently supported values are `low`, `medium`, and `high`.
-
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -1718,7 +1696,6 @@ async def create(
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -1907,10 +1884,6 @@ async def create(
             similar requests and to help OpenAI detect and prevent abuse.
             [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
-          verbosity: Constrains the verbosity of the model's response. Lower values will result in
-              more concise responses, while higher values will result in more verbose
-              responses. Currently supported values are `low`, `medium`, and `high`.
-
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -1951,7 +1924,6 @@ async def create(
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -2140,10 +2112,6 @@ async def create(
             similar requests and to help OpenAI detect and prevent abuse.
             [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 
-          verbosity: Constrains the verbosity of the model's response. Lower values will result in
-              more concise responses, while higher values will result in more verbose
-              responses. Currently supported values are `low`, `medium`, and `high`.
-
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -2183,7 +2151,6 @@ async def create(
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
-        verbosity: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -2221,7 +2188,6 @@ async def create(
                     "top_p": top_p,
                     "truncation": truncation,
                     "user": user,
-                    "verbosity": verbosity,
                 },
                 response_create_params.ResponseCreateParamsStreaming
                 if stream
diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py
index 53af325328..ea91fa1265 100644
--- a/src/openai/types/responses/response_create_params.py
+++ b/src/openai/types/responses/response_create_params.py
@@ -253,14 +253,6 @@ class ResponseCreateParamsBase(TypedDict, total=False):
     [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
     """
 
-    verbosity: Optional[Literal["low", "medium", "high"]]
-    """Constrains the verbosity of the model's response.
-
-    Lower values will result in more concise responses, while higher values will
-    result in more verbose responses. Currently supported values are `low`,
-    `medium`, and `high`.
-    """
-
 
 class StreamOptions(TypedDict, total=False):
     include_obfuscation: bool
diff --git a/src/openai/types/responses/response_text_config.py b/src/openai/types/responses/response_text_config.py
index a1894a9176..c53546da6d 100644
--- a/src/openai/types/responses/response_text_config.py
+++ b/src/openai/types/responses/response_text_config.py
@@ -1,6 +1,7 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 from typing import Optional
+from typing_extensions import Literal
 
 from ..._models import BaseModel
 from .response_format_text_config import ResponseFormatTextConfig
@@ -24,3 +25,11 @@ class ResponseTextConfig(BaseModel):
     ensures the message the model generates is valid JSON. Using `json_schema` is
     preferred for models that support it.
     """
+
+    verbosity: Optional[Literal["low", "medium", "high"]] = None
+    """Constrains the verbosity of the model's response.
+
+    Lower values will result in more concise responses, while higher values will
+    result in more verbose responses. Currently supported values are `low`,
+    `medium`, and `high`.
+    """
diff --git a/src/openai/types/responses/response_text_config_param.py b/src/openai/types/responses/response_text_config_param.py
index aec064bf89..1229fce35b 100644
--- a/src/openai/types/responses/response_text_config_param.py
+++ b/src/openai/types/responses/response_text_config_param.py
@@ -2,7 +2,8 @@
 
 from __future__ import annotations
 
-from typing_extensions import TypedDict
+from typing import Optional
+from typing_extensions import Literal, TypedDict
 
 from .response_format_text_config_param import ResponseFormatTextConfigParam
 
@@ -25,3 +26,11 @@ class ResponseTextConfigParam(TypedDict, total=False):
     ensures the message the model generates is valid JSON. Using `json_schema` is
     preferred for models that support it.
     """
+
+    verbosity: Optional[Literal["low", "medium", "high"]]
+    """Constrains the verbosity of the model's response.
+
+    Lower values will result in more concise responses, while higher values will
+    result in more verbose responses. Currently supported values are `low`,
+    `medium`, and `high`.
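A quick sketch of the corrected call shape from PATCH 59, mirroring the test changes that follow (model and input are illustrative): `verbosity` is no longer a top-level argument to `client.responses.create(...)` and instead rides inside `text`.

    from openai import OpenAI

    client = OpenAI()
    response = client.responses.create(
        model="gpt-5",
        input="Summarize the latest release notes.",
        # Correct placement: verbosity lives inside the `text` config.
        text={"format": {"type": "text"}, "verbosity": "low"},
    )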
+ """ diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py index 4f8c88fa27..310800b87e 100644 --- a/tests/api_resources/test_responses.py +++ b/tests/api_resources/test_responses.py @@ -55,7 +55,10 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: stream=False, stream_options={"include_obfuscation": True}, temperature=1, - text={"format": {"type": "text"}}, + text={ + "format": {"type": "text"}, + "verbosity": "low", + }, tool_choice="none", tools=[ { @@ -70,7 +73,6 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: top_p=1, truncation="auto", user="user-1234", - verbosity="low", ) assert_matches_type(Response, response, path=["response"]) @@ -131,7 +133,10 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: store=True, stream_options={"include_obfuscation": True}, temperature=1, - text={"format": {"type": "text"}}, + text={ + "format": {"type": "text"}, + "verbosity": "low", + }, tool_choice="none", tools=[ { @@ -146,7 +151,6 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: top_p=1, truncation="auto", user="user-1234", - verbosity="low", ) response_stream.response.close() @@ -402,7 +406,10 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn stream=False, stream_options={"include_obfuscation": True}, temperature=1, - text={"format": {"type": "text"}}, + text={ + "format": {"type": "text"}, + "verbosity": "low", + }, tool_choice="none", tools=[ { @@ -417,7 +424,6 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn top_p=1, truncation="auto", user="user-1234", - verbosity="low", ) assert_matches_type(Response, response, path=["response"]) @@ -478,7 +484,10 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn store=True, stream_options={"include_obfuscation": True}, temperature=1, - text={"format": {"type": "text"}}, + text={ + "format": {"type": "text"}, + "verbosity": "low", + }, tool_choice="none", tools=[ { @@ -493,7 +502,6 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn top_p=1, truncation="auto", user="user-1234", - verbosity="low", ) await response_stream.response.aclose() From 7aa3c787b99adf9b93f0652aacafa1200c681877 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 8 Aug 2025 14:57:03 +0000 Subject: [PATCH 60/90] release: 1.99.5 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index cdb9c7d0d7..393c24840d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.99.4" + ".": "1.99.5" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index f8fdb7a268..3d332955ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.99.5 (2025-08-08) + +Full Changelog: [v1.99.4...v1.99.5](https://github.com/openai/openai-python/compare/v1.99.4...v1.99.5) + +### Bug Fixes + +* **client:** fix verbosity parameter location in Responses ([2764ff4](https://github.com/openai/openai-python/commit/2764ff459eb8b309d25b39b40e363b16a5b95019)) + ## 1.99.4 (2025-08-08) Full Changelog: 
[v1.99.3...v1.99.4](https://github.com/openai/openai-python/compare/v1.99.3...v1.99.4) diff --git a/pyproject.toml b/pyproject.toml index b041682135..ca255c95bd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.99.4" +version = "1.99.5" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 04f835f838..12270a03d4 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.99.4" # x-release-please-version +__version__ = "1.99.5" # x-release-please-version From 52c48df8be298984eb2233fec71dc7765472f65e Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Fri, 8 Aug 2025 18:14:16 +0100 Subject: [PATCH 61/90] fix(types): re-export more tool call types --- src/openai/types/chat/chat_completion_message_tool_call.py | 4 ++-- src/openai/types/chat/chat_completion_tool_param.py | 7 +++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/openai/types/chat/chat_completion_message_tool_call.py b/src/openai/types/chat/chat_completion_message_tool_call.py index c254774626..94cc086e9d 100644 --- a/src/openai/types/chat/chat_completion_message_tool_call.py +++ b/src/openai/types/chat/chat_completion_message_tool_call.py @@ -5,9 +5,9 @@ from ..._utils import PropertyInfo from .chat_completion_message_custom_tool_call import ChatCompletionMessageCustomToolCall -from .chat_completion_message_function_tool_call import ChatCompletionMessageFunctionToolCall +from .chat_completion_message_function_tool_call import Function as Function, ChatCompletionMessageFunctionToolCall -__all__ = ["ChatCompletionMessageToolCall"] +__all__ = ["ChatCompletionMessageToolCall", "Function"] ChatCompletionMessageToolCall: TypeAlias = Annotated[ Union[ChatCompletionMessageFunctionToolCall, ChatCompletionMessageCustomToolCall], diff --git a/src/openai/types/chat/chat_completion_tool_param.py b/src/openai/types/chat/chat_completion_tool_param.py index ef3b6d07c6..a18b13b471 100644 --- a/src/openai/types/chat/chat_completion_tool_param.py +++ b/src/openai/types/chat/chat_completion_tool_param.py @@ -4,8 +4,11 @@ from typing_extensions import TypeAlias -from .chat_completion_function_tool_param import ChatCompletionFunctionToolParam +from .chat_completion_function_tool_param import ( + FunctionDefinition as FunctionDefinition, + ChatCompletionFunctionToolParam, +) -__all__ = ["ChatCompletionToolParam"] +__all__ = ["ChatCompletionToolParam", "FunctionDefinition"] ChatCompletionToolParam: TypeAlias = ChatCompletionFunctionToolParam From 5dc3476754d02f487a7eefc743b97053ff4b533f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 8 Aug 2025 17:58:48 +0000 Subject: [PATCH 62/90] chore: update @stainless-api/prism-cli to v5.15.0 --- scripts/mock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/mock b/scripts/mock index d2814ae6a0..0b28f6ea23 100755 --- a/scripts/mock +++ b/scripts/mock @@ -21,7 +21,7 @@ echo "==> Starting mock server with URL ${URL}" # Run prism mock on the given spec if [ "$1" == "--daemon" ]; then - npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" &> .prism.log & + npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" &> .prism.log & # Wait for server to come 
online echo -n "Waiting for server" @@ -37,5 +37,5 @@ if [ "$1" == "--daemon" ]; then echo else - npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" + npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock "$URL" fi From 4df12615b6dd4bcc860d4064920878749195b80e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 8 Aug 2025 21:23:11 +0000 Subject: [PATCH 63/90] chore(internal): update comment in script --- scripts/test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/test b/scripts/test index 2b87845670..dbeda2d217 100755 --- a/scripts/test +++ b/scripts/test @@ -43,7 +43,7 @@ elif ! prism_is_running ; then echo -e "To run the server, pass in the path or url of your OpenAPI" echo -e "spec to the prism command:" echo - echo -e " \$ ${YELLOW}npm exec --package=@stoplight/prism-cli@~5.3.2 -- prism mock path/to/your.openapi.yml${NC}" + echo -e " \$ ${YELLOW}npm exec --package=@stainless-api/prism-cli@5.15.0 -- prism mock path/to/your.openapi.yml${NC}" echo exit 1 From 4d8c14cdc13772f6cc68be5eee6772b215f82c58 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 9 Aug 2025 05:04:12 +0000 Subject: [PATCH 64/90] release: 1.99.6 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 14 ++++++++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 17 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 393c24840d..03128d3ade 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.99.5" + ".": "1.99.6" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 3d332955ef..8edff34439 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 1.99.6 (2025-08-09) + +Full Changelog: [v1.99.5...v1.99.6](https://github.com/openai/openai-python/compare/v1.99.5...v1.99.6) + +### Bug Fixes + +* **types:** re-export more tool call types ([8fe5741](https://github.com/openai/openai-python/commit/8fe574131cfe8f0453788cc6105d22834e7c102f)) + + +### Chores + +* **internal:** update comment in script ([e407bb5](https://github.com/openai/openai-python/commit/e407bb52112ad73e5eedf929434ee4ff7ac5a5a8)) +* update @stainless-api/prism-cli to v5.15.0 ([a1883fc](https://github.com/openai/openai-python/commit/a1883fcdfa02b81e5129bdb43206597a51f885fa)) + ## 1.99.5 (2025-08-08) Full Changelog: [v1.99.4...v1.99.5](https://github.com/openai/openai-python/compare/v1.99.4...v1.99.5) diff --git a/pyproject.toml b/pyproject.toml index ca255c95bd..37e9d4f767 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.99.5" +version = "1.99.6" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 12270a03d4..eed63aadba 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
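As an aside on the prism-cli bump above: a sketch of pointing the SDK at a locally running mock server started via `./scripts/mock --daemon`. Port 4010 is prism's default and is an assumption here; the mock accepts any API key value.

    from openai import OpenAI

    # base_url targets the local prism mock instead of api.openai.com;
    # prism serves canned responses generated from the OpenAPI spec
    client = OpenAI(base_url="http://localhost:4010", api_key="test-key")
    models = client.models.list()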
__title__ = "openai" -__version__ = "1.99.5" # x-release-please-version +__version__ = "1.99.6" # x-release-please-version From bff85cddc49047d2e2a31c08ed1dfa2c8dcdd255 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 12:56:29 +0000 Subject: [PATCH 65/90] fix(types): rename ChatCompletionMessageToolCallParam --- .stats.yml | 4 ++-- api.md | 2 +- src/openai/types/chat/__init__.py | 8 ++++---- .../types/chat/chat_completion_assistant_message_param.py | 4 ++-- src/openai/types/chat/chat_completion_message.py | 4 ++-- .../types/chat/chat_completion_message_tool_call.py | 4 ++-- ...y => chat_completion_message_tool_call_union_param.py} | 4 ++-- 7 files changed, 15 insertions(+), 15 deletions(-) rename src/openai/types/chat/{chat_completion_message_tool_call_param.py => chat_completion_message_tool_call_union_param.py} (81%) diff --git a/.stats.yml b/.stats.yml index 1c85ee4a0c..a098c3d40d 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6a1bfd4738fff02ef5becc3fdb2bf0cd6c026f2c924d4147a2a515474477dd9a.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-9cadfad609f94f20ebf74fdc06a80302f1a324dc69700a309a8056aabca82fd2.yml openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba -config_hash: a67c5e195a59855fe8a5db0dc61a3e7f +config_hash: 68337b532875626269c304372a669f67 diff --git a/api.md b/api.md index f58c401311..92b068b134 100644 --- a/api.md +++ b/api.md @@ -69,7 +69,7 @@ from openai.types.chat import ( ChatCompletionMessageCustomToolCall, ChatCompletionMessageFunctionToolCall, ChatCompletionMessageParam, - ChatCompletionMessageToolCall, + ChatCompletionMessageToolCallUnion, ChatCompletionModality, ChatCompletionNamedToolChoice, ChatCompletionNamedToolChoiceCustom, diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py index c9e77ff41c..25ad0bfda6 100644 --- a/src/openai/types/chat/__init__.py +++ b/src/openai/types/chat/__init__.py @@ -31,7 +31,7 @@ from .chat_completion_tool_union_param import ChatCompletionToolUnionParam as ChatCompletionToolUnionParam from .chat_completion_content_part_text import ChatCompletionContentPartText as ChatCompletionContentPartText from .chat_completion_custom_tool_param import ChatCompletionCustomToolParam as ChatCompletionCustomToolParam -from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall +from .chat_completion_message_tool_call import ChatCompletionMessageToolCallUnion as ChatCompletionMessageToolCallUnion from .chat_completion_content_part_image import ChatCompletionContentPartImage as ChatCompletionContentPartImage from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam @@ -52,9 +52,6 @@ from .chat_completion_developer_message_param import ( ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, ) -from .chat_completion_message_tool_call_param import ( - ChatCompletionMessageToolCallParam as ChatCompletionMessageToolCallParam, -) from .chat_completion_named_tool_choice_param import ( ChatCompletionNamedToolChoiceParam as ChatCompletionNamedToolChoiceParam, ) @@ -82,6 +79,9 @@ from .chat_completion_message_function_tool_call import ( 
ChatCompletionMessageFunctionToolCall as ChatCompletionMessageFunctionToolCall, ) +from .chat_completion_message_tool_call_union_param import ( + ChatCompletionMessageToolCallUnionParam as ChatCompletionMessageToolCallUnionParam, +) from .chat_completion_content_part_input_audio_param import ( ChatCompletionContentPartInputAudioParam as ChatCompletionContentPartInputAudioParam, ) diff --git a/src/openai/types/chat/chat_completion_assistant_message_param.py b/src/openai/types/chat/chat_completion_assistant_message_param.py index 35e3a3d784..212d933e9b 100644 --- a/src/openai/types/chat/chat_completion_assistant_message_param.py +++ b/src/openai/types/chat/chat_completion_assistant_message_param.py @@ -6,8 +6,8 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam -from .chat_completion_message_tool_call_param import ChatCompletionMessageToolCallParam from .chat_completion_content_part_refusal_param import ChatCompletionContentPartRefusalParam +from .chat_completion_message_tool_call_union_param import ChatCompletionMessageToolCallUnionParam __all__ = ["ChatCompletionAssistantMessageParam", "Audio", "ContentArrayOfContentPart", "FunctionCall"] @@ -66,5 +66,5 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): refusal: Optional[str] """The refusal message by the assistant.""" - tool_calls: Iterable[ChatCompletionMessageToolCallParam] + tool_calls: Iterable[ChatCompletionMessageToolCallUnionParam] """The tool calls generated by the model, such as function calls.""" diff --git a/src/openai/types/chat/chat_completion_message.py b/src/openai/types/chat/chat_completion_message.py index c659ac3da0..5bb153fe3f 100644 --- a/src/openai/types/chat/chat_completion_message.py +++ b/src/openai/types/chat/chat_completion_message.py @@ -5,7 +5,7 @@ from ..._models import BaseModel from .chat_completion_audio import ChatCompletionAudio -from .chat_completion_message_tool_call import ChatCompletionMessageToolCall +from .chat_completion_message_tool_call import ChatCompletionMessageToolCallUnion __all__ = ["ChatCompletionMessage", "Annotation", "AnnotationURLCitation", "FunctionCall"] @@ -75,5 +75,5 @@ class ChatCompletionMessage(BaseModel): model. 
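The union renamed in this patch is discriminated on `type`, so downstream code can branch on it directly. A sketch of consuming it; the field layout of the custom variant (`.custom.name` / `.custom.input`) is assumed from the SDK's naming conventions rather than shown in this hunk:

    from openai import OpenAI

    client = OpenAI()
    completion = client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model
        messages=[{"role": "user", "content": "What's the weather in SF?"}],
        # tools omitted for brevity; without them tool_calls is simply None
    )
    # tool_calls is now typed as a list of ChatCompletionMessageToolCallUnion
    for tool_call in completion.choices[0].message.tool_calls or []:
        if tool_call.type == "function":
            print(tool_call.function.name, tool_call.function.arguments)
        else:  # type == "custom"; field names assumed, see note above
            print(tool_call.custom.name, tool_call.custom.input)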
""" - tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None + tool_calls: Optional[List[ChatCompletionMessageToolCallUnion]] = None """The tool calls generated by the model, such as function calls.""" diff --git a/src/openai/types/chat/chat_completion_message_tool_call.py b/src/openai/types/chat/chat_completion_message_tool_call.py index 94cc086e9d..df687b19bd 100644 --- a/src/openai/types/chat/chat_completion_message_tool_call.py +++ b/src/openai/types/chat/chat_completion_message_tool_call.py @@ -7,9 +7,9 @@ from .chat_completion_message_custom_tool_call import ChatCompletionMessageCustomToolCall from .chat_completion_message_function_tool_call import Function as Function, ChatCompletionMessageFunctionToolCall -__all__ = ["ChatCompletionMessageToolCall", "Function"] +__all__ = [ "Function", "ChatCompletionMessageToolCallUnion"] -ChatCompletionMessageToolCall: TypeAlias = Annotated[ +ChatCompletionMessageToolCallUnion: TypeAlias = Annotated[ Union[ChatCompletionMessageFunctionToolCall, ChatCompletionMessageCustomToolCall], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/chat/chat_completion_message_tool_call_param.py b/src/openai/types/chat/chat_completion_message_tool_call_union_param.py similarity index 81% rename from src/openai/types/chat/chat_completion_message_tool_call_param.py rename to src/openai/types/chat/chat_completion_message_tool_call_union_param.py index 96ba6521f0..fcca9bb116 100644 --- a/src/openai/types/chat/chat_completion_message_tool_call_param.py +++ b/src/openai/types/chat/chat_completion_message_tool_call_union_param.py @@ -8,8 +8,8 @@ from .chat_completion_message_custom_tool_call_param import ChatCompletionMessageCustomToolCallParam from .chat_completion_message_function_tool_call_param import ChatCompletionMessageFunctionToolCallParam -__all__ = ["ChatCompletionMessageToolCallParam"] +__all__ = ["ChatCompletionMessageToolCallUnionParam"] -ChatCompletionMessageToolCallParam: TypeAlias = Union[ +ChatCompletionMessageToolCallUnionParam: TypeAlias = Union[ ChatCompletionMessageFunctionToolCallParam, ChatCompletionMessageCustomToolCallParam ] From a6beda8e67a29c21d2fd2c447a9cb6c61fc1685c Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 11 Aug 2025 14:08:27 +0100 Subject: [PATCH 66/90] fix(types): revert ChatCompletionMessageToolCallParam to a TypedDict --- src/openai/types/chat/__init__.py | 3 +++ .../chat_completion_message_tool_call_param.py | 14 ++++++++++++++ 2 files changed, 17 insertions(+) create mode 100644 src/openai/types/chat/chat_completion_message_tool_call_param.py diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py index 25ad0bfda6..2aecaf7d0c 100644 --- a/src/openai/types/chat/__init__.py +++ b/src/openai/types/chat/__init__.py @@ -52,6 +52,9 @@ from .chat_completion_developer_message_param import ( ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam, ) +from .chat_completion_message_tool_call_param import ( + ChatCompletionMessageToolCallParam as ChatCompletionMessageToolCallParam, +) from .chat_completion_named_tool_choice_param import ( ChatCompletionNamedToolChoiceParam as ChatCompletionNamedToolChoiceParam, ) diff --git a/src/openai/types/chat/chat_completion_message_tool_call_param.py b/src/openai/types/chat/chat_completion_message_tool_call_param.py new file mode 100644 index 0000000000..6baa1b57ab --- /dev/null +++ b/src/openai/types/chat/chat_completion_message_tool_call_param.py @@ -0,0 +1,14 @@ +# File generated from our OpenAPI spec by Stainless. 
See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import TypeAlias + +from .chat_completion_message_function_tool_call_param import ( + Function as Function, + ChatCompletionMessageFunctionToolCallParam, +) + +__all__ = ["ChatCompletionMessageToolCallParam", "Function"] + +ChatCompletionMessageToolCallParam: TypeAlias = ChatCompletionMessageFunctionToolCallParam From 23887e4b9180f62e634f95ae4dff1ace447a630a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 13:09:54 +0000 Subject: [PATCH 67/90] release: 1.99.7 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 9 +++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 03128d3ade..804a6039aa 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.99.6" + ".": "1.99.7" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 8edff34439..74d0da964a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,14 @@ # Changelog +## 1.99.7 (2025-08-11) + +Full Changelog: [v1.99.6...v1.99.7](https://github.com/openai/openai-python/compare/v1.99.6...v1.99.7) + +### Bug Fixes + +* **types:** rename ChatCompletionMessageToolCallParam ([48085e2](https://github.com/openai/openai-python/commit/48085e2f473799d079e71d48d2f5612a6fbeb976)) +* **types:** revert ChatCompletionMessageToolCallParam to a TypedDict ([c8e9cec](https://github.com/openai/openai-python/commit/c8e9cec5c93cc022fff546f27161717f769d1f81)) + ## 1.99.6 (2025-08-09) Full Changelog: [v1.99.5...v1.99.6](https://github.com/openai/openai-python/compare/v1.99.5...v1.99.6) diff --git a/pyproject.toml b/pyproject.toml index 37e9d4f767..d58b9b1eb2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.99.6" +version = "1.99.7" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index eed63aadba..3db3f866cf 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.99.6" # x-release-please-version +__version__ = "1.99.7" # x-release-please-version From f03096cb7ce9343fd88f16e1c1b93dcc794279b4 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 11 Aug 2025 16:19:50 +0100 Subject: [PATCH 68/90] chore(internal/tests): add inline snapshot format command --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index d58b9b1eb2..97ec8cf43d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -150,6 +150,9 @@ filterwarnings = [ "error" ] +[tool.inline-snapshot] +format-command="ruff format --stdin-filename {filename}" + [tool.pyright] # this enables practically every flag given by pyright. 
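For readers unfamiliar with the tool configured above: inline-snapshot rewrites `snapshot()` calls in test source when run with `--inline-snapshot=fix`, and the new format-command pipes each rewritten file through ruff. A hypothetical test showing the workflow, assuming pytest with the inline-snapshot plugin installed:

    from inline_snapshot import snapshot

    def test_greeting() -> None:
        # on `pytest --inline-snapshot=fix` the argument below is created or
        # updated in place, then formatted via the configured format-command
        assert "hello".upper() == snapshot("HELLO")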
# there are a couple of flags that are still disabled by From 266edeba335834f2009e59c0a4a1ded8cb45749d Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 11 Aug 2025 16:20:48 +0100 Subject: [PATCH 69/90] refactor(tests): share snapshot utils --- tests/lib/chat/test_completions.py | 120 +++---------------- tests/lib/chat/test_completions_streaming.py | 2 +- tests/lib/snapshots.py | 99 +++++++++++++++ tests/lib/{chat/_utils.py => utils.py} | 2 +- 4 files changed, 118 insertions(+), 105 deletions(-) create mode 100644 tests/lib/snapshots.py rename tests/lib/{chat/_utils.py => utils.py} (98%) diff --git a/tests/lib/chat/test_completions.py b/tests/lib/chat/test_completions.py index d0bd14ce9e..3ef2e74c19 100644 --- a/tests/lib/chat/test_completions.py +++ b/tests/lib/chat/test_completions.py @@ -1,12 +1,9 @@ from __future__ import annotations -import os -import json from enum import Enum -from typing import Any, List, Callable, Optional, Awaitable +from typing import List, Optional from typing_extensions import Literal, TypeVar -import httpx import pytest from respx import MockRouter from pydantic import Field, BaseModel @@ -17,8 +14,9 @@ from openai._utils import assert_signatures_in_sync from openai._compat import PYDANTIC_V2 -from ._utils import print_obj, get_snapshot_value +from ..utils import print_obj from ...conftest import base_url +from ..snapshots import make_snapshot_request, make_async_snapshot_request from ..schema_types.query import Query _T = TypeVar("_T") @@ -32,7 +30,7 @@ @pytest.mark.respx(base_url=base_url) def test_parse_nothing(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None: - completion = _make_snapshot_request( + completion = make_snapshot_request( lambda c: c.chat.completions.parse( model="gpt-4o-2024-08-06", messages=[ @@ -100,7 +98,7 @@ class Location(BaseModel): temperature: float units: Literal["c", "f"] - completion = _make_snapshot_request( + completion = make_snapshot_request( lambda c: c.chat.completions.parse( model="gpt-4o-2024-08-06", messages=[ @@ -170,7 +168,7 @@ class Location(BaseModel): temperature: float units: Optional[Literal["c", "f"]] = None - completion = _make_snapshot_request( + completion = make_snapshot_request( lambda c: c.chat.completions.parse( model="gpt-4o-2024-08-06", messages=[ @@ -247,7 +245,7 @@ class ColorDetection(BaseModel): if not PYDANTIC_V2: ColorDetection.update_forward_refs(**locals()) # type: ignore - completion = _make_snapshot_request( + completion = make_snapshot_request( lambda c: c.chat.completions.parse( model="gpt-4o-2024-08-06", messages=[ @@ -292,7 +290,7 @@ class Location(BaseModel): temperature: float units: Literal["c", "f"] - completion = _make_snapshot_request( + completion = make_snapshot_request( lambda c: c.chat.completions.parse( model="gpt-4o-2024-08-06", messages=[ @@ -375,7 +373,7 @@ class CalendarEvent: date: str participants: List[str] - completion = _make_snapshot_request( + completion = make_snapshot_request( lambda c: c.chat.completions.parse( model="gpt-4o-2024-08-06", messages=[ @@ -436,7 +434,7 @@ class CalendarEvent: @pytest.mark.respx(base_url=base_url) def test_pydantic_tool_model_all_types(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None: - completion = _make_snapshot_request( + completion = make_snapshot_request( lambda c: c.chat.completions.parse( model="gpt-4o-2024-08-06", messages=[ @@ -521,7 +519,7 @@ class Location(BaseModel): units: Literal["c", "f"] with pytest.raises(openai.LengthFinishReasonError): - 
_make_snapshot_request( + make_snapshot_request( lambda c: c.chat.completions.parse( model="gpt-4o-2024-08-06", messages=[ @@ -548,7 +546,7 @@ class Location(BaseModel): temperature: float units: Literal["c", "f"] - completion = _make_snapshot_request( + completion = make_snapshot_request( lambda c: c.chat.completions.parse( model="gpt-4o-2024-08-06", messages=[ @@ -596,7 +594,7 @@ class GetWeatherArgs(BaseModel): country: str units: Literal["c", "f"] = "c" - completion = _make_snapshot_request( + completion = make_snapshot_request( lambda c: c.chat.completions.parse( model="gpt-4o-2024-08-06", messages=[ @@ -662,7 +660,7 @@ class GetStockPrice(BaseModel): ticker: str exchange: str - completion = _make_snapshot_request( + completion = make_snapshot_request( lambda c: c.chat.completions.parse( model="gpt-4o-2024-08-06", messages=[ @@ -733,7 +731,7 @@ class GetStockPrice(BaseModel): @pytest.mark.respx(base_url=base_url) def test_parse_strict_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch: pytest.MonkeyPatch) -> None: - completion = _make_snapshot_request( + completion = make_snapshot_request( lambda c: c.chat.completions.parse( model="gpt-4o-2024-08-06", messages=[ @@ -830,7 +828,7 @@ class Location(BaseModel): temperature: float units: Literal["c", "f"] - response = _make_snapshot_request( + response = make_snapshot_request( lambda c: c.chat.completions.with_raw_response.parse( model="gpt-4o-2024-08-06", messages=[ @@ -906,7 +904,7 @@ class Location(BaseModel): temperature: float units: Literal["c", "f"] - response = await _make_async_snapshot_request( + response = await make_async_snapshot_request( lambda c: c.chat.completions.with_raw_response.parse( model="gpt-4o-2024-08-06", messages=[ @@ -981,87 +979,3 @@ def test_parse_method_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpe checking_client.chat.completions.parse, exclude_params={"response_format", "stream"}, ) - - -def _make_snapshot_request( - func: Callable[[OpenAI], _T], - *, - content_snapshot: Any, - respx_mock: MockRouter, - mock_client: OpenAI, -) -> _T: - live = os.environ.get("OPENAI_LIVE") == "1" - if live: - - def _on_response(response: httpx.Response) -> None: - # update the content snapshot - assert json.dumps(json.loads(response.read())) == content_snapshot - - respx_mock.stop() - - client = OpenAI( - http_client=httpx.Client( - event_hooks={ - "response": [_on_response], - } - ) - ) - else: - respx_mock.post("/chat/completions").mock( - return_value=httpx.Response( - 200, - content=get_snapshot_value(content_snapshot), - headers={"content-type": "application/json"}, - ) - ) - - client = mock_client - - result = func(client) - - if live: - client.close() - - return result - - -async def _make_async_snapshot_request( - func: Callable[[AsyncOpenAI], Awaitable[_T]], - *, - content_snapshot: Any, - respx_mock: MockRouter, - mock_client: AsyncOpenAI, -) -> _T: - live = os.environ.get("OPENAI_LIVE") == "1" - if live: - - async def _on_response(response: httpx.Response) -> None: - # update the content snapshot - assert json.dumps(json.loads(await response.aread())) == content_snapshot - - respx_mock.stop() - - client = AsyncOpenAI( - http_client=httpx.AsyncClient( - event_hooks={ - "response": [_on_response], - } - ) - ) - else: - respx_mock.post("/chat/completions").mock( - return_value=httpx.Response( - 200, - content=get_snapshot_value(content_snapshot), - headers={"content-type": "application/json"}, - ) - ) - - client = mock_client - - result = await func(client) - - if live: - await client.close() - - 
return result diff --git a/tests/lib/chat/test_completions_streaming.py b/tests/lib/chat/test_completions_streaming.py index 1daa98c6a0..65826d28d9 100644 --- a/tests/lib/chat/test_completions_streaming.py +++ b/tests/lib/chat/test_completions_streaming.py @@ -30,7 +30,7 @@ ) from openai.lib._parsing._completions import ResponseFormatT -from ._utils import print_obj, get_snapshot_value +from ..utils import print_obj, get_snapshot_value from ...conftest import base_url _T = TypeVar("_T") diff --git a/tests/lib/snapshots.py b/tests/lib/snapshots.py new file mode 100644 index 0000000000..64b1163338 --- /dev/null +++ b/tests/lib/snapshots.py @@ -0,0 +1,99 @@ +from __future__ import annotations + +import os +import json +from typing import Any, Callable, Awaitable +from typing_extensions import TypeVar + +import httpx +from respx import MockRouter + +from openai import OpenAI, AsyncOpenAI + +from .utils import get_snapshot_value + +_T = TypeVar("_T") + + +def make_snapshot_request( + func: Callable[[OpenAI], _T], + *, + content_snapshot: Any, + respx_mock: MockRouter, + mock_client: OpenAI, +) -> _T: + live = os.environ.get("OPENAI_LIVE") == "1" + if live: + + def _on_response(response: httpx.Response) -> None: + # update the content snapshot + assert json.dumps(json.loads(response.read())) == content_snapshot + + respx_mock.stop() + + client = OpenAI( + http_client=httpx.Client( + event_hooks={ + "response": [_on_response], + } + ) + ) + else: + respx_mock.post("/chat/completions").mock( + return_value=httpx.Response( + 200, + content=get_snapshot_value(content_snapshot), + headers={"content-type": "application/json"}, + ) + ) + + client = mock_client + + result = func(client) + + if live: + client.close() + + return result + + +async def make_async_snapshot_request( + func: Callable[[AsyncOpenAI], Awaitable[_T]], + *, + content_snapshot: Any, + respx_mock: MockRouter, + mock_client: AsyncOpenAI, +) -> _T: + live = os.environ.get("OPENAI_LIVE") == "1" + if live: + + async def _on_response(response: httpx.Response) -> None: + # update the content snapshot + assert json.dumps(json.loads(await response.aread())) == content_snapshot + + respx_mock.stop() + + client = AsyncOpenAI( + http_client=httpx.AsyncClient( + event_hooks={ + "response": [_on_response], + } + ) + ) + else: + respx_mock.post("/chat/completions").mock( + return_value=httpx.Response( + 200, + content=get_snapshot_value(content_snapshot), + headers={"content-type": "application/json"}, + ) + ) + + client = mock_client + + result = await func(client) + + if live: + await client.close() + + return result diff --git a/tests/lib/chat/_utils.py b/tests/lib/utils.py similarity index 98% rename from tests/lib/chat/_utils.py rename to tests/lib/utils.py index 0cc1c99952..2129ee811a 100644 --- a/tests/lib/chat/_utils.py +++ b/tests/lib/utils.py @@ -7,7 +7,7 @@ import pytest import pydantic -from ...utils import rich_print_str +from ..utils import rich_print_str ReprArgs: TypeAlias = "Iterable[tuple[str | None, Any]]" From fd0af12000ff807e558039d9780e0e41bbf6bf2f Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 11 Aug 2025 16:20:56 +0100 Subject: [PATCH 70/90] chore(internal): fix formatting --- src/openai/types/chat/chat_completion_message_tool_call.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/openai/types/chat/chat_completion_message_tool_call.py b/src/openai/types/chat/chat_completion_message_tool_call.py index df687b19bd..be01179701 100644 --- 
a/src/openai/types/chat/chat_completion_message_tool_call.py +++ b/src/openai/types/chat/chat_completion_message_tool_call.py @@ -7,7 +7,7 @@ from .chat_completion_message_custom_tool_call import ChatCompletionMessageCustomToolCall from .chat_completion_message_function_tool_call import Function as Function, ChatCompletionMessageFunctionToolCall -__all__ = [ "Function", "ChatCompletionMessageToolCallUnion"] +__all__ = ["Function", "ChatCompletionMessageToolCallUnion"] ChatCompletionMessageToolCallUnion: TypeAlias = Annotated[ Union[ChatCompletionMessageFunctionToolCall, ChatCompletionMessageCustomToolCall], From a4cd0b5086a419ccf02981f61dccb4b23f6e85a0 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 11 Aug 2025 16:28:37 +0100 Subject: [PATCH 71/90] chore(tests): add responses output_text test --- tests/lib/chat/test_completions.py | 14 ++++++++++ tests/lib/responses/__init__.py | 0 tests/lib/responses/test_responses.py | 40 +++++++++++++++++++++++++++ tests/lib/snapshots.py | 6 ++-- 4 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 tests/lib/responses/__init__.py create mode 100644 tests/lib/responses/test_responses.py diff --git a/tests/lib/chat/test_completions.py b/tests/lib/chat/test_completions.py index 3ef2e74c19..0371f6828b 100644 --- a/tests/lib/chat/test_completions.py +++ b/tests/lib/chat/test_completions.py @@ -43,6 +43,7 @@ def test_parse_nothing(client: OpenAI, respx_mock: MockRouter, monkeypatch: pyte content_snapshot=snapshot( '{"id": "chatcmpl-ABfvaueLEMLNYbT8YzpJxsmiQ6HSY", "object": "chat.completion", "created": 1727346142, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "I\'m unable to provide real-time weather updates. To get the current weather in San Francisco, I recommend checking a reliable weather website or app like the Weather Channel or a local news station.", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 14, "completion_tokens": 37, "total_tokens": 51, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_b40fb1c6fb"}' ), + path="/chat/completions", mock_client=client, respx_mock=respx_mock, ) @@ -112,6 +113,7 @@ class Location(BaseModel): content_snapshot=snapshot( '{"id": "chatcmpl-ABfvbtVnTu5DeC4EFnRYj8mtfOM99", "object": "chat.completion", "created": 1727346143, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 14, "total_tokens": 93, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}' ), + path="/chat/completions", mock_client=client, respx_mock=respx_mock, ) @@ -182,6 +184,7 @@ class Location(BaseModel): content_snapshot=snapshot( '{"id": "chatcmpl-ABfvcC8grKYsRkSoMp9CCAhbXAd0b", "object": "chat.completion", "created": 1727346144, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 88, "completion_tokens": 14, "total_tokens": 102, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_b40fb1c6fb"}' ), + path="/chat/completions", mock_client=client, respx_mock=respx_mock, ) @@ -256,6 +259,7 @@ 
class ColorDetection(BaseModel): content_snapshot=snapshot( '{"id": "chatcmpl-ABfvjIatz0zrZu50gRbMtlp0asZpz", "object": "chat.completion", "created": 1727346151, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"color\\":\\"red\\",\\"hex_color_code\\":\\"#FF0000\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 109, "completion_tokens": 14, "total_tokens": 123, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}' ), + path="/chat/completions", mock_client=client, respx_mock=respx_mock, ) @@ -305,6 +309,7 @@ class Location(BaseModel): content_snapshot=snapshot( '{"id": "chatcmpl-ABfvp8qzboW92q8ONDF4DPHlI7ckC", "object": "chat.completion", "created": 1727346157, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":64,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}, {"index": 1, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}, {"index": 2, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":63.0,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 44, "total_tokens": 123, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_b40fb1c6fb"}' ), + path="/chat/completions", mock_client=client, respx_mock=respx_mock, ) @@ -385,6 +390,7 @@ class CalendarEvent: content_snapshot=snapshot( '{"id": "chatcmpl-ABfvqhz4uUUWsw8Ohw2Mp9B4sKKV8", "object": "chat.completion", "created": 1727346158, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"name\\":\\"Science Fair\\",\\"date\\":\\"Friday\\",\\"participants\\":[\\"Alice\\",\\"Bob\\"]}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 92, "completion_tokens": 17, "total_tokens": 109, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_7568d46099"}' ), + path="/chat/completions", mock_client=client, respx_mock=respx_mock, ) @@ -449,6 +455,7 @@ def test_pydantic_tool_model_all_types(client: OpenAI, respx_mock: MockRouter, m content_snapshot=snapshot( '{"id": "chatcmpl-ABfvtNiaTNUF6OymZUnEFc9lPq9p1", "object": "chat.completion", "created": 1727346161, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_NKpApJybW1MzOjZO2FzwYw0d", "type": "function", "function": {"name": "Query", "arguments": "{\\"name\\":\\"May 2022 Fulfilled Orders Not Delivered on Time\\",\\"table_name\\":\\"orders\\",\\"columns\\":[\\"id\\",\\"status\\",\\"expected_delivery_date\\",\\"delivered_at\\",\\"shipped_at\\",\\"ordered_at\\",\\"canceled_at\\"],\\"conditions\\":[{\\"column\\":\\"ordered_at\\",\\"operator\\":\\">=\\",\\"value\\":\\"2022-05-01\\"},{\\"column\\":\\"ordered_at\\",\\"operator\\":\\"<=\\",\\"value\\":\\"2022-05-31\\"},{\\"column\\":\\"status\\",\\"operator\\":\\"=\\",\\"value\\":\\"fulfilled\\"},{\\"column\\":\\"delivered_at\\",\\"operator\\":\\">\\",\\"value\\":{\\"column_name\\":\\"expected_delivery_date\\"}}],\\"order_by\\":\\"asc\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], 
"usage": {"prompt_tokens": 512, "completion_tokens": 132, "total_tokens": 644, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_7568d46099"}' ), + path="/chat/completions", mock_client=client, respx_mock=respx_mock, ) @@ -534,6 +541,7 @@ class Location(BaseModel): content_snapshot=snapshot( '{"id": "chatcmpl-ABfvvX7eB1KsfeZj8VcF3z7G7SbaA", "object": "chat.completion", "created": 1727346163, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"", "refusal": null}, "logprobs": null, "finish_reason": "length"}], "usage": {"prompt_tokens": 79, "completion_tokens": 1, "total_tokens": 80, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_7568d46099"}' ), + path="/chat/completions", mock_client=client, respx_mock=respx_mock, ) @@ -560,6 +568,7 @@ class Location(BaseModel): content_snapshot=snapshot( '{"id": "chatcmpl-ABfvwoKVWPQj2UPlAcAKM7s40GsRx", "object": "chat.completion", "created": 1727346164, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "refusal": "I\'m very sorry, but I can\'t assist with that."}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 12, "total_tokens": 91, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}' ), + path="/chat/completions", mock_client=client, respx_mock=respx_mock, ) @@ -610,6 +619,7 @@ class GetWeatherArgs(BaseModel): content_snapshot=snapshot( '{"id": "chatcmpl-ABfvx6Z4dchiW2nya1N8KMsHFrQRE", "object": "chat.completion", "created": 1727346165, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_Y6qJ7ofLgOrBnMD5WbVAeiRV", "type": "function", "function": {"name": "GetWeatherArgs", "arguments": "{\\"city\\":\\"Edinburgh\\",\\"country\\":\\"UK\\",\\"units\\":\\"c\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 76, "completion_tokens": 24, "total_tokens": 100, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_e45dabd248"}' ), + path="/chat/completions", mock_client=client, respx_mock=respx_mock, ) @@ -683,6 +693,7 @@ class GetStockPrice(BaseModel): content_snapshot=snapshot( '{"id": "chatcmpl-ABfvyvfNWKcl7Ohqos4UFrmMs1v4C", "object": "chat.completion", "created": 1727346166, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_fdNz3vOBKYgOIpMdWotB9MjY", "type": "function", "function": {"name": "GetWeatherArgs", "arguments": "{\\"city\\": \\"Edinburgh\\", \\"country\\": \\"GB\\", \\"units\\": \\"c\\"}"}}, {"id": "call_h1DWI1POMJLb0KwIyQHWXD4p", "type": "function", "function": {"name": "get_stock_price", "arguments": "{\\"ticker\\": \\"AAPL\\", \\"exchange\\": \\"NASDAQ\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 149, "completion_tokens": 60, "total_tokens": 209, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_b40fb1c6fb"}' ), + path="/chat/completions", mock_client=client, respx_mock=respx_mock, ) @@ -765,6 +776,7 @@ def test_parse_strict_tools(client: OpenAI, respx_mock: MockRouter, monkeypatch: content_snapshot=snapshot( '{"id": "chatcmpl-ABfvzdvCI6RaIkiEFNjqGXCSYnlzf", "object": "chat.completion", "created": 1727346167, "model": "gpt-4o-2024-08-06", "choices": 
[{"index": 0, "message": {"role": "assistant", "content": null, "tool_calls": [{"id": "call_CUdUoJpsWWVdxXntucvnol1M", "type": "function", "function": {"name": "get_weather", "arguments": "{\\"city\\":\\"San Francisco\\",\\"state\\":\\"CA\\"}"}}], "refusal": null}, "logprobs": null, "finish_reason": "tool_calls"}], "usage": {"prompt_tokens": 48, "completion_tokens": 19, "total_tokens": 67, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}' ), + path="/chat/completions", mock_client=client, respx_mock=respx_mock, ) @@ -842,6 +854,7 @@ class Location(BaseModel): content_snapshot=snapshot( '{"id": "chatcmpl-ABrDYCa8W1w66eUxKDO8TQF1m6trT", "object": "chat.completion", "created": 1727389540, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":58,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 14, "total_tokens": 93, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}' ), + path="/chat/completions", mock_client=client, respx_mock=respx_mock, ) @@ -918,6 +931,7 @@ class Location(BaseModel): content_snapshot=snapshot( '{"id": "chatcmpl-ABrDQWOiw0PK5JOsxl1D9ooeQgznq", "object": "chat.completion", "created": 1727389532, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"city\\":\\"San Francisco\\",\\"temperature\\":65,\\"units\\":\\"f\\"}", "refusal": null}, "logprobs": null, "finish_reason": "stop"}], "usage": {"prompt_tokens": 79, "completion_tokens": 14, "total_tokens": 93, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_5050236cbd"}' ), + path="/chat/completions", mock_client=async_client, respx_mock=respx_mock, ) diff --git a/tests/lib/responses/__init__.py b/tests/lib/responses/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/lib/responses/test_responses.py b/tests/lib/responses/test_responses.py new file mode 100644 index 0000000000..d996127dcd --- /dev/null +++ b/tests/lib/responses/test_responses.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +from typing_extensions import TypeVar + +import pytest +from respx import MockRouter +from inline_snapshot import snapshot + +from openai import OpenAI + +from ...conftest import base_url +from ..snapshots import make_snapshot_request + +_T = TypeVar("_T") + +# all the snapshots in this file are auto-generated from the live API +# +# you can update them with +# +# `OPENAI_LIVE=1 pytest --inline-snapshot=fix` + + +@pytest.mark.respx(base_url=base_url) +def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None: + response = make_snapshot_request( + lambda c: c.responses.create( + model="gpt-4o-mini", + input="What's the weather like in SF?", + ), + content_snapshot=snapshot( + '{"id": "resp_689a0b2545288193953c892439b42e2800b2e36c65a1fd4b", "object": "response", "created_at": 1754925861, "status": "completed", "background": false, "error": null, "incomplete_details": null, "instructions": null, "max_output_tokens": null, "max_tool_calls": null, "model": "gpt-4o-mini-2024-07-18", "output": [{"id": "msg_689a0b2637b08193ac478e568f49e3f900b2e36c65a1fd4b", "type": "message", "status": "completed", "content": [{"type": "output_text", "annotations": [], "logprobs": [], "text": "I can\'t provide real-time updates, but you can easily check the current 
weather in San Francisco using a weather website or app. Typically, San Francisco has cool, foggy summers and mild winters, so it\'s good to be prepared for variable weather!"}], "role": "assistant"}], "parallel_tool_calls": true, "previous_response_id": null, "prompt_cache_key": null, "reasoning": {"effort": null, "summary": null}, "safety_identifier": null, "service_tier": "default", "store": true, "temperature": 1.0, "text": {"format": {"type": "text"}, "verbosity": "medium"}, "tool_choice": "auto", "tools": [], "top_logprobs": 0, "top_p": 1.0, "truncation": "disabled", "usage": {"input_tokens": 14, "input_tokens_details": {"cached_tokens": 0}, "output_tokens": 50, "output_tokens_details": {"reasoning_tokens": 0}, "total_tokens": 64}, "user": null, "metadata": {}}' + ), + path="/responses", + mock_client=client, + respx_mock=respx_mock, + ) + + assert response.output_text == snapshot( + "I can't provide real-time updates, but you can easily check the current weather in San Francisco using a weather website or app. Typically, San Francisco has cool, foggy summers and mild winters, so it's good to be prepared for variable weather!" + ) diff --git a/tests/lib/snapshots.py b/tests/lib/snapshots.py index 64b1163338..ed53edebcb 100644 --- a/tests/lib/snapshots.py +++ b/tests/lib/snapshots.py @@ -21,6 +21,7 @@ def make_snapshot_request( content_snapshot: Any, respx_mock: MockRouter, mock_client: OpenAI, + path: str, ) -> _T: live = os.environ.get("OPENAI_LIVE") == "1" if live: @@ -39,7 +40,7 @@ def _on_response(response: httpx.Response) -> None: ) ) else: - respx_mock.post("/chat/completions").mock( + respx_mock.post(path).mock( return_value=httpx.Response( 200, content=get_snapshot_value(content_snapshot), @@ -63,6 +64,7 @@ async def make_async_snapshot_request( content_snapshot: Any, respx_mock: MockRouter, mock_client: AsyncOpenAI, + path: str, ) -> _T: live = os.environ.get("OPENAI_LIVE") == "1" if live: @@ -81,7 +83,7 @@ async def _on_response(response: httpx.Response) -> None: ) ) else: - respx_mock.post("/chat/completions").mock( + respx_mock.post(path).mock( return_value=httpx.Response( 200, content=get_snapshot_value(content_snapshot), From 753d472ef8f14cda35bcd0a992813cb4af9ffef9 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 11 Aug 2025 16:30:26 +0100 Subject: [PATCH 72/90] fix(internal/tests): correct snapshot update comment --- tests/lib/chat/test_completions.py | 2 +- tests/lib/chat/test_completions_streaming.py | 2 +- tests/lib/responses/test_responses.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/lib/chat/test_completions.py b/tests/lib/chat/test_completions.py index 0371f6828b..f04a0e3782 100644 --- a/tests/lib/chat/test_completions.py +++ b/tests/lib/chat/test_completions.py @@ -25,7 +25,7 @@ # # you can update them with # -# `OPENAI_LIVE=1 pytest --inline-snapshot=fix` +# `OPENAI_LIVE=1 pytest --inline-snapshot=fix -p no:xdist -o addopts=""` @pytest.mark.respx(base_url=base_url) diff --git a/tests/lib/chat/test_completions_streaming.py b/tests/lib/chat/test_completions_streaming.py index 65826d28d9..fa17f67177 100644 --- a/tests/lib/chat/test_completions_streaming.py +++ b/tests/lib/chat/test_completions_streaming.py @@ -39,7 +39,7 @@ # # you can update them with # -# `OPENAI_LIVE=1 pytest --inline-snapshot=fix` +# `OPENAI_LIVE=1 pytest --inline-snapshot=fix -p no:xdist -o addopts=""` @pytest.mark.respx(base_url=base_url) diff --git a/tests/lib/responses/test_responses.py b/tests/lib/responses/test_responses.py index 
d996127dcd..8ce3462e76 100644 --- a/tests/lib/responses/test_responses.py +++ b/tests/lib/responses/test_responses.py @@ -17,7 +17,7 @@ # # you can update them with # -# `OPENAI_LIVE=1 pytest --inline-snapshot=fix` +# `OPENAI_LIVE=1 pytest --inline-snapshot=fix -p no:xdist -o addopts=""` @pytest.mark.respx(base_url=base_url) From 37265a9d27e3596075d60499a0336698c11530d0 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 11 Aug 2025 21:13:29 +0100 Subject: [PATCH 73/90] fix(types): revert ChatCompletionMessageToolCallUnion breaking change --- src/openai/types/chat/__init__.py | 5 ++++- src/openai/types/chat/chat_completion_message_tool_call.py | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py index 2aecaf7d0c..50bdac7c65 100644 --- a/src/openai/types/chat/__init__.py +++ b/src/openai/types/chat/__init__.py @@ -31,7 +31,10 @@ from .chat_completion_tool_union_param import ChatCompletionToolUnionParam as ChatCompletionToolUnionParam from .chat_completion_content_part_text import ChatCompletionContentPartText as ChatCompletionContentPartText from .chat_completion_custom_tool_param import ChatCompletionCustomToolParam as ChatCompletionCustomToolParam -from .chat_completion_message_tool_call import ChatCompletionMessageToolCallUnion as ChatCompletionMessageToolCallUnion +from .chat_completion_message_tool_call import ( + ChatCompletionMessageToolCall as ChatCompletionMessageToolCall, + ChatCompletionMessageToolCallUnion as ChatCompletionMessageToolCallUnion, +) from .chat_completion_content_part_image import ChatCompletionContentPartImage as ChatCompletionContentPartImage from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam diff --git a/src/openai/types/chat/chat_completion_message_tool_call.py b/src/openai/types/chat/chat_completion_message_tool_call.py index be01179701..845e639089 100644 --- a/src/openai/types/chat/chat_completion_message_tool_call.py +++ b/src/openai/types/chat/chat_completion_message_tool_call.py @@ -13,3 +13,5 @@ Union[ChatCompletionMessageFunctionToolCall, ChatCompletionMessageCustomToolCall], PropertyInfo(discriminator="type"), ] + +ChatCompletionMessageToolCall: TypeAlias = ChatCompletionMessageToolCallUnion From a02ac0dd5b4797d4a782b4b75fd0790df3e14149 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 20:14:05 +0000 Subject: [PATCH 74/90] release: 1.99.8 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 21 +++++++++++++++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 24 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 804a6039aa..5d9ceab581 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.99.7" + ".": "1.99.8" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 74d0da964a..33e0e8e948 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,26 @@ # Changelog +## 1.99.8 (2025-08-11) + +Full Changelog: [v1.99.7...v1.99.8](https://github.com/openai/openai-python/compare/v1.99.7...v1.99.8) + +### Bug Fixes + +* **internal/tests:** correct snapshot update comment 
([2784a7a](https://github.com/openai/openai-python/commit/2784a7a7da24ddba74b5717f07d67546864472b9)) +* **types:** revert ChatCompletionMessageToolCallUnion breaking change ([ba54e03](https://github.com/openai/openai-python/commit/ba54e03bc2d21825d891685bf3bad4a9253cbeb0)) + + +### Chores + +* **internal/tests:** add inline snapshot format command ([8107db8](https://github.com/openai/openai-python/commit/8107db8ff738baa65fe4cf2f2d7f1acd29219c78)) +* **internal:** fix formatting ([f03a03d](https://github.com/openai/openai-python/commit/f03a03de8c84740209d021598ff8bf56b6d3c684)) +* **tests:** add responses output_text test ([971347b](https://github.com/openai/openai-python/commit/971347b3a05f79c51abd11c86b382ca73c28cefb)) + + +### Refactors + +* **tests:** share snapshot utils ([791c567](https://github.com/openai/openai-python/commit/791c567cd87fb8d587965773b1da0404c7848c68)) + ## 1.99.7 (2025-08-11) Full Changelog: [v1.99.6...v1.99.7](https://github.com/openai/openai-python/compare/v1.99.6...v1.99.7) diff --git a/pyproject.toml b/pyproject.toml index 97ec8cf43d..b4a7d01a2b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.99.7" +version = "1.99.8" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 3db3f866cf..9d1f1f4e96 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.99.7" # x-release-please-version +__version__ = "1.99.8" # x-release-please-version From 064910b115e21837dd793390e6cfbeddd07e5f9a Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Tue, 12 Aug 2025 01:23:24 +0100 Subject: [PATCH 75/90] fix(types): actually fix ChatCompletionMessageToolCall type --- src/openai/types/chat/chat_completion_message_tool_call.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/openai/types/chat/chat_completion_message_tool_call.py b/src/openai/types/chat/chat_completion_message_tool_call.py index 845e639089..71ac63f58e 100644 --- a/src/openai/types/chat/chat_completion_message_tool_call.py +++ b/src/openai/types/chat/chat_completion_message_tool_call.py @@ -14,4 +14,4 @@ PropertyInfo(discriminator="type"), ] -ChatCompletionMessageToolCall: TypeAlias = ChatCompletionMessageToolCallUnion +ChatCompletionMessageToolCall: TypeAlias = ChatCompletionMessageFunctionToolCall From 34014aedbb8946c03e97e5c8d72e03ad2259cd7c Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 00:24:03 +0000 Subject: [PATCH 76/90] release: 1.99.9 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 5d9ceab581..2dfeb2d9bb 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.99.8" + ".": "1.99.9" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 33e0e8e948..392fb8b667 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.99.9 (2025-08-12) + +Full Changelog: [v1.99.8...v1.99.9](https://github.com/openai/openai-python/compare/v1.99.8...v1.99.9) + +### Bug Fixes + +* **types:** actually fix 
ChatCompletionMessageToolCall type ([20cb0c8](https://github.com/openai/openai-python/commit/20cb0c86d598e196386ff43db992f6497eb756d0)) + ## 1.99.8 (2025-08-11) Full Changelog: [v1.99.7...v1.99.8](https://github.com/openai/openai-python/compare/v1.99.7...v1.99.8) diff --git a/pyproject.toml b/pyproject.toml index b4a7d01a2b..ced6079b6d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.99.8" +version = "1.99.9" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 9d1f1f4e96..7d3b3da5d7 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.99.8" # x-release-please-version +__version__ = "1.99.9" # x-release-please-version From 0843a1116498bc3312db9904adf71a4fb0a0a77e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 15 Aug 2025 19:11:41 +0000 Subject: [PATCH 77/90] feat(api): add new text parameters, expiration options --- .stats.yml | 6 +- src/openai/resources/batches.py | 10 ++ .../resources/beta/realtime/realtime.py | 8 +- .../resources/beta/realtime/sessions.py | 4 +- .../beta/realtime/transcription_sessions.py | 4 +- .../resources/beta/threads/runs/runs.py | 12 +- src/openai/resources/beta/threads/threads.py | 12 +- .../resources/chat/completions/completions.py | 48 +++++--- src/openai/resources/files.py | 14 ++- src/openai/resources/responses/responses.py | 107 ++++++------------ src/openai/resources/uploads/uploads.py | 10 ++ src/openai/types/batch_create_params.py | 23 +++- src/openai/types/beta/realtime/session.py | 2 +- .../beta/realtime/session_create_params.py | 2 +- .../beta/realtime/session_update_event.py | 2 +- .../realtime/session_update_event_param.py | 2 +- .../transcription_session_create_params.py | 2 +- .../realtime/transcription_session_update.py | 2 +- .../transcription_session_update_param.py | 2 +- .../beta/thread_create_and_run_params.py | 2 +- src/openai/types/beta/threads/run.py | 2 +- .../types/beta/threads/run_create_params.py | 2 +- src/openai/types/chat/chat_completion.py | 5 +- .../types/chat/chat_completion_chunk.py | 5 +- .../types/chat/completion_create_params.py | 18 ++- src/openai/types/file_create_params.py | 25 +++- src/openai/types/responses/__init__.py | 2 - src/openai/types/responses/response.py | 46 +++++--- .../types/responses/response_create_params.py | 45 +++++--- .../types/responses/response_text_config.py | 35 ------ .../responses/response_text_config_param.py | 36 ------ src/openai/types/upload_create_params.py | 25 +++- tests/api_resources/chat/test_completions.py | 4 + tests/api_resources/test_batches.py | 8 ++ tests/api_resources/test_files.py | 24 ++++ tests/api_resources/test_responses.py | 4 +- tests/api_resources/test_uploads.py | 28 +++++ 37 files changed, 343 insertions(+), 245 deletions(-) delete mode 100644 src/openai/types/responses/response_text_config.py delete mode 100644 src/openai/types/responses/response_text_config_param.py diff --git a/.stats.yml b/.stats.yml index a098c3d40d..66c46e7730 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-9cadfad609f94f20ebf74fdc06a80302f1a324dc69700a309a8056aabca82fd2.yml 
-openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba -config_hash: 68337b532875626269c304372a669f67 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-24be531010b354303d741fc9247c1f84f75978f9f7de68aca92cb4f240a04722.yml +openapi_spec_hash: 3e46f439f6a863beadc71577eb4efa15 +config_hash: ed87b9139ac595a04a2162d754df2fed diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py index 26ea498b31..2340bd2e32 100644 --- a/src/openai/resources/batches.py +++ b/src/openai/resources/batches.py @@ -49,6 +49,7 @@ def create( endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], input_file_id: str, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + output_expires_after: batch_create_params.OutputExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -85,6 +86,9 @@ def create( Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. + output_expires_after: The expiration policy for the output and/or error file that are generated for a + batch. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -101,6 +105,7 @@ def create( "endpoint": endpoint, "input_file_id": input_file_id, "metadata": metadata, + "output_expires_after": output_expires_after, }, batch_create_params.BatchCreateParams, ), @@ -259,6 +264,7 @@ async def create( endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], input_file_id: str, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + output_expires_after: batch_create_params.OutputExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -295,6 +301,9 @@ async def create( Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. + output_expires_after: The expiration policy for the output and/or error file that are generated for a + batch. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -311,6 +320,7 @@ async def create( "endpoint": endpoint, "input_file_id": input_file_id, "metadata": metadata, + "output_expires_after": output_expires_after, }, batch_create_params.BatchCreateParams, ), diff --git a/src/openai/resources/beta/realtime/realtime.py b/src/openai/resources/beta/realtime/realtime.py index 8e1b558cf3..7b99c7f6c4 100644 --- a/src/openai/resources/beta/realtime/realtime.py +++ b/src/openai/resources/beta/realtime/realtime.py @@ -652,8 +652,8 @@ def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | Not """Send this event to cancel an in-progress response. The server will respond - with a `response.cancelled` event or an error if there is no response to - cancel. + with a `response.done` event with a status of `response.status=cancelled`. If + there is no response to cancel, the server will respond with an error. 
""" self._connection.send( cast( @@ -904,8 +904,8 @@ async def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str """Send this event to cancel an in-progress response. The server will respond - with a `response.cancelled` event or an error if there is no response to - cancel. + with a `response.done` event with a status of `response.status=cancelled`. If + there is no response to cancel, the server will respond with an error. """ await self._connection.send( cast( diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py index e639c0ba43..eaddb384ce 100644 --- a/src/openai/resources/beta/realtime/sessions.py +++ b/src/openai/resources/beta/realtime/sessions.py @@ -152,7 +152,7 @@ def create( set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. Semantic VAD - is more advanced and uses a turn detection model (in conjuction with VAD) to + is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and wait longer @@ -334,7 +334,7 @@ async def create( set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. Semantic VAD - is more advanced and uses a turn detection model (in conjuction with VAD) to + is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and wait longer diff --git a/src/openai/resources/beta/realtime/transcription_sessions.py b/src/openai/resources/beta/realtime/transcription_sessions.py index 5f97b3c8e3..54fe7d5a6c 100644 --- a/src/openai/resources/beta/realtime/transcription_sessions.py +++ b/src/openai/resources/beta/realtime/transcription_sessions.py @@ -96,7 +96,7 @@ def create( set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. Semantic VAD - is more advanced and uses a turn detection model (in conjuction with VAD) to + is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and wait longer @@ -209,7 +209,7 @@ async def create( set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. 
Semantic VAD - is more advanced and uses a turn detection model (in conjuction with VAD) to + is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and wait longer diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index 01246d7c12..07b43e6471 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -220,7 +220,7 @@ def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -370,7 +370,7 @@ def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -520,7 +520,7 @@ def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1650,7 +1650,7 @@ async def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1800,7 +1800,7 @@ async def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1950,7 +1950,7 @@ async def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index ff2a41155d..dbe47d2d0e 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -393,7 +393,7 @@ def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -527,7 +527,7 @@ def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. 
extra_headers: Send extra headers @@ -661,7 +661,7 @@ def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1251,7 +1251,7 @@ async def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1385,7 +1385,7 @@ async def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1519,7 +1519,7 @@ async def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py index 9404d85192..bc5fe0fc05 100644 --- a/src/openai/resources/chat/completions/completions.py +++ b/src/openai/resources/chat/completions/completions.py @@ -103,6 +103,7 @@ def parse( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -203,6 +204,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": _type_to_response_format(response_format), + "text": text, "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, @@ -265,6 +267,7 @@ def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -438,9 +441,8 @@ def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. 
When the `service_tier` parameter is set, the response body will include the @@ -554,6 +556,7 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -736,9 +739,8 @@ def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -843,6 +845,7 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1025,9 +1028,8 @@ def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. 
When the `service_tier` parameter is set, the response body will include the @@ -1132,6 +1134,7 @@ def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1178,6 +1181,7 @@ def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, + "text": text, "tool_choice": tool_choice, "tools": tools, "top_logprobs": top_logprobs, @@ -1400,6 +1404,7 @@ def stream( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -1470,6 +1475,7 @@ def stream( presence_penalty=presence_penalty, prompt_cache_key=prompt_cache_key, reasoning_effort=reasoning_effort, + text=text, safety_identifier=safety_identifier, seed=seed, service_tier=service_tier, @@ -1542,6 +1548,7 @@ async def parse( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -1642,6 +1649,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": _type_to_response_format(response_format), + "text": text, "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, @@ -1704,6 +1712,7 @@ async def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1877,9 +1886,8 @@ async def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. 
When the `service_tier` parameter is set, the response body will include the @@ -1993,6 +2001,7 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2175,9 +2184,8 @@ async def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -2282,6 +2290,7 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2464,9 +2473,8 @@ async def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. 
When the `service_tier` parameter is set, the response body will include the @@ -2571,6 +2579,7 @@ async def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2617,6 +2626,7 @@ async def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, + "text": text, "tool_choice": tool_choice, "tools": tools, "top_logprobs": top_logprobs, @@ -2839,6 +2849,7 @@ def stream( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -2910,6 +2921,7 @@ def stream( presence_penalty=presence_penalty, prompt_cache_key=prompt_cache_key, reasoning_effort=reasoning_effort, + text=text, safety_identifier=safety_identifier, seed=seed, service_tier=service_tier, diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py index 179af870ba..b45b8f303f 100644 --- a/src/openai/resources/files.py +++ b/src/openai/resources/files.py @@ -57,6 +57,7 @@ def create( *, file: FileTypes, purpose: FilePurpose, + expires_after: file_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -68,7 +69,7 @@ def create( Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up - to 100 GB. + to 1 TB. The Assistants API supports files up to 2 million tokens and of specific file types. See the @@ -96,6 +97,9 @@ def create( fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used for eval data sets + expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire + after 30 days and all other files are persisted until they are manually deleted. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -108,6 +112,7 @@ def create( { "file": file, "purpose": purpose, + "expires_after": expires_after, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) @@ -369,6 +374,7 @@ async def create( *, file: FileTypes, purpose: FilePurpose, + expires_after: file_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -380,7 +386,7 @@ async def create( Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up - to 100 GB. + to 1 TB. 
The Assistants API supports files up to 2 million tokens and of specific file types. See the @@ -408,6 +414,9 @@ async def create( fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used for eval data sets + expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire + after 30 days and all other files are persisted until they are manually deleted. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -420,6 +429,7 @@ async def create( { "file": file, "purpose": purpose, + "expires_after": expires_after, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index 8983daf278..97ad0faa94 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -43,7 +43,6 @@ from ...types.responses.response_input_param import ResponseInputParam from ...types.responses.response_prompt_param import ResponsePromptParam from ...types.responses.response_stream_event import ResponseStreamEvent -from ...types.responses.response_text_config_param import ResponseTextConfigParam __all__ = ["Responses", "AsyncResponses"] @@ -95,7 +94,7 @@ def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -195,7 +194,7 @@ def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning: **o-series models only** + reasoning: **gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -214,9 +213,8 @@ def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -240,12 +238,6 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - text: Configuration options for a text response from the model. Can be plain text or - structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. 
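The `files.create` hunks above add an optional `expires_after` policy alongside the raised 1 TB organization storage limit. A minimal sketch of how a caller might use it once this patch lands; the filename and the 7-day window are illustrative, not taken from the diff:

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Upload a batch input file that is cleaned up automatically: it expires
# 7 days (604800 s) after creation. Per the new ExpiresAfter params,
# "created_at" is the only supported anchor and `seconds` must be between
# 3600 (1 hour) and 2592000 (30 days).
batch_input = client.files.create(
    file=open("requests.jsonl", "rb"),  # hypothetical local file
    purpose="batch",
    expires_after={"anchor": "created_at", "seconds": 604800},
)
print(batch_input.id)
```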
@@ -323,7 +315,7 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -430,7 +422,7 @@ def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning: **o-series models only** + reasoning: **gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -449,9 +441,8 @@ def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -468,12 +459,6 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - text: Configuration options for a text response from the model. Can be plain text or - structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. @@ -551,7 +536,7 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -658,7 +643,7 @@ def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning: **o-series models only** + reasoning: **gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -677,9 +662,8 @@ def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. 
When the `service_tier` parameter is set, the response body will include the @@ -696,12 +680,6 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - text: Configuration options for a text response from the model. Can be plain text or - structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. @@ -778,7 +756,7 @@ def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -869,7 +847,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -901,7 +879,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -1030,7 +1008,7 @@ def parse( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1461,7 +1439,7 @@ async def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1561,7 +1539,7 @@ async def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). 
- reasoning: **o-series models only** + reasoning: **gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -1580,9 +1558,8 @@ async def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -1606,12 +1583,6 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - text: Configuration options for a text response from the model. Can be plain text or - structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. @@ -1689,7 +1660,7 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1796,7 +1767,7 @@ async def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning: **o-series models only** + reasoning: **gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -1815,9 +1786,8 @@ async def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -1834,12 +1804,6 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - text: Configuration options for a text response from the model. Can be plain text or - structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - tool_choice: How the model should select which tool (or tools) to use when generating a response. 
See the `tools` parameter to see how to specify which tools the model can call. @@ -1917,7 +1881,7 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2024,7 +1988,7 @@ async def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning: **o-series models only** + reasoning: **gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -2043,9 +2007,8 @@ async def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -2062,12 +2025,6 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - text: Configuration options for a text response from the model. Can be plain text or - structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. 
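The docstring updates in this file also surface two new request-level knobs on the Responses API: the simplified `text` configuration (now `response_create_params.Text`) and the `priority` service tier. A hedged sketch of both together; the model name and prompt are placeholders:

```python
from openai import OpenAI

client = OpenAI()

# `text={"verbosity": ...}` accepts "low", "medium", or "high" per this
# patch; service_tier="priority" routes the request to priority processing.
response = client.responses.create(
    model="gpt-5",  # placeholder; any model that supports verbosity
    input="Summarize the attached release notes in two sentences.",
    text={"verbosity": "low"},
    service_tier="priority",
)
print(response.output_text)
```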
@@ -2144,7 +2101,7 @@ async def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2235,7 +2192,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -2267,7 +2224,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -2400,7 +2357,7 @@ async def parse( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/uploads/uploads.py b/src/openai/resources/uploads/uploads.py index ecfcee4800..125a45e33c 100644 --- a/src/openai/resources/uploads/uploads.py +++ b/src/openai/resources/uploads/uploads.py @@ -170,6 +170,7 @@ def create( filename: str, mime_type: str, purpose: FilePurpose, + expires_after: upload_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -213,6 +214,9 @@ def create( See the [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire + after 30 days and all other files are persisted until they are manually deleted. 
+ extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -229,6 +233,7 @@ def create( "filename": filename, "mime_type": mime_type, "purpose": purpose, + "expires_after": expires_after, }, upload_create_params.UploadCreateParams, ), @@ -473,6 +478,7 @@ async def create( filename: str, mime_type: str, purpose: FilePurpose, + expires_after: upload_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -516,6 +522,9 @@ async def create( See the [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire + after 30 days and all other files are persisted until they are manually deleted. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -532,6 +541,7 @@ async def create( "filename": filename, "mime_type": mime_type, "purpose": purpose, + "expires_after": expires_after, }, upload_create_params.UploadCreateParams, ), diff --git a/src/openai/types/batch_create_params.py b/src/openai/types/batch_create_params.py index cc95afd3ba..c0f9034d5e 100644 --- a/src/openai/types/batch_create_params.py +++ b/src/openai/types/batch_create_params.py @@ -7,7 +7,7 @@ from .shared_params.metadata import Metadata -__all__ = ["BatchCreateParams"] +__all__ = ["BatchCreateParams", "OutputExpiresAfter"] class BatchCreateParams(TypedDict, total=False): @@ -47,3 +47,24 @@ class BatchCreateParams(TypedDict, total=False): Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters. """ + + output_expires_after: OutputExpiresAfter + """ + The expiration policy for the output and/or error file that are generated for a + batch. + """ + + +class OutputExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["created_at"]] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `created_at`. Note that the anchor is the file creation time, + not the time the batch is created. + """ + + seconds: Required[int] + """The number of seconds after the anchor time that the file will expire. + + Must be between 3600 (1 hour) and 2592000 (30 days). + """ diff --git a/src/openai/types/beta/realtime/session.py b/src/openai/types/beta/realtime/session.py index f84b3ee4a0..f478a92fbb 100644 --- a/src/openai/types/beta/realtime/session.py +++ b/src/openai/types/beta/realtime/session.py @@ -260,7 +260,7 @@ class Session(BaseModel): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. 
For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py index 6be09d8bae..8a477f9843 100644 --- a/src/openai/types/beta/realtime/session_create_params.py +++ b/src/openai/types/beta/realtime/session_create_params.py @@ -137,7 +137,7 @@ class SessionCreateParams(TypedDict, total=False): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py index 5b4185dbf6..11929ab376 100644 --- a/src/openai/types/beta/realtime/session_update_event.py +++ b/src/openai/types/beta/realtime/session_update_event.py @@ -282,7 +282,7 @@ class Session(BaseModel): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py index 3063449bfd..e939f4cc79 100644 --- a/src/openai/types/beta/realtime/session_update_event_param.py +++ b/src/openai/types/beta/realtime/session_update_event_param.py @@ -280,7 +280,7 @@ class Session(TypedDict, total=False): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. 
For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/src/openai/types/beta/realtime/transcription_session_create_params.py b/src/openai/types/beta/realtime/transcription_session_create_params.py index 15b2f14c14..3ac3af4fa9 100644 --- a/src/openai/types/beta/realtime/transcription_session_create_params.py +++ b/src/openai/types/beta/realtime/transcription_session_create_params.py @@ -61,7 +61,7 @@ class TranscriptionSessionCreateParams(TypedDict, total=False): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/src/openai/types/beta/realtime/transcription_session_update.py b/src/openai/types/beta/realtime/transcription_session_update.py index 73253b6848..5ae1ad226d 100644 --- a/src/openai/types/beta/realtime/transcription_session_update.py +++ b/src/openai/types/beta/realtime/transcription_session_update.py @@ -165,7 +165,7 @@ class Session(BaseModel): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/src/openai/types/beta/realtime/transcription_session_update_param.py b/src/openai/types/beta/realtime/transcription_session_update_param.py index 6b38a9af39..d7065f61c7 100644 --- a/src/openai/types/beta/realtime/transcription_session_update_param.py +++ b/src/openai/types/beta/realtime/transcription_session_update_param.py @@ -165,7 +165,7 @@ class Session(TypedDict, total=False): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. 
For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index d813710579..ad148d693a 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -169,7 +169,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): truncation_strategy: Optional[TruncationStrategy] """Controls for how a thread will be truncated prior to the run. - Use this to control the intial context window of the run. + Use this to control the initial context window of the run. """ diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py index da9418d6f9..c545cc3759 100644 --- a/src/openai/types/beta/threads/run.py +++ b/src/openai/types/beta/threads/run.py @@ -228,7 +228,7 @@ class Run(BaseModel): truncation_strategy: Optional[TruncationStrategy] = None """Controls for how a thread will be truncated prior to the run. - Use this to control the intial context window of the run. + Use this to control the initial context window of the run. """ usage: Optional[Usage] = None diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index f9defcb19c..cfd272f5ad 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -176,7 +176,7 @@ class RunCreateParamsBase(TypedDict, total=False): truncation_strategy: Optional[TruncationStrategy] """Controls for how a thread will be truncated prior to the run. - Use this to control the intial context window of the run. + Use this to control the initial context window of the run. """ diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py index 42463f7ec8..6bc4bafe79 100644 --- a/src/openai/types/chat/chat_completion.py +++ b/src/openai/types/chat/chat_completion.py @@ -68,9 +68,8 @@ class ChatCompletion(BaseModel): - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py index 082bb6cc19..ea32d157ef 100644 --- a/src/openai/types/chat/chat_completion_chunk.py +++ b/src/openai/types/chat/chat_completion_chunk.py @@ -137,9 +137,8 @@ class ChatCompletionChunk(BaseModel): - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. 
+ '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index a3bc90b0a2..3ebab45b56 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -25,6 +25,7 @@ "FunctionCall", "Function", "ResponseFormat", + "Text", "WebSearchOptions", "WebSearchOptionsUserLocation", "WebSearchOptionsUserLocationApproximate", @@ -233,9 +234,8 @@ class CompletionCreateParamsBase(TypedDict, total=False): - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -271,6 +271,8 @@ class CompletionCreateParamsBase(TypedDict, total=False): this or `top_p` but not both. """ + text: Text + tool_choice: ChatCompletionToolChoiceOptionParam """ Controls which (if any) tool is called by the model. `none` means the model will @@ -365,6 +367,16 @@ class Function(TypedDict, total=False): ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject] +class Text(TypedDict, total=False): + verbosity: Optional[Literal["low", "medium", "high"]] + """Constrains the verbosity of the model's response. + + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. + """ + + class WebSearchOptionsUserLocationApproximate(TypedDict, total=False): city: str """Free text input for the city of the user, e.g. `San Francisco`.""" diff --git a/src/openai/types/file_create_params.py b/src/openai/types/file_create_params.py index 728dfd350f..f4583b16a3 100644 --- a/src/openai/types/file_create_params.py +++ b/src/openai/types/file_create_params.py @@ -2,12 +2,12 @@ from __future__ import annotations -from typing_extensions import Required, TypedDict +from typing_extensions import Literal, Required, TypedDict from .._types import FileTypes from .file_purpose import FilePurpose -__all__ = ["FileCreateParams"] +__all__ = ["FileCreateParams", "ExpiresAfter"] class FileCreateParams(TypedDict, total=False): @@ -22,3 +22,24 @@ class FileCreateParams(TypedDict, total=False): fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used for eval data sets """ + + expires_after: ExpiresAfter + """The expiration policy for a file. + + By default, files with `purpose=batch` expire after 30 days and all other files + are persisted until they are manually deleted. + """ + + +class ExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["created_at"]] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `created_at`. 
+ """ + + seconds: Required[int] + """The number of seconds after the anchor time that the file will expire. + + Must be between 3600 (1 hour) and 2592000 (30 days). + """ diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py index 74d8688081..72ec741f91 100644 --- a/src/openai/types/responses/__init__.py +++ b/src/openai/types/responses/__init__.py @@ -42,7 +42,6 @@ from .response_input_param import ResponseInputParam as ResponseInputParam from .response_output_item import ResponseOutputItem as ResponseOutputItem from .response_output_text import ResponseOutputText as ResponseOutputText -from .response_text_config import ResponseTextConfig as ResponseTextConfig from .tool_choice_function import ToolChoiceFunction as ToolChoiceFunction from .response_failed_event import ResponseFailedEvent as ResponseFailedEvent from .response_prompt_param import ResponsePromptParam as ResponsePromptParam @@ -76,7 +75,6 @@ from .response_in_progress_event import ResponseInProgressEvent as ResponseInProgressEvent from .response_input_image_param import ResponseInputImageParam as ResponseInputImageParam from .response_output_text_param import ResponseOutputTextParam as ResponseOutputTextParam -from .response_text_config_param import ResponseTextConfigParam as ResponseTextConfigParam from .tool_choice_function_param import ToolChoiceFunctionParam as ToolChoiceFunctionParam from .response_computer_tool_call import ResponseComputerToolCall as ResponseComputerToolCall from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index 5ebb18fda4..49e38a46fe 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -18,11 +18,11 @@ from .tool_choice_allowed import ToolChoiceAllowed from .tool_choice_options import ToolChoiceOptions from .response_output_item import ResponseOutputItem -from .response_text_config import ResponseTextConfig from .tool_choice_function import ToolChoiceFunction from ..shared.responses_model import ResponsesModel +from .response_format_text_config import ResponseFormatTextConfig -__all__ = ["Response", "IncompleteDetails", "ToolChoice"] +__all__ = ["Response", "IncompleteDetails", "ToolChoice", "Text"] class IncompleteDetails(BaseModel): @@ -35,6 +35,32 @@ class IncompleteDetails(BaseModel): ] +class Text(BaseModel): + format: Optional[ResponseFormatTextConfig] = None + """An object specifying the format that the model must output. + + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ + + verbosity: Optional[Literal["low", "medium", "high"]] = None + """Constrains the verbosity of the model's response. + + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. 
+ """ + + class Response(BaseModel): id: str """Unique identifier for this Response.""" @@ -177,7 +203,7 @@ class Response(BaseModel): """ reasoning: Optional[Reasoning] = None - """**o-series models only** + """**gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -201,9 +227,8 @@ class Response(BaseModel): - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -219,14 +244,7 @@ class Response(BaseModel): `incomplete`. """ - text: Optional[ResponseTextConfig] = None - """Configuration options for a text response from the model. - - Can be plain text or structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - """ + text: Optional[Text] = None top_logprobs: Optional[int] = None """ diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index ea91fa1265..89afccf06b 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -16,13 +16,14 @@ from ..shared_params.reasoning import Reasoning from .tool_choice_custom_param import ToolChoiceCustomParam from .tool_choice_allowed_param import ToolChoiceAllowedParam -from .response_text_config_param import ResponseTextConfigParam from .tool_choice_function_param import ToolChoiceFunctionParam from ..shared_params.responses_model import ResponsesModel +from .response_format_text_config_param import ResponseFormatTextConfigParam __all__ = [ "ResponseCreateParamsBase", "StreamOptions", + "Text", "ToolChoice", "ResponseCreateParamsNonStreaming", "ResponseCreateParamsStreaming", @@ -134,7 +135,7 @@ class ResponseCreateParamsBase(TypedDict, total=False): """ reasoning: Optional[Reasoning] - """**o-series models only** + """**gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -158,9 +159,8 @@ class ResponseCreateParamsBase(TypedDict, total=False): - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -183,14 +183,7 @@ class ResponseCreateParamsBase(TypedDict, total=False): this or `top_p` but not both. 
""" - text: ResponseTextConfigParam - """Configuration options for a text response from the model. - - Can be plain text or structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - """ + text: Text tool_choice: ToolChoice """ @@ -267,6 +260,32 @@ class StreamOptions(TypedDict, total=False): """ +class Text(TypedDict, total=False): + format: ResponseFormatTextConfigParam + """An object specifying the format that the model must output. + + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ + + verbosity: Optional[Literal["low", "medium", "high"]] + """Constrains the verbosity of the model's response. + + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. + """ + + ToolChoice: TypeAlias = Union[ ToolChoiceOptions, ToolChoiceAllowedParam, diff --git a/src/openai/types/responses/response_text_config.py b/src/openai/types/responses/response_text_config.py deleted file mode 100644 index c53546da6d..0000000000 --- a/src/openai/types/responses/response_text_config.py +++ /dev/null @@ -1,35 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel -from .response_format_text_config import ResponseFormatTextConfig - -__all__ = ["ResponseTextConfig"] - - -class ResponseTextConfig(BaseModel): - format: Optional[ResponseFormatTextConfig] = None - """An object specifying the format that the model must output. - - Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - ensures the model will match your supplied JSON schema. Learn more in the - [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - - The default format is `{ "type": "text" }` with no additional options. - - **Not recommended for gpt-4o and newer models:** - - Setting to `{ "type": "json_object" }` enables the older JSON mode, which - ensures the message the model generates is valid JSON. Using `json_schema` is - preferred for models that support it. - """ - - verbosity: Optional[Literal["low", "medium", "high"]] = None - """Constrains the verbosity of the model's response. - - Lower values will result in more concise responses, while higher values will - result in more verbose responses. Currently supported values are `low`, - `medium`, and `high`. - """ diff --git a/src/openai/types/responses/response_text_config_param.py b/src/openai/types/responses/response_text_config_param.py deleted file mode 100644 index 1229fce35b..0000000000 --- a/src/openai/types/responses/response_text_config_param.py +++ /dev/null @@ -1,36 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Optional -from typing_extensions import Literal, TypedDict - -from .response_format_text_config_param import ResponseFormatTextConfigParam - -__all__ = ["ResponseTextConfigParam"] - - -class ResponseTextConfigParam(TypedDict, total=False): - format: ResponseFormatTextConfigParam - """An object specifying the format that the model must output. - - Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - ensures the model will match your supplied JSON schema. Learn more in the - [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - - The default format is `{ "type": "text" }` with no additional options. - - **Not recommended for gpt-4o and newer models:** - - Setting to `{ "type": "json_object" }` enables the older JSON mode, which - ensures the message the model generates is valid JSON. Using `json_schema` is - preferred for models that support it. - """ - - verbosity: Optional[Literal["low", "medium", "high"]] - """Constrains the verbosity of the model's response. - - Lower values will result in more concise responses, while higher values will - result in more verbose responses. Currently supported values are `low`, - `medium`, and `high`. - """ diff --git a/src/openai/types/upload_create_params.py b/src/openai/types/upload_create_params.py index 2ebabe6c66..ab4cded81d 100644 --- a/src/openai/types/upload_create_params.py +++ b/src/openai/types/upload_create_params.py @@ -2,11 +2,11 @@ from __future__ import annotations -from typing_extensions import Required, TypedDict +from typing_extensions import Literal, Required, TypedDict from .file_purpose import FilePurpose -__all__ = ["UploadCreateParams"] +__all__ = ["UploadCreateParams", "ExpiresAfter"] class UploadCreateParams(TypedDict, total=False): @@ -29,3 +29,24 @@ class UploadCreateParams(TypedDict, total=False): See the [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). """ + + expires_after: ExpiresAfter + """The expiration policy for a file. + + By default, files with `purpose=batch` expire after 30 days and all other files + are persisted until they are manually deleted. + """ + + +class ExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["created_at"]] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `created_at`. + """ + + seconds: Required[int] + """The number of seconds after the anchor time that the file will expire. + + Must be between 3600 (1 hour) and 2592000 (30 days). 
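A minimal sketch of this expiration policy in use, mirroring the anchor/seconds values exercised by the tests further below (assumes an existing `client`; the filename is a made-up example):

# Create a file the server should expire one hour after creation.
file = client.files.create(
    file=open("training_data.jsonl", "rb"),
    purpose="fine-tune",
    expires_after={"anchor": "created_at", "seconds": 3600},
)

The same `expires_after` shape applies to `client.uploads.create(...)`, as the upload tests below show.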
+ """ diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 358ea18cbb..885c3bd9a6 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -86,6 +86,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "include_usage": True, }, temperature=1, + text={"verbosity": "low"}, tool_choice="none", tools=[ { @@ -218,6 +219,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "include_usage": True, }, temperature=1, + text={"verbosity": "low"}, tool_choice="none", tools=[ { @@ -527,6 +529,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "include_usage": True, }, temperature=1, + text={"verbosity": "low"}, tool_choice="none", tools=[ { @@ -659,6 +662,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "include_usage": True, }, temperature=1, + text={"verbosity": "low"}, tool_choice="none", tools=[ { diff --git a/tests/api_resources/test_batches.py b/tests/api_resources/test_batches.py index 6775094a58..95b94c4846 100644 --- a/tests/api_resources/test_batches.py +++ b/tests/api_resources/test_batches.py @@ -34,6 +34,10 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: endpoint="/v1/responses", input_file_id="string", metadata={"foo": "string"}, + output_expires_after={ + "anchor": "created_at", + "seconds": 3600, + }, ) assert_matches_type(Batch, batch, path=["response"]) @@ -196,6 +200,10 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> endpoint="/v1/responses", input_file_id="string", metadata={"foo": "string"}, + output_expires_after={ + "anchor": "created_at", + "seconds": 3600, + }, ) assert_matches_type(Batch, batch, path=["response"]) diff --git a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py index fc4bb4a18e..67c809f155 100644 --- a/tests/api_resources/test_files.py +++ b/tests/api_resources/test_files.py @@ -31,6 +31,18 @@ def test_method_create(self, client: OpenAI) -> None: ) assert_matches_type(FileObject, file, path=["response"]) + @parametrize + def test_method_create_with_all_params(self, client: OpenAI) -> None: + file = client.files.create( + file=b"raw file contents", + purpose="assistants", + expires_after={ + "anchor": "created_at", + "seconds": 3600, + }, + ) + assert_matches_type(FileObject, file, path=["response"]) + @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.files.with_raw_response.create( @@ -272,6 +284,18 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: ) assert_matches_type(FileObject, file, path=["response"]) + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: + file = await async_client.files.create( + file=b"raw file contents", + purpose="assistants", + expires_after={ + "anchor": "created_at", + "seconds": 3600, + }, + ) + assert_matches_type(FileObject, file, path=["response"]) + @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.files.with_raw_response.create( diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py index 310800b87e..868ab3a4ca 100644 --- a/tests/api_resources/test_responses.py +++ b/tests/api_resources/test_responses.py @@ -10,9 +10,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import 
assert_matches_type from openai._utils import assert_signatures_in_sync -from openai.types.responses import ( - Response, -) +from openai.types.responses import Response base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_uploads.py b/tests/api_resources/test_uploads.py index 72a2f6c83d..0e438a3c61 100644 --- a/tests/api_resources/test_uploads.py +++ b/tests/api_resources/test_uploads.py @@ -27,6 +27,20 @@ def test_method_create(self, client: OpenAI) -> None: ) assert_matches_type(Upload, upload, path=["response"]) + @parametrize + def test_method_create_with_all_params(self, client: OpenAI) -> None: + upload = client.uploads.create( + bytes=0, + filename="filename", + mime_type="mime_type", + purpose="assistants", + expires_after={ + "anchor": "created_at", + "seconds": 3600, + }, + ) + assert_matches_type(Upload, upload, path=["response"]) + @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.uploads.with_raw_response.create( @@ -162,6 +176,20 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: ) assert_matches_type(Upload, upload, path=["response"]) + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: + upload = await async_client.uploads.create( + bytes=0, + filename="filename", + mime_type="mime_type", + purpose="assistants", + expires_after={ + "anchor": "created_at", + "seconds": 3600, + }, + ) + assert_matches_type(Upload, upload, path=["response"]) + @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.uploads.with_raw_response.create( From adb1af8073391a6d58be9c13cfa0664c04d859e2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 18 Aug 2025 05:06:39 +0000 Subject: [PATCH 78/90] release: 1.100.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 2dfeb2d9bb..e1f6d3e50c 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.99.9" + ".": "1.100.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 392fb8b667..0adb892623 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.100.0 (2025-08-18) + +Full Changelog: [v1.99.9...v1.100.0](https://github.com/openai/openai-python/compare/v1.99.9...v1.100.0) + +### Features + +* **api:** add new text parameters, expiration options ([e3dfa7c](https://github.com/openai/openai-python/commit/e3dfa7c417b8c750ff62d98650e75e72ad9b1477)) + ## 1.99.9 (2025-08-12) Full Changelog: [v1.99.8...v1.99.9](https://github.com/openai/openai-python/compare/v1.99.8...v1.99.9) diff --git a/pyproject.toml b/pyproject.toml index ced6079b6d..5fc0396a46 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.99.9" +version = "1.100.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 7d3b3da5d7..d666729b59 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.99.9" # x-release-please-version +__version__ = "1.100.0" # x-release-please-version From b3547d662e76974b8c6a670eff8c5a05f8bb7f4c Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 18 Aug 2025 16:35:21 -0400 Subject: [PATCH 79/90] fix(types): revert response text config deletion --- src/openai/types/responses/__init__.py | 2 ++ .../types/responses/response_text_config.py | 35 ++++++++++++++++++ .../responses/response_text_config_param.py | 36 +++++++++++++++++++ 3 files changed, 73 insertions(+) create mode 100644 src/openai/types/responses/response_text_config.py create mode 100644 src/openai/types/responses/response_text_config_param.py diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py index 72ec741f91..74d8688081 100644 --- a/src/openai/types/responses/__init__.py +++ b/src/openai/types/responses/__init__.py @@ -42,6 +42,7 @@ from .response_input_param import ResponseInputParam as ResponseInputParam from .response_output_item import ResponseOutputItem as ResponseOutputItem from .response_output_text import ResponseOutputText as ResponseOutputText +from .response_text_config import ResponseTextConfig as ResponseTextConfig from .tool_choice_function import ToolChoiceFunction as ToolChoiceFunction from .response_failed_event import ResponseFailedEvent as ResponseFailedEvent from .response_prompt_param import ResponsePromptParam as ResponsePromptParam @@ -75,6 +76,7 @@ from .response_in_progress_event import ResponseInProgressEvent as ResponseInProgressEvent from .response_input_image_param import ResponseInputImageParam as ResponseInputImageParam from .response_output_text_param import ResponseOutputTextParam as ResponseOutputTextParam +from .response_text_config_param import ResponseTextConfigParam as ResponseTextConfigParam from .tool_choice_function_param import ToolChoiceFunctionParam as ToolChoiceFunctionParam from .response_computer_tool_call import ResponseComputerToolCall as ResponseComputerToolCall from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig diff --git a/src/openai/types/responses/response_text_config.py b/src/openai/types/responses/response_text_config.py new file mode 100644 index 0000000000..c53546da6d --- /dev/null +++ b/src/openai/types/responses/response_text_config.py @@ -0,0 +1,35 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel +from .response_format_text_config import ResponseFormatTextConfig + +__all__ = ["ResponseTextConfig"] + + +class ResponseTextConfig(BaseModel): + format: Optional[ResponseFormatTextConfig] = None + """An object specifying the format that the model must output. + + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ + + verbosity: Optional[Literal["low", "medium", "high"]] = None + """Constrains the verbosity of the model's response. 
+ + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. + """ diff --git a/src/openai/types/responses/response_text_config_param.py b/src/openai/types/responses/response_text_config_param.py new file mode 100644 index 0000000000..1229fce35b --- /dev/null +++ b/src/openai/types/responses/response_text_config_param.py @@ -0,0 +1,36 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, TypedDict + +from .response_format_text_config_param import ResponseFormatTextConfigParam + +__all__ = ["ResponseTextConfigParam"] + + +class ResponseTextConfigParam(TypedDict, total=False): + format: ResponseFormatTextConfigParam + """An object specifying the format that the model must output. + + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ + + verbosity: Optional[Literal["low", "medium", "high"]] + """Constrains the verbosity of the model's response. + + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. 
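A short sketch of the restored verbosity control on the Responses API (assumes an existing `client`; the model name is illustrative):

response = client.responses.create(
    model="gpt-5",
    input="Explain HTTP caching.",
    text={"verbosity": "low"},  # one of "low" | "medium" | "high"
)
print(response.output_text)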
+ """ From f889071b8f64739998b7ac31df045881cf5bec62 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 18 Aug 2025 20:40:53 +0000 Subject: [PATCH 80/90] release: 1.100.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e1f6d3e50c..6fb2e7075d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.100.0" + ".": "1.100.1" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 0adb892623..4f3362af2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.100.1 (2025-08-18) + +Full Changelog: [v1.100.0...v1.100.1](https://github.com/openai/openai-python/compare/v1.100.0...v1.100.1) + +### Bug Fixes + +* **types:** revert response text config deletion ([ac4fb19](https://github.com/openai/openai-python/commit/ac4fb1922ae125c8310c30e402932e8bb2976f58)) + ## 1.100.0 (2025-08-18) Full Changelog: [v1.99.9...v1.100.0](https://github.com/openai/openai-python/compare/v1.99.9...v1.100.0) diff --git a/pyproject.toml b/pyproject.toml index 5fc0396a46..a9baee6a55 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.100.0" +version = "1.100.1" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index d666729b59..608d190655 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.100.0" # x-release-please-version +__version__ = "1.100.1" # x-release-please-version From a94bd5b239ad73b1f6f7cf11a2fa9d9279096321 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 19 Aug 2025 13:48:27 +0000 Subject: [PATCH 81/90] chore(api): accurately represent shape for verbosity on Chat Completions --- .stats.yml | 6 +- .../resources/chat/completions/completions.py | 30 ++------- src/openai/resources/responses/responses.py | 65 +++++++++++++++---- .../types/chat/completion_create_params.py | 15 +---- .../types/graders/text_similarity_grader.py | 16 ++++- .../graders/text_similarity_grader_param.py | 16 ++++- src/openai/types/responses/response.py | 39 +++-------- .../types/responses/response_create_params.py | 38 +++-------- tests/api_resources/chat/test_completions.py | 4 -- tests/api_resources/test_responses.py | 4 +- tests/lib/chat/test_completions.py | 2 +- 11 files changed, 110 insertions(+), 125 deletions(-) diff --git a/.stats.yml b/.stats.yml index 66c46e7730..81c991168c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-24be531010b354303d741fc9247c1f84f75978f9f7de68aca92cb4f240a04722.yml -openapi_spec_hash: 3e46f439f6a863beadc71577eb4efa15 -config_hash: ed87b9139ac595a04a2162d754df2fed +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-7ef7a457c3bf05364e66e48c9ca34f31bfef1f6c9b7c15b1812346105e0abb16.yml +openapi_spec_hash: a2b1f5d8fbb62175c93b0ebea9f10063 +config_hash: 76afa3236f36854a8705f1281b1990b8 diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py index bc5fe0fc05..7e209ff0ee 100644 --- a/src/openai/resources/chat/completions/completions.py +++ b/src/openai/resources/chat/completions/completions.py @@ -103,7 +103,6 @@ def parse( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -204,7 +203,6 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": _type_to_response_format(response_format), - "text": text, "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, @@ -267,7 +265,6 @@ def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -459,7 +456,7 @@ def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - Supports text and image inputs. Note: image inputs over 10MB will be dropped. 
+ Supports text and image inputs. Note: image inputs over 8MB will be dropped. stream: If set to true, the model response data will be streamed to the client as it is generated using @@ -556,7 +553,6 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -757,7 +753,7 @@ def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - Supports text and image inputs. Note: image inputs over 10MB will be dropped. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. stream_options: Options for streaming response. Only set this when you set `stream: true`. @@ -845,7 +841,6 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1046,7 +1041,7 @@ def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - Supports text and image inputs. Note: image inputs over 10MB will be dropped. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. stream_options: Options for streaming response. Only set this when you set `stream: true`. 
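A sketch tying together the `store` and streaming options documented above (assumes an existing `client`; the model name is illustrative):

stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say hello."}],
    store=True,  # persist the completion for distillation/evals
    stream=True,
    stream_options={"include_usage": True},
)
for chunk in stream:
    if chunk.choices:  # the final usage-only chunk carries no choices
        print(chunk.choices[0].delta.content or "", end="")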
@@ -1134,7 +1129,6 @@ def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1181,7 +1175,6 @@ def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, - "text": text, "tool_choice": tool_choice, "tools": tools, "top_logprobs": top_logprobs, @@ -1404,7 +1397,6 @@ def stream( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -1475,7 +1467,6 @@ def stream( presence_penalty=presence_penalty, prompt_cache_key=prompt_cache_key, reasoning_effort=reasoning_effort, - text=text, safety_identifier=safety_identifier, seed=seed, service_tier=service_tier, @@ -1548,7 +1539,6 @@ async def parse( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -1649,7 +1639,6 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": _type_to_response_format(response_format), - "text": text, "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, @@ -1712,7 +1701,6 @@ async def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1904,7 +1892,7 @@ async def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - Supports text and image inputs. Note: image inputs over 10MB will be dropped. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. 
stream: If set to true, the model response data will be streamed to the client as it is generated using @@ -2001,7 +1989,6 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2202,7 +2189,7 @@ async def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - Supports text and image inputs. Note: image inputs over 10MB will be dropped. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. stream_options: Options for streaming response. Only set this when you set `stream: true`. @@ -2290,7 +2277,6 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2491,7 +2477,7 @@ async def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - Supports text and image inputs. Note: image inputs over 10MB will be dropped. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. stream_options: Options for streaming response. Only set this when you set `stream: true`. 
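A sketch of requesting the 'priority' processing tier these docstrings describe, run inside an async function (assumes `client = AsyncOpenAI()`; tier availability depends on the account):

completion = await client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}],
    service_tier="priority",
)
print(completion.service_tier)  # the tier actually used for the request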
@@ -2579,7 +2565,6 @@ async def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2626,7 +2611,6 @@ async def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, - "text": text, "tool_choice": tool_choice, "tools": tools, "top_logprobs": top_logprobs, @@ -2849,7 +2833,6 @@ def stream( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -2921,7 +2904,6 @@ def stream( presence_penalty=presence_penalty, prompt_cache_key=prompt_cache_key, reasoning_effort=reasoning_effort, - text=text, safety_identifier=safety_identifier, seed=seed, service_tier=service_tier, diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index 97ad0faa94..375f8b7e71 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -43,6 +43,7 @@ from ...types.responses.response_input_param import ResponseInputParam from ...types.responses.response_prompt_param import ResponsePromptParam from ...types.responses.response_stream_event import ResponseStreamEvent +from ...types.responses.response_text_config_param import ResponseTextConfigParam __all__ = ["Responses", "AsyncResponses"] @@ -94,7 +95,7 @@ def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -238,6 +239,12 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. 
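A sketch of the `text` parameter with a JSON-schema format, per the docstring above (assumes an existing `client`; the schema itself is a made-up example):

response = client.responses.create(
    model="gpt-4o",
    input="List three primary colors as JSON.",
    text={
        "format": {
            "type": "json_schema",
            "name": "colors",
            "schema": {
                "type": "object",
                "properties": {"colors": {"type": "array", "items": {"type": "string"}}},
                "required": ["colors"],
                "additionalProperties": False,
            },
        }
    },
)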
@@ -315,7 +322,7 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -459,6 +466,12 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. @@ -536,7 +549,7 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -680,6 +693,12 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. 
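A sketch of steering tool selection with `tool_choice`, as the parameter documentation above describes (assumes an existing `client`; the weather tool is a hypothetical example):

response = client.responses.create(
    model="gpt-4o",
    input="What's the weather in Paris?",
    tools=[
        {
            "type": "function",
            "name": "get_weather",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
                "additionalProperties": False,
            },
            "strict": True,
        }
    ],
    tool_choice={"type": "function", "name": "get_weather"},  # force this tool
)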
@@ -756,7 +775,7 @@ def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -847,7 +866,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -879,7 +898,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -1008,7 +1027,7 @@ def parse( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1439,7 +1458,7 @@ async def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1583,6 +1602,12 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. 
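A sketch of the `parse` helper whose signature appears above, using a Pydantic model as the `text_format` (assumes an existing `client`; the CalendarEvent model is a hypothetical example):

from pydantic import BaseModel

class CalendarEvent(BaseModel):
    name: str
    date: str

parsed = client.responses.parse(
    model="gpt-4o",
    input="Alice and Bob meet on Friday.",
    text_format=CalendarEvent,
)
print(parsed.output_parsed)  # a CalendarEvent instance, or None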
@@ -1660,7 +1685,7 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1804,6 +1829,12 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. @@ -1881,7 +1912,7 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2025,6 +2056,12 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. 
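A sketch of the streaming helper diffed in this file (assumes an existing `client`; the event-type string shown is the one emitted for incremental output text):

with client.responses.stream(
    model="gpt-4o",
    input="Write a haiku about the sea.",
) as stream:
    for event in stream:
        if event.type == "response.output_text.delta":
            print(event.delta, end="")
    print()  # final newline once the stream completes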
@@ -2101,7 +2138,7 @@ async def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2192,7 +2229,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -2224,7 +2261,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -2357,7 +2394,7 @@ async def parse( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 3ebab45b56..da37ee4c13 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -25,7 +25,6 @@ "FunctionCall", "Function", "ResponseFormat", - "Text", "WebSearchOptions", "WebSearchOptionsUserLocation", "WebSearchOptionsUserLocationApproximate", @@ -257,7 +256,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - Supports text and image inputs. Note: image inputs over 10MB will be dropped. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. """ stream_options: Optional[ChatCompletionStreamOptionsParam] @@ -271,8 +270,6 @@ class CompletionCreateParamsBase(TypedDict, total=False): this or `top_p` but not both. """ - text: Text - tool_choice: ChatCompletionToolChoiceOptionParam """ Controls which (if any) tool is called by the model. 
`none` means the model will @@ -367,16 +364,6 @@ class Function(TypedDict, total=False): ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject] -class Text(TypedDict, total=False): - verbosity: Optional[Literal["low", "medium", "high"]] - """Constrains the verbosity of the model's response. - - Lower values will result in more concise responses, while higher values will - result in more verbose responses. Currently supported values are `low`, - `medium`, and `high`. - """ - - class WebSearchOptionsUserLocationApproximate(TypedDict, total=False): city: str """Free text input for the city of the user, e.g. `San Francisco`.""" diff --git a/src/openai/types/graders/text_similarity_grader.py b/src/openai/types/graders/text_similarity_grader.py index 738d317766..9082ac8969 100644 --- a/src/openai/types/graders/text_similarity_grader.py +++ b/src/openai/types/graders/text_similarity_grader.py @@ -9,12 +9,22 @@ class TextSimilarityGrader(BaseModel): evaluation_metric: Literal[ - "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l" + "cosine", + "fuzzy_match", + "bleu", + "gleu", + "meteor", + "rouge_1", + "rouge_2", + "rouge_3", + "rouge_4", + "rouge_5", + "rouge_l", ] """The evaluation metric to use. - One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, - `rouge_4`, `rouge_5`, or `rouge_l`. + One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, + `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. """ input: str diff --git a/src/openai/types/graders/text_similarity_grader_param.py b/src/openai/types/graders/text_similarity_grader_param.py index db14553217..1646afc84b 100644 --- a/src/openai/types/graders/text_similarity_grader_param.py +++ b/src/openai/types/graders/text_similarity_grader_param.py @@ -10,13 +10,23 @@ class TextSimilarityGraderParam(TypedDict, total=False): evaluation_metric: Required[ Literal[ - "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l" + "cosine", + "fuzzy_match", + "bleu", + "gleu", + "meteor", + "rouge_1", + "rouge_2", + "rouge_3", + "rouge_4", + "rouge_5", + "rouge_l", ] ] """The evaluation metric to use. - One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, - `rouge_4`, `rouge_5`, or `rouge_l`. + One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, + `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. """ input: Required[str] diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index 49e38a46fe..49f60bbc5c 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -18,11 +18,11 @@ from .tool_choice_allowed import ToolChoiceAllowed from .tool_choice_options import ToolChoiceOptions from .response_output_item import ResponseOutputItem +from .response_text_config import ResponseTextConfig from .tool_choice_function import ToolChoiceFunction from ..shared.responses_model import ResponsesModel -from .response_format_text_config import ResponseFormatTextConfig -__all__ = ["Response", "IncompleteDetails", "ToolChoice", "Text"] +__all__ = ["Response", "IncompleteDetails", "ToolChoice"] class IncompleteDetails(BaseModel): @@ -35,32 +35,6 @@ class IncompleteDetails(BaseModel): ] -class Text(BaseModel): - format: Optional[ResponseFormatTextConfig] = None - """An object specifying the format that the model must output. 
- - Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - ensures the model will match your supplied JSON schema. Learn more in the - [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - - The default format is `{ "type": "text" }` with no additional options. - - **Not recommended for gpt-4o and newer models:** - - Setting to `{ "type": "json_object" }` enables the older JSON mode, which - ensures the message the model generates is valid JSON. Using `json_schema` is - preferred for models that support it. - """ - - verbosity: Optional[Literal["low", "medium", "high"]] = None - """Constrains the verbosity of the model's response. - - Lower values will result in more concise responses, while higher values will - result in more verbose responses. Currently supported values are `low`, - `medium`, and `high`. - """ - - class Response(BaseModel): id: str """Unique identifier for this Response.""" @@ -244,7 +218,14 @@ class Response(BaseModel): `incomplete`. """ - text: Optional[Text] = None + text: Optional[ResponseTextConfig] = None + """Configuration options for a text response from the model. + + Can be plain text or structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ top_logprobs: Optional[int] = None """ diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index 89afccf06b..0cd761fcf0 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -16,14 +16,13 @@ from ..shared_params.reasoning import Reasoning from .tool_choice_custom_param import ToolChoiceCustomParam from .tool_choice_allowed_param import ToolChoiceAllowedParam +from .response_text_config_param import ResponseTextConfigParam from .tool_choice_function_param import ToolChoiceFunctionParam from ..shared_params.responses_model import ResponsesModel -from .response_format_text_config_param import ResponseFormatTextConfigParam __all__ = [ "ResponseCreateParamsBase", "StreamOptions", - "Text", "ToolChoice", "ResponseCreateParamsNonStreaming", "ResponseCreateParamsStreaming", @@ -183,7 +182,14 @@ class ResponseCreateParamsBase(TypedDict, total=False): this or `top_p` but not both. """ - text: Text + text: ResponseTextConfigParam + """Configuration options for a text response from the model. + + Can be plain text or structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ tool_choice: ToolChoice """ @@ -260,32 +266,6 @@ class StreamOptions(TypedDict, total=False): """ -class Text(TypedDict, total=False): - format: ResponseFormatTextConfigParam - """An object specifying the format that the model must output. - - Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - ensures the model will match your supplied JSON schema. Learn more in the - [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - - The default format is `{ "type": "text" }` with no additional options. - - **Not recommended for gpt-4o and newer models:** - - Setting to `{ "type": "json_object" }` enables the older JSON mode, which - ensures the message the model generates is valid JSON. 
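[Editor's note on this hunk: the net effect of the change above is that `text` on the Responses API is now typed by the shared `ResponseTextConfigParam` rather than a locally duplicated `Text` TypedDict; the wire shape is unchanged. A minimal sketch of what a caller passes, assuming a model that accepts the `verbosity` setting (`gpt-5` here is illustrative, not prescribed by the diff):

```python
from openai import OpenAI

client = OpenAI()

# The `text` argument carries an optional output `format` plus the
# `verbosity` knob described in the docstring above.
response = client.responses.create(
    model="gpt-5",  # assumption: a model that supports `verbosity`
    input="Summarize the attached notes in two sentences.",
    text={
        "format": {"type": "text"},  # default; `{"type": "json_schema"}` enables Structured Outputs
        "verbosity": "low",          # one of "low" | "medium" | "high"
    },
)
print(response.output_text)
```
]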
Using `json_schema` is - preferred for models that support it. - """ - - verbosity: Optional[Literal["low", "medium", "high"]] - """Constrains the verbosity of the model's response. - - Lower values will result in more concise responses, while higher values will - result in more verbose responses. Currently supported values are `low`, - `medium`, and `high`. - """ - - ToolChoice: TypeAlias = Union[ ToolChoiceOptions, ToolChoiceAllowedParam, diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 885c3bd9a6..358ea18cbb 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -86,7 +86,6 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "include_usage": True, }, temperature=1, - text={"verbosity": "low"}, tool_choice="none", tools=[ { @@ -219,7 +218,6 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "include_usage": True, }, temperature=1, - text={"verbosity": "low"}, tool_choice="none", tools=[ { @@ -529,7 +527,6 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "include_usage": True, }, temperature=1, - text={"verbosity": "low"}, tool_choice="none", tools=[ { @@ -662,7 +659,6 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "include_usage": True, }, temperature=1, - text={"verbosity": "low"}, tool_choice="none", tools=[ { diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py index 868ab3a4ca..310800b87e 100644 --- a/tests/api_resources/test_responses.py +++ b/tests/api_resources/test_responses.py @@ -10,7 +10,9 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai._utils import assert_signatures_in_sync -from openai.types.responses import Response +from openai.types.responses import ( + Response, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/lib/chat/test_completions.py b/tests/lib/chat/test_completions.py index f04a0e3782..f69bc09ca3 100644 --- a/tests/lib/chat/test_completions.py +++ b/tests/lib/chat/test_completions.py @@ -541,7 +541,7 @@ class Location(BaseModel): content_snapshot=snapshot( '{"id": "chatcmpl-ABfvvX7eB1KsfeZj8VcF3z7G7SbaA", "object": "chat.completion", "created": 1727346163, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"", "refusal": null}, "logprobs": null, "finish_reason": "length"}], "usage": {"prompt_tokens": 79, "completion_tokens": 1, "total_tokens": 80, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_7568d46099"}' ), - path="/chat/completions", + path="/chat/completions", mock_client=client, respx_mock=respx_mock, ) From 4ada66f8f86473f342aa032ed021b62180422dc1 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 19 Aug 2025 14:10:47 +0000 Subject: [PATCH 82/90] release: 1.100.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6fb2e7075d..8910831376 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.100.1" + ".": "1.100.2" } \ No newline at end of file diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 4f3362af2f..2254a59f75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.100.2 (2025-08-19) + +Full Changelog: [v1.100.1...v1.100.2](https://github.com/openai/openai-python/compare/v1.100.1...v1.100.2) + +### Chores + +* **api:** accurately represent shape for verbosity on Chat Completions ([c39d5fd](https://github.com/openai/openai-python/commit/c39d5fd3f5429c6d41f257669a1dd4c67a477455)) + ## 1.100.1 (2025-08-18) Full Changelog: [v1.100.0...v1.100.1](https://github.com/openai/openai-python/compare/v1.100.0...v1.100.1) diff --git a/pyproject.toml b/pyproject.toml index a9baee6a55..c8c3d2fd2b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.100.1" +version = "1.100.2" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 608d190655..29840a21b8 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.100.1" # x-release-please-version +__version__ = "1.100.2" # x-release-please-version From 72e0ad60f0a6cb2c7d39651c7217b3dd1e86315b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 19 Aug 2025 19:38:10 +0000 Subject: [PATCH 83/90] chore(internal/ci): setup breaking change detection --- .github/workflows/detect-breaking-changes.yml | 42 ++++++++++ .stats.yml | 2 +- pyproject.toml | 1 + requirements-dev.lock | 3 + scripts/detect-breaking-changes | 24 ++++++ scripts/detect-breaking-changes.py | 79 +++++++++++++++++++ 6 files changed, 150 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/detect-breaking-changes.yml create mode 100755 scripts/detect-breaking-changes create mode 100644 scripts/detect-breaking-changes.py diff --git a/.github/workflows/detect-breaking-changes.yml b/.github/workflows/detect-breaking-changes.yml new file mode 100644 index 0000000000..f10fdf3b19 --- /dev/null +++ b/.github/workflows/detect-breaking-changes.yml @@ -0,0 +1,42 @@ +name: CI +on: + pull_request: + branches: + - main + - next + +jobs: + detect_breaking_changes: + runs-on: 'ubuntu-latest' + name: detect-breaking-changes + if: github.repository == 'openai/openai-python' + steps: + - name: Calculate fetch-depth + run: | + echo "FETCH_DEPTH=$(expr ${{ github.event.pull_request.commits }} + 1)" >> $GITHUB_ENV + + - uses: actions/checkout@v4 + with: + # Ensure we can check out the pull request base in the script below. + fetch-depth: ${{ env.FETCH_DEPTH }} + + - name: Install Rye + run: | + curl -sSf https://rye.astral.sh/get | bash + echo "$HOME/.rye/shims" >> $GITHUB_PATH + env: + RYE_VERSION: '0.44.0' + RYE_INSTALL_OPTION: '--yes' + - name: Install dependencies + run: | + rye sync --all-features + - name: Detect removed symbols + run: | + rye run python scripts/detect-breaking-changes.py "${{ github.event.pull_request.base.sha }}" + + - name: Detect breaking changes + run: | + # Try to check out previous versions of the breaking change detection script. This ensures that + # we still detect breaking changes when entire files and their tests are removed. 
+ git checkout "${{ github.event.pull_request.base.sha }}" -- ./scripts/detect-breaking-changes 2>/dev/null || true + ./scripts/detect-breaking-changes ${{ github.event.pull_request.base.sha }} \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index 81c991168c..d4994342f7 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-7ef7a457c3bf05364e66e48c9ca34f31bfef1f6c9b7c15b1812346105e0abb16.yml openapi_spec_hash: a2b1f5d8fbb62175c93b0ebea9f10063 -config_hash: 76afa3236f36854a8705f1281b1990b8 +config_hash: 4870312b04f48fd717ea4151053e7fb9 diff --git a/pyproject.toml b/pyproject.toml index c8c3d2fd2b..eb1f588896 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -71,6 +71,7 @@ dev-dependencies = [ "trio >=0.22.2", "nest_asyncio==1.6.0", "pytest-xdist>=3.6.1", + "griffe>=1", ] [tool.rye.scripts] diff --git a/requirements-dev.lock b/requirements-dev.lock index b1886e036f..e619cb6b64 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -44,6 +44,8 @@ cffi==1.16.0 # via sounddevice charset-normalizer==3.3.2 # via requests +colorama==0.4.6 + # via griffe colorlog==6.7.0 # via nox cryptography==42.0.7 @@ -68,6 +70,7 @@ filelock==3.12.4 frozenlist==1.7.0 # via aiohttp # via aiosignal +griffe==1.12.1 h11==0.16.0 # via httpcore httpcore==1.0.9 diff --git a/scripts/detect-breaking-changes b/scripts/detect-breaking-changes new file mode 100755 index 0000000000..833872ef3a --- /dev/null +++ b/scripts/detect-breaking-changes @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +echo "==> Detecting breaking changes" + +TEST_PATHS=( + tests/api_resources + tests/test_client.py + tests/test_response.py + tests/test_legacy_response.py +) + +for PATHSPEC in "${TEST_PATHS[@]}"; do + # Try to check out previous versions of the test files + # with the current SDK. + git checkout "$1" -- "${PATHSPEC}" 2>/dev/null || true +done + +# Instead of running the tests, use the linter to check if an +# older test is no longer compatible with the latest SDK. 
+./scripts/lint diff --git a/scripts/detect-breaking-changes.py b/scripts/detect-breaking-changes.py new file mode 100644 index 0000000000..3a30f3db2f --- /dev/null +++ b/scripts/detect-breaking-changes.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +import sys +from typing import Iterator +from pathlib import Path + +import rich +import griffe +from rich.text import Text +from rich.style import Style + + +def public_members(obj: griffe.Object | griffe.Alias) -> dict[str, griffe.Object | griffe.Alias]: + if isinstance(obj, griffe.Alias): + # ignore imports for now, they're technically part of the public API + # but we don't have good preventative measures in place to prevent + # changing them + return {} + + return {name: value for name, value in obj.all_members.items() if not name.startswith("_")} + + +def find_breaking_changes( + new_obj: griffe.Object | griffe.Alias, + old_obj: griffe.Object | griffe.Alias, + *, + path: list[str], +) -> Iterator[Text | str]: + new_members = public_members(new_obj) + old_members = public_members(old_obj) + + for name, old_member in old_members.items(): + if isinstance(old_member, griffe.Alias) and len(path) > 2: + # ignore imports in `/types/` for now, they're technically part of the public API + # but we don't have good preventative measures in place to prevent changing them + continue + + new_member = new_members.get(name) + if new_member is None: + cls_name = old_member.__class__.__name__ + yield Text(f"({cls_name})", style=Style(color="rgb(119, 119, 119)")) + yield from [" " for _ in range(10 - len(cls_name))] + yield f" {'.'.join(path)}.{name}" + yield "\n" + continue + + yield from find_breaking_changes(new_member, old_member, path=[*path, name]) + + +def main() -> None: + try: + against_ref = sys.argv[1] + except IndexError as err: + raise RuntimeError("You must specify a base ref to run breaking change detection against") from err + + package = griffe.load( + "openai", + search_paths=[Path(__file__).parent.parent.joinpath("src")], + ) + old_package = griffe.load_git( + "openai", + ref=against_ref, + search_paths=["src"], + ) + assert isinstance(package, griffe.Module) + assert isinstance(old_package, griffe.Module) + + output = list(find_breaking_changes(package, old_package, path=["openai"])) + if output: + rich.print(Text("Breaking changes detected!", style=Style(color="rgb(165, 79, 87)"))) + rich.print() + + for text in output: + rich.print(text, end="") + + sys.exit(1) + + +main() From e328fb4d79badc7ca28a1f599a56ab43eb420363 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 20 Aug 2025 05:04:00 +0000 Subject: [PATCH 84/90] release: 1.100.3 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 8910831376..f3cdcd790c 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.100.2" + ".": "1.100.3" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 2254a59f75..c2f89cb09b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.100.3 (2025-08-20) + +Full Changelog: [v1.100.2...v1.100.3](https://github.com/openai/openai-python/compare/v1.100.2...v1.100.3) + +### Chores + +* **internal/ci:** setup breaking change detection 
([ca2f936](https://github.com/openai/openai-python/commit/ca2f93600238e875f26395faf6afbefaf15b7c97)) + ## 1.100.2 (2025-08-19) Full Changelog: [v1.100.1...v1.100.2](https://github.com/openai/openai-python/compare/v1.100.1...v1.100.2) diff --git a/pyproject.toml b/pyproject.toml index eb1f588896..4d1055bfce 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.100.2" +version = "1.100.3" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 29840a21b8..9881b45247 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.100.2" # x-release-please-version +__version__ = "1.100.3" # x-release-please-version From 4e28a424e6afd60040e3bdf7c76eebb63bc0c407 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 21 Aug 2025 16:10:05 -0500 Subject: [PATCH 85/90] release: 1.101.0 (#2577) * feat(api): adding support for /v1/conversations to the API * chore: update github action * feat(api): Add connectors support for MCP tool * release: 1.101.0 --------- Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com> --- .github/workflows/ci.yml | 4 +- .release-please-manifest.json | 2 +- .stats.yml | 8 +- CHANGELOG.md | 14 + api.md | 49 ++ pyproject.toml | 2 +- src/openai/__init__.py | 1 + src/openai/_client.py | 38 ++ src/openai/_module_client.py | 8 + src/openai/_version.py | 2 +- src/openai/pagination.py | 67 ++- .../resources/conversations/__init__.py | 33 ++ .../resources/conversations/conversations.py | 474 +++++++++++++++ src/openai/resources/conversations/items.py | 553 ++++++++++++++++++ src/openai/resources/responses/input_items.py | 8 - src/openai/resources/responses/responses.py | 60 +- src/openai/types/conversations/__init__.py | 27 + .../computer_screenshot_content.py | 22 + .../container_file_citation_body.py | 27 + .../types/conversations/conversation.py | 30 + .../conversation_create_params.py | 26 + .../conversation_deleted_resource.py | 15 + .../types/conversations/conversation_item.py | 209 +++++++ .../conversations/conversation_item_list.py | 26 + .../conversation_update_params.py | 19 + .../types/conversations/file_citation_body.py | 21 + .../types/conversations/input_file_content.py | 22 + .../conversations/input_image_content.py | 28 + .../types/conversations/input_text_content.py | 15 + .../types/conversations/item_create_params.py | 24 + .../types/conversations/item_list_params.py | 48 ++ .../conversations/item_retrieve_params.py | 22 + src/openai/types/conversations/lob_prob.py | 18 + src/openai/types/conversations/message.py | 56 ++ .../conversations/output_text_content.py | 30 + .../types/conversations/refusal_content.py | 15 + .../conversations/summary_text_content.py | 13 + .../types/conversations/text_content.py | 13 + .../types/conversations/top_log_prob.py | 15 + .../types/conversations/url_citation_body.py | 24 + ...create_eval_completions_run_data_source.py | 26 +- ..._eval_completions_run_data_source_param.py | 24 +- src/openai/types/responses/__init__.py | 1 + .../types/responses/input_item_list_params.py | 3 - src/openai/types/responses/response.py | 15 +- .../responses/response_conversation_param.py | 12 + .../types/responses/response_create_params.py | 14 + 
src/openai/types/responses/tool.py | 84 ++- src/openai/types/responses/tool_param.py | 82 ++- tests/api_resources/conversations/__init__.py | 1 + .../api_resources/conversations/test_items.py | 491 ++++++++++++++++ .../responses/test_input_items.py | 2 - tests/api_resources/test_conversations.py | 341 +++++++++++ tests/api_resources/test_responses.py | 4 + 54 files changed, 3114 insertions(+), 74 deletions(-) create mode 100644 src/openai/resources/conversations/__init__.py create mode 100644 src/openai/resources/conversations/conversations.py create mode 100644 src/openai/resources/conversations/items.py create mode 100644 src/openai/types/conversations/__init__.py create mode 100644 src/openai/types/conversations/computer_screenshot_content.py create mode 100644 src/openai/types/conversations/container_file_citation_body.py create mode 100644 src/openai/types/conversations/conversation.py create mode 100644 src/openai/types/conversations/conversation_create_params.py create mode 100644 src/openai/types/conversations/conversation_deleted_resource.py create mode 100644 src/openai/types/conversations/conversation_item.py create mode 100644 src/openai/types/conversations/conversation_item_list.py create mode 100644 src/openai/types/conversations/conversation_update_params.py create mode 100644 src/openai/types/conversations/file_citation_body.py create mode 100644 src/openai/types/conversations/input_file_content.py create mode 100644 src/openai/types/conversations/input_image_content.py create mode 100644 src/openai/types/conversations/input_text_content.py create mode 100644 src/openai/types/conversations/item_create_params.py create mode 100644 src/openai/types/conversations/item_list_params.py create mode 100644 src/openai/types/conversations/item_retrieve_params.py create mode 100644 src/openai/types/conversations/lob_prob.py create mode 100644 src/openai/types/conversations/message.py create mode 100644 src/openai/types/conversations/output_text_content.py create mode 100644 src/openai/types/conversations/refusal_content.py create mode 100644 src/openai/types/conversations/summary_text_content.py create mode 100644 src/openai/types/conversations/text_content.py create mode 100644 src/openai/types/conversations/top_log_prob.py create mode 100644 src/openai/types/conversations/url_citation_body.py create mode 100644 src/openai/types/responses/response_conversation_param.py create mode 100644 tests/api_resources/conversations/__init__.py create mode 100644 tests/api_resources/conversations/test_items.py create mode 100644 tests/api_resources/test_conversations.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8067386d5f..5e56aae09a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -36,7 +36,7 @@ jobs: run: ./scripts/lint build: - if: github.repository == 'stainless-sdks/openai-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork) + if: github.event_name == 'push' || github.event.pull_request.head.repo.fork timeout-minutes: 10 name: build permissions: @@ -61,12 +61,14 @@ jobs: run: rye build - name: Get GitHub OIDC Token + if: github.repository == 'stainless-sdks/openai-python' id: github-oidc uses: actions/github-script@v6 with: script: core.setOutput('github_token', await core.getIDToken()); - name: Upload tarball + if: github.repository == 'stainless-sdks/openai-python' env: URL: https://pkg.stainless.com/s AUTH: ${{ steps.github-oidc.outputs.github_token }} diff --git a/.release-please-manifest.json 
b/.release-please-manifest.json index f3cdcd790c..070375331a 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.100.3" + ".": "1.101.0" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index d4994342f7..f2d5304a5b 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ -configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-7ef7a457c3bf05364e66e48c9ca34f31bfef1f6c9b7c15b1812346105e0abb16.yml -openapi_spec_hash: a2b1f5d8fbb62175c93b0ebea9f10063 -config_hash: 4870312b04f48fd717ea4151053e7fb9 +configured_endpoints: 119 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-ddbdf9343316047e8a773c54fb24e4a8d225955e202a1888fde6f9c8898ebf98.yml +openapi_spec_hash: 9802f6dd381558466c897f6e387e06ca +config_hash: fe0ea26680ac2075a6cd66416aefe7db diff --git a/CHANGELOG.md b/CHANGELOG.md index c2f89cb09b..44b25e0a4c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## 1.101.0 (2025-08-21) + +Full Changelog: [v1.100.3...v1.101.0](https://github.com/openai/openai-python/compare/v1.100.3...v1.101.0) + +### Features + +* **api:** Add connectors support for MCP tool ([a47f962](https://github.com/openai/openai-python/commit/a47f962daf579c142b8af5579be732772b688a29)) +* **api:** adding support for /v1/conversations to the API ([e30bcbc](https://github.com/openai/openai-python/commit/e30bcbc0cb7c827af779bee6971f976261abfb67)) + + +### Chores + +* update github action ([7333b28](https://github.com/openai/openai-python/commit/7333b282718a5f6977f30e1a2548207b3a089bd4)) + ## 1.100.3 (2025-08-20) Full Changelog: [v1.100.2...v1.100.3](https://github.com/openai/openai-python/compare/v1.100.2...v1.100.3) diff --git a/api.md b/api.md index 92b068b134..7eb62e67f2 100644 --- a/api.md +++ b/api.md @@ -751,6 +751,7 @@ from openai.types.responses import ( ResponseContent, ResponseContentPartAddedEvent, ResponseContentPartDoneEvent, + ResponseConversationParam, ResponseCreatedEvent, ResponseCustomToolCall, ResponseCustomToolCallInputDeltaEvent, @@ -854,6 +855,54 @@ Methods: - client.responses.input_items.list(response_id, \*\*params) -> SyncCursorPage[ResponseItem] +# Conversations + +Types: + +```python +from openai.types.conversations import ( + ComputerScreenshotContent, + ContainerFileCitationBody, + Conversation, + ConversationDeleted, + ConversationDeletedResource, + FileCitationBody, + InputFileContent, + InputImageContent, + InputTextContent, + LobProb, + Message, + OutputTextContent, + RefusalContent, + SummaryTextContent, + TextContent, + TopLogProb, + URLCitationBody, +) +``` + +Methods: + +- client.conversations.create(\*\*params) -> Conversation +- client.conversations.retrieve(conversation_id) -> Conversation +- client.conversations.update(conversation_id, \*\*params) -> Conversation +- client.conversations.delete(conversation_id) -> ConversationDeletedResource + +## Items + +Types: + +```python +from openai.types.conversations import ConversationItem, ConversationItemList +``` + +Methods: + +- client.conversations.items.create(conversation_id, \*\*params) -> ConversationItemList +- client.conversations.items.retrieve(item_id, \*, conversation_id, \*\*params) -> ConversationItem +- client.conversations.items.list(conversation_id, \*\*params) -> SyncConversationCursorPage[ConversationItem] +- client.conversations.items.delete(item_id, \*, conversation_id) -> Conversation + # Evals Types: diff --git 
a/pyproject.toml b/pyproject.toml index 4d1055bfce..8198b178be 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.100.3" +version = "1.101.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/__init__.py b/src/openai/__init__.py index 226fed9554..b944fbed5e 100644 --- a/src/openai/__init__.py +++ b/src/openai/__init__.py @@ -386,5 +386,6 @@ def _reset_client() -> None: # type: ignore[reportUnusedFunction] completions as completions, fine_tuning as fine_tuning, moderations as moderations, + conversations as conversations, vector_stores as vector_stores, ) diff --git a/src/openai/_client.py b/src/openai/_client.py index ed9b46f4b0..b99db786a7 100644 --- a/src/openai/_client.py +++ b/src/openai/_client.py @@ -51,6 +51,7 @@ completions, fine_tuning, moderations, + conversations, vector_stores, ) from .resources.files import Files, AsyncFiles @@ -69,6 +70,7 @@ from .resources.responses.responses import Responses, AsyncResponses from .resources.containers.containers import Containers, AsyncContainers from .resources.fine_tuning.fine_tuning import FineTuning, AsyncFineTuning + from .resources.conversations.conversations import Conversations, AsyncConversations from .resources.vector_stores.vector_stores import VectorStores, AsyncVectorStores __all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "OpenAI", "AsyncOpenAI", "Client", "AsyncClient"] @@ -254,6 +256,12 @@ def responses(self) -> Responses: return Responses(self) + @cached_property + def conversations(self) -> Conversations: + from .resources.conversations import Conversations + + return Conversations(self) + @cached_property def evals(self) -> Evals: from .resources.evals import Evals @@ -573,6 +581,12 @@ def responses(self) -> AsyncResponses: return AsyncResponses(self) + @cached_property + def conversations(self) -> AsyncConversations: + from .resources.conversations import AsyncConversations + + return AsyncConversations(self) + @cached_property def evals(self) -> AsyncEvals: from .resources.evals import AsyncEvals @@ -802,6 +816,12 @@ def responses(self) -> responses.ResponsesWithRawResponse: return ResponsesWithRawResponse(self._client.responses) + @cached_property + def conversations(self) -> conversations.ConversationsWithRawResponse: + from .resources.conversations import ConversationsWithRawResponse + + return ConversationsWithRawResponse(self._client.conversations) + @cached_property def evals(self) -> evals.EvalsWithRawResponse: from .resources.evals import EvalsWithRawResponse @@ -905,6 +925,12 @@ def responses(self) -> responses.AsyncResponsesWithRawResponse: return AsyncResponsesWithRawResponse(self._client.responses) + @cached_property + def conversations(self) -> conversations.AsyncConversationsWithRawResponse: + from .resources.conversations import AsyncConversationsWithRawResponse + + return AsyncConversationsWithRawResponse(self._client.conversations) + @cached_property def evals(self) -> evals.AsyncEvalsWithRawResponse: from .resources.evals import AsyncEvalsWithRawResponse @@ -1008,6 +1034,12 @@ def responses(self) -> responses.ResponsesWithStreamingResponse: return ResponsesWithStreamingResponse(self._client.responses) + @cached_property + def conversations(self) -> conversations.ConversationsWithStreamingResponse: + from .resources.conversations import ConversationsWithStreamingResponse + + return ConversationsWithStreamingResponse(self._client.conversations) + 
@cached_property def evals(self) -> evals.EvalsWithStreamingResponse: from .resources.evals import EvalsWithStreamingResponse @@ -1111,6 +1143,12 @@ def responses(self) -> responses.AsyncResponsesWithStreamingResponse: return AsyncResponsesWithStreamingResponse(self._client.responses) + @cached_property + def conversations(self) -> conversations.AsyncConversationsWithStreamingResponse: + from .resources.conversations import AsyncConversationsWithStreamingResponse + + return AsyncConversationsWithStreamingResponse(self._client.conversations) + @cached_property def evals(self) -> evals.AsyncEvalsWithStreamingResponse: from .resources.evals import AsyncEvalsWithStreamingResponse diff --git a/src/openai/_module_client.py b/src/openai/_module_client.py index a80e939300..5c8df24014 100644 --- a/src/openai/_module_client.py +++ b/src/openai/_module_client.py @@ -22,6 +22,7 @@ from .resources.responses.responses import Responses from .resources.containers.containers import Containers from .resources.fine_tuning.fine_tuning import FineTuning + from .resources.conversations.conversations import Conversations from .resources.vector_stores.vector_stores import VectorStores from . import _load_client @@ -130,6 +131,12 @@ def __load__(self) -> VectorStores: return _load_client().vector_stores +class ConversationsProxy(LazyProxy["Conversations"]): + @override + def __load__(self) -> Conversations: + return _load_client().conversations + + chat: Chat = ChatProxy().__as_proxied__() beta: Beta = BetaProxy().__as_proxied__() files: Files = FilesProxy().__as_proxied__() @@ -147,3 +154,4 @@ def __load__(self) -> VectorStores: moderations: Moderations = ModerationsProxy().__as_proxied__() fine_tuning: FineTuning = FineTuningProxy().__as_proxied__() vector_stores: VectorStores = VectorStoresProxy().__as_proxied__() +conversations: Conversations = ConversationsProxy().__as_proxied__() diff --git a/src/openai/_version.py b/src/openai/_version.py index 9881b45247..802084af5d 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.100.3" # x-release-please-version +__version__ = "1.101.0" # x-release-please-version diff --git a/src/openai/pagination.py b/src/openai/pagination.py index a59cced854..4dd3788aa3 100644 --- a/src/openai/pagination.py +++ b/src/openai/pagination.py @@ -5,7 +5,14 @@ from ._base_client import BasePage, PageInfo, BaseSyncPage, BaseAsyncPage -__all__ = ["SyncPage", "AsyncPage", "SyncCursorPage", "AsyncCursorPage"] +__all__ = [ + "SyncPage", + "AsyncPage", + "SyncCursorPage", + "AsyncCursorPage", + "SyncConversationCursorPage", + "AsyncConversationCursorPage", +] _T = TypeVar("_T") @@ -123,3 +130,61 @@ def next_page_info(self) -> Optional[PageInfo]: return None return PageInfo(params={"after": item.id}) + + +class SyncConversationCursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]): + data: List[_T] + has_more: Optional[bool] = None + last_id: Optional[str] = None + + @override + def _get_page_items(self) -> List[_T]: + data = self.data + if not data: + return [] + return data + + @override + def has_next_page(self) -> bool: + has_more = self.has_more + if has_more is not None and has_more is False: + return False + + return super().has_next_page() + + @override + def next_page_info(self) -> Optional[PageInfo]: + last_id = self.last_id + if not last_id: + return None + + return PageInfo(params={"after": last_id}) + + +class AsyncConversationCursorPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]): + data: List[_T] + has_more: Optional[bool] = None + last_id: Optional[str] = None + + @override + def _get_page_items(self) -> List[_T]: + data = self.data + if not data: + return [] + return data + + @override + def has_next_page(self) -> bool: + has_more = self.has_more + if has_more is not None and has_more is False: + return False + + return super().has_next_page() + + @override + def next_page_info(self) -> Optional[PageInfo]: + last_id = self.last_id + if not last_id: + return None + + return PageInfo(params={"after": last_id}) diff --git a/src/openai/resources/conversations/__init__.py b/src/openai/resources/conversations/__init__.py new file mode 100644 index 0000000000..c6c4fd6ee4 --- /dev/null +++ b/src/openai/resources/conversations/__init__.py @@ -0,0 +1,33 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .items import ( + Items, + AsyncItems, + ItemsWithRawResponse, + AsyncItemsWithRawResponse, + ItemsWithStreamingResponse, + AsyncItemsWithStreamingResponse, +) +from .conversations import ( + Conversations, + AsyncConversations, + ConversationsWithRawResponse, + AsyncConversationsWithRawResponse, + ConversationsWithStreamingResponse, + AsyncConversationsWithStreamingResponse, +) + +__all__ = [ + "Items", + "AsyncItems", + "ItemsWithRawResponse", + "AsyncItemsWithRawResponse", + "ItemsWithStreamingResponse", + "AsyncItemsWithStreamingResponse", + "Conversations", + "AsyncConversations", + "ConversationsWithRawResponse", + "AsyncConversationsWithRawResponse", + "ConversationsWithStreamingResponse", + "AsyncConversationsWithStreamingResponse", +] diff --git a/src/openai/resources/conversations/conversations.py b/src/openai/resources/conversations/conversations.py new file mode 100644 index 0000000000..13bc1fb1ce --- /dev/null +++ b/src/openai/resources/conversations/conversations.py @@ -0,0 +1,474 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Iterable, Optional + +import httpx + +from ... 
import _legacy_response +from .items import ( + Items, + AsyncItems, + ItemsWithRawResponse, + AsyncItemsWithRawResponse, + ItemsWithStreamingResponse, + AsyncItemsWithStreamingResponse, +) +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ..._base_client import make_request_options +from ...types.conversations import conversation_create_params, conversation_update_params +from ...types.shared_params.metadata import Metadata +from ...types.conversations.conversation import Conversation +from ...types.responses.response_input_item_param import ResponseInputItemParam +from ...types.conversations.conversation_deleted_resource import ConversationDeletedResource + +__all__ = ["Conversations", "AsyncConversations"] + + +class Conversations(SyncAPIResource): + @cached_property + def items(self) -> Items: + return Items(self._client) + + @cached_property + def with_raw_response(self) -> ConversationsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return ConversationsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ConversationsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return ConversationsWithStreamingResponse(self) + + def create( + self, + *, + items: Optional[Iterable[ResponseInputItemParam]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Create a conversation with the given ID. + + Args: + items: Initial items to include in the conversation context. You may add up to 20 items + at a time. + + metadata: Set of 16 key-value pairs that can be attached to an object. Useful for storing + additional information about the object in a structured format. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return self._post( + "/conversations", + body=maybe_transform( + { + "items": items, + "metadata": metadata, + }, + conversation_create_params.ConversationCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + def retrieve( + self, + conversation_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
+ # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Get a conversation with the given ID. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return self._get( + f"/conversations/{conversation_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + def update( + self, + conversation_id: str, + *, + metadata: Dict[str, str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Update a conversation's metadata with the given ID. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. Values are strings with a maximum length of 512 + characters. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return self._post( + f"/conversations/{conversation_id}", + body=maybe_transform({"metadata": metadata}, conversation_update_params.ConversationUpdateParams), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + def delete( + self, + conversation_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConversationDeletedResource: + """ + Delete a conversation with the given ID. 
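[Editor's note: to make the surface defined above concrete, a minimal usage sketch of the synchronous resource; the conversation content and metadata values are illustrative:

```python
from openai import OpenAI

client = OpenAI()

# Create a conversation seeded with one user message
# (the API accepts up to 20 items per call).
conversation = client.conversations.create(
    items=[{"type": "message", "role": "user", "content": "Hello!"}],
    metadata={"topic": "demo"},
)

# Fetch it back by ID, then delete it.
fetched = client.conversations.retrieve(conversation.id)
deleted = client.conversations.delete(fetched.id)
print(deleted)
```
]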
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return self._delete( + f"/conversations/{conversation_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ConversationDeletedResource, + ) + + +class AsyncConversations(AsyncAPIResource): + @cached_property + def items(self) -> AsyncItems: + return AsyncItems(self._client) + + @cached_property + def with_raw_response(self) -> AsyncConversationsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncConversationsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncConversationsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncConversationsWithStreamingResponse(self) + + async def create( + self, + *, + items: Optional[Iterable[ResponseInputItemParam]] | NotGiven = NOT_GIVEN, + metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Create a conversation with the given ID. + + Args: + items: Initial items to include in the conversation context. You may add up to 20 items + at a time. + + metadata: Set of 16 key-value pairs that can be attached to an object. Useful for storing + additional information about the object in a structured format. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + return await self._post( + "/conversations", + body=await async_maybe_transform( + { + "items": items, + "metadata": metadata, + }, + conversation_create_params.ConversationCreateParams, + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + async def retrieve( + self, + conversation_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. 
+ extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Get a conversation with the given ID. + + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return await self._get( + f"/conversations/{conversation_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + async def update( + self, + conversation_id: str, + *, + metadata: Dict[str, str], + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Update a conversation's metadata with the given ID. + + Args: + metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful + for storing additional information about the object in a structured format, and + querying for objects via API or the dashboard. Keys are strings with a maximum + length of 64 characters. Values are strings with a maximum length of 512 + characters. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return await self._post( + f"/conversations/{conversation_id}", + body=await async_maybe_transform( + {"metadata": metadata}, conversation_update_params.ConversationUpdateParams + ), + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + async def delete( + self, + conversation_id: str, + *, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConversationDeletedResource: + """ + Delete a conversation with the given ID. 
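[Editor's note: the async resource mirrors the synchronous one; a brief sketch, keeping in mind the metadata rules stated above (at most 16 pairs, 64-character keys, 512-character values). The metadata key/value is illustrative:

```python
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()

    conversation = await client.conversations.create()
    updated = await client.conversations.update(
        conversation.id,
        metadata={"ticket": "T-1234"},  # assumption: illustrative key/value
    )
    print(updated.metadata)

    await client.conversations.delete(conversation.id)


asyncio.run(main())
```
]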
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return await self._delete( + f"/conversations/{conversation_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=ConversationDeletedResource, + ) + + +class ConversationsWithRawResponse: + def __init__(self, conversations: Conversations) -> None: + self._conversations = conversations + + self.create = _legacy_response.to_raw_response_wrapper( + conversations.create, + ) + self.retrieve = _legacy_response.to_raw_response_wrapper( + conversations.retrieve, + ) + self.update = _legacy_response.to_raw_response_wrapper( + conversations.update, + ) + self.delete = _legacy_response.to_raw_response_wrapper( + conversations.delete, + ) + + @cached_property + def items(self) -> ItemsWithRawResponse: + return ItemsWithRawResponse(self._conversations.items) + + +class AsyncConversationsWithRawResponse: + def __init__(self, conversations: AsyncConversations) -> None: + self._conversations = conversations + + self.create = _legacy_response.async_to_raw_response_wrapper( + conversations.create, + ) + self.retrieve = _legacy_response.async_to_raw_response_wrapper( + conversations.retrieve, + ) + self.update = _legacy_response.async_to_raw_response_wrapper( + conversations.update, + ) + self.delete = _legacy_response.async_to_raw_response_wrapper( + conversations.delete, + ) + + @cached_property + def items(self) -> AsyncItemsWithRawResponse: + return AsyncItemsWithRawResponse(self._conversations.items) + + +class ConversationsWithStreamingResponse: + def __init__(self, conversations: Conversations) -> None: + self._conversations = conversations + + self.create = to_streamed_response_wrapper( + conversations.create, + ) + self.retrieve = to_streamed_response_wrapper( + conversations.retrieve, + ) + self.update = to_streamed_response_wrapper( + conversations.update, + ) + self.delete = to_streamed_response_wrapper( + conversations.delete, + ) + + @cached_property + def items(self) -> ItemsWithStreamingResponse: + return ItemsWithStreamingResponse(self._conversations.items) + + +class AsyncConversationsWithStreamingResponse: + def __init__(self, conversations: AsyncConversations) -> None: + self._conversations = conversations + + self.create = async_to_streamed_response_wrapper( + conversations.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + conversations.retrieve, + ) + self.update = async_to_streamed_response_wrapper( + conversations.update, + ) + self.delete = async_to_streamed_response_wrapper( + conversations.delete, + ) + + @cached_property + def items(self) -> AsyncItemsWithStreamingResponse: + return AsyncItemsWithStreamingResponse(self._conversations.items) diff --git a/src/openai/resources/conversations/items.py b/src/openai/resources/conversations/items.py new file mode 100644 index 0000000000..1e696a79ed --- /dev/null +++ b/src/openai/resources/conversations/items.py @@ -0,0 +1,553 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Any, List, Iterable, cast +from typing_extensions import Literal + +import httpx + +from ... import _legacy_response +from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven +from ..._utils import maybe_transform, async_maybe_transform +from ..._compat import cached_property +from ..._resource import SyncAPIResource, AsyncAPIResource +from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper +from ...pagination import SyncConversationCursorPage, AsyncConversationCursorPage +from ..._base_client import AsyncPaginator, make_request_options +from ...types.conversations import item_list_params, item_create_params, item_retrieve_params +from ...types.conversations.conversation import Conversation +from ...types.responses.response_includable import ResponseIncludable +from ...types.conversations.conversation_item import ConversationItem +from ...types.responses.response_input_item_param import ResponseInputItemParam +from ...types.conversations.conversation_item_list import ConversationItemList + +__all__ = ["Items", "AsyncItems"] + + +class Items(SyncAPIResource): + @cached_property + def with_raw_response(self) -> ItemsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return ItemsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> ItemsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return ItemsWithStreamingResponse(self) + + def create( + self, + conversation_id: str, + *, + items: Iterable[ResponseInputItemParam], + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConversationItemList: + """ + Create items in a conversation with the given ID. + + Args: + items: The items to add to the conversation. You may add up to 20 items at a time. + + include: Additional fields to include in the response. See the `include` parameter for + [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + for more information. 
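[Editor's note: for illustration, a sketch of adding items in bulk with an `include` hint; the conversation ID is a placeholder, and the `include` value is drawn from the list documented under `items.list` further below:

```python
from openai import OpenAI

client = OpenAI()

item_list = client.conversations.items.create(
    "conv_123",  # assumption: an existing conversation ID
    items=[
        {"type": "message", "role": "user", "content": "What changed in v1.101.0?"},
        {"type": "message", "role": "assistant", "content": "Conversations API support."},
    ],
    include=["message.output_text.logprobs"],
)
print(len(item_list.data))
```
]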
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return self._post( + f"/conversations/{conversation_id}/items", + body=maybe_transform({"items": items}, item_create_params.ItemCreateParams), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"include": include}, item_create_params.ItemCreateParams), + ), + cast_to=ConversationItemList, + ) + + def retrieve( + self, + item_id: str, + *, + conversation_id: str, + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConversationItem: + """ + Get a single item from a conversation with the given IDs. + + Args: + include: Additional fields to include in the response. See the `include` parameter for + [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + for more information. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + if not item_id: + raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}") + return cast( + ConversationItem, + self._get( + f"/conversations/{conversation_id}/items/{item_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform({"include": include}, item_retrieve_params.ItemRetrieveParams), + ), + cast_to=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system + ), + ) + + def list( + self, + conversation_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> SyncConversationCursorPage[ConversationItem]: + """ + List all items for a conversation with the given ID. + + Args: + after: An item ID to list items after, used in pagination. + + include: Specify additional output data to include in the model response. 
Currently + supported values are: + + - `code_interpreter_call.outputs`: Includes the outputs of python code execution + in code interpreter tool call items. + - `computer_call_output.output.image_url`: Include image urls from the computer + call output. + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `message.output_text.logprobs`: Include logprobs with assistant messages. + - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + tokens in reasoning item outputs. This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like when + the `store` parameter is set to `false`, or when an organization is enrolled + in the zero data retention program). + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: The order to return the input items in. Default is `desc`. + + - `asc`: Return the input items in ascending order. + - `desc`: Return the input items in descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return self._get_api_list( + f"/conversations/{conversation_id}/items", + page=SyncConversationCursorPage[ConversationItem], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "include": include, + "limit": limit, + "order": order, + }, + item_list_params.ItemListParams, + ), + ), + model=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system + ) + + def delete( + self, + item_id: str, + *, + conversation_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Delete an item from a conversation with the given IDs. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + if not item_id: + raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}") + return self._delete( + f"/conversations/{conversation_id}/items/{item_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + +class AsyncItems(AsyncAPIResource): + @cached_property + def with_raw_response(self) -> AsyncItemsWithRawResponse: + """ + This property can be used as a prefix for any HTTP method call to return + the raw response object instead of the parsed content. + + For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers + """ + return AsyncItemsWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncItemsWithStreamingResponse: + """ + An alternative to `.with_raw_response` that doesn't eagerly read the response body. + + For more information, see https://www.github.com/openai/openai-python#with_streaming_response + """ + return AsyncItemsWithStreamingResponse(self) + + async def create( + self, + conversation_id: str, + *, + items: Iterable[ResponseInputItemParam], + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConversationItemList: + """ + Create items in a conversation with the given ID. + + Args: + items: The items to add to the conversation. You may add up to 20 items at a time. + + include: Additional fields to include in the response. See the `include` parameter for + [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + for more information. 
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return await self._post( + f"/conversations/{conversation_id}/items", + body=await async_maybe_transform({"items": items}, item_create_params.ItemCreateParams), + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform({"include": include}, item_create_params.ItemCreateParams), + ), + cast_to=ConversationItemList, + ) + + async def retrieve( + self, + item_id: str, + *, + conversation_id: str, + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> ConversationItem: + """ + Get a single item from a conversation with the given IDs. + + Args: + include: Additional fields to include in the response. See the `include` parameter for + [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + for more information. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + if not item_id: + raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}") + return cast( + ConversationItem, + await self._get( + f"/conversations/{conversation_id}/items/{item_id}", + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=await async_maybe_transform({"include": include}, item_retrieve_params.ItemRetrieveParams), + ), + cast_to=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system + ), + ) + + def list( + self, + conversation_id: str, + *, + after: str | NotGiven = NOT_GIVEN, + include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, + limit: int | NotGiven = NOT_GIVEN, + order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> AsyncPaginator[ConversationItem, AsyncConversationCursorPage[ConversationItem]]: + """ + List all items for a conversation with the given ID. + + Args: + after: An item ID to list items after, used in pagination. 
+ + include: Specify additional output data to include in the model response. Currently + supported values are: + + - `code_interpreter_call.outputs`: Includes the outputs of python code execution + in code interpreter tool call items. + - `computer_call_output.output.image_url`: Include image urls from the computer + call output. + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `message.output_text.logprobs`: Include logprobs with assistant messages. + - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + tokens in reasoning item outputs. This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like when + the `store` parameter is set to `false`, or when an organization is enrolled + in the zero data retention program). + + limit: A limit on the number of objects to be returned. Limit can range between 1 and + 100, and the default is 20. + + order: The order to return the input items in. Default is `desc`. + + - `asc`: Return the input items in ascending order. + - `desc`: Return the input items in descending order. + + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + return self._get_api_list( + f"/conversations/{conversation_id}/items", + page=AsyncConversationCursorPage[ConversationItem], + options=make_request_options( + extra_headers=extra_headers, + extra_query=extra_query, + extra_body=extra_body, + timeout=timeout, + query=maybe_transform( + { + "after": after, + "include": include, + "limit": limit, + "order": order, + }, + item_list_params.ItemListParams, + ), + ), + model=cast(Any, ConversationItem), # Union types cannot be passed in as arguments in the type system + ) + + async def delete( + self, + item_id: str, + *, + conversation_id: str, + # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. + # The extra values given here take precedence over values defined on the client or passed to this method. + extra_headers: Headers | None = None, + extra_query: Query | None = None, + extra_body: Body | None = None, + timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, + ) -> Conversation: + """ + Delete an item from a conversation with the given IDs. 
+ + Args: + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + + extra_body: Add additional JSON properties to the request + + timeout: Override the client-level default timeout for this request, in seconds + """ + if not conversation_id: + raise ValueError(f"Expected a non-empty value for `conversation_id` but received {conversation_id!r}") + if not item_id: + raise ValueError(f"Expected a non-empty value for `item_id` but received {item_id!r}") + return await self._delete( + f"/conversations/{conversation_id}/items/{item_id}", + options=make_request_options( + extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout + ), + cast_to=Conversation, + ) + + +class ItemsWithRawResponse: + def __init__(self, items: Items) -> None: + self._items = items + + self.create = _legacy_response.to_raw_response_wrapper( + items.create, + ) + self.retrieve = _legacy_response.to_raw_response_wrapper( + items.retrieve, + ) + self.list = _legacy_response.to_raw_response_wrapper( + items.list, + ) + self.delete = _legacy_response.to_raw_response_wrapper( + items.delete, + ) + + +class AsyncItemsWithRawResponse: + def __init__(self, items: AsyncItems) -> None: + self._items = items + + self.create = _legacy_response.async_to_raw_response_wrapper( + items.create, + ) + self.retrieve = _legacy_response.async_to_raw_response_wrapper( + items.retrieve, + ) + self.list = _legacy_response.async_to_raw_response_wrapper( + items.list, + ) + self.delete = _legacy_response.async_to_raw_response_wrapper( + items.delete, + ) + + +class ItemsWithStreamingResponse: + def __init__(self, items: Items) -> None: + self._items = items + + self.create = to_streamed_response_wrapper( + items.create, + ) + self.retrieve = to_streamed_response_wrapper( + items.retrieve, + ) + self.list = to_streamed_response_wrapper( + items.list, + ) + self.delete = to_streamed_response_wrapper( + items.delete, + ) + + +class AsyncItemsWithStreamingResponse: + def __init__(self, items: AsyncItems) -> None: + self._items = items + + self.create = async_to_streamed_response_wrapper( + items.create, + ) + self.retrieve = async_to_streamed_response_wrapper( + items.retrieve, + ) + self.list = async_to_streamed_response_wrapper( + items.list, + ) + self.delete = async_to_streamed_response_wrapper( + items.delete, + ) diff --git a/src/openai/resources/responses/input_items.py b/src/openai/resources/responses/input_items.py index a425a65c3e..9f3ef637ce 100644 --- a/src/openai/resources/responses/input_items.py +++ b/src/openai/resources/responses/input_items.py @@ -47,7 +47,6 @@ def list( response_id: str, *, after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, @@ -64,8 +63,6 @@ def list( Args: after: An item ID to list items after, used in pagination. - before: An item ID to list items before, used in pagination. - include: Additional fields to include in the response. See the `include` parameter for Response creation above for more information. 
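A minimal usage sketch of the `conversations.items` resource defined above (create/retrieve/list/delete plus the raw and streaming response wrappers), assuming a client configured via the `OPENAI_API_KEY` environment variable; the conversation ID and message payload are hypothetical:

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# Add up to 20 items to an existing conversation (the ID here is hypothetical).
created = client.conversations.items.create(
    "conv_abc123",
    items=[{"type": "message", "role": "user", "content": "Hello!"}],
)

# Iterate the conversation's items; after/limit/order mirror the
# ItemListParams added later in this patch.
for item in client.conversations.items.list("conv_abc123", limit=20, order="asc"):
    print(item.type)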
@@ -98,7 +95,6 @@ def list( query=maybe_transform( { "after": after, - "before": before, "include": include, "limit": limit, "order": order, @@ -135,7 +131,6 @@ def list( response_id: str, *, after: str | NotGiven = NOT_GIVEN, - before: str | NotGiven = NOT_GIVEN, include: List[ResponseIncludable] | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, @@ -152,8 +147,6 @@ def list( Args: after: An item ID to list items after, used in pagination. - before: An item ID to list items before, used in pagination. - include: Additional fields to include in the response. See the `include` parameter for Response creation above for more information. @@ -186,7 +179,6 @@ def list( query=maybe_transform( { "after": after, - "before": before, "include": include, "limit": limit, "order": order, diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index 375f8b7e71..d0862f5d76 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -77,6 +77,7 @@ def create( self, *, background: Optional[bool] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -127,6 +128,11 @@ def create( background: Whether to run the model response in the background. [Learn more](https://platform.openai.com/docs/guides/background). + conversation: The conversation that this response belongs to. Items from this conversation are + prepended to `input_items` for this response request. Input items and output + items from this response are automatically added to this conversation after this + response completes. + include: Specify additional output data to include in the model response. Currently supported values are: @@ -187,6 +193,7 @@ def create( previous_response_id: The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. prompt: Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). @@ -305,6 +312,7 @@ def create( *, stream: Literal[True], background: Optional[bool] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -361,6 +369,11 @@ def create( background: Whether to run the model response in the background. [Learn more](https://platform.openai.com/docs/guides/background). + conversation: The conversation that this response belongs to. Items from this conversation are + prepended to `input_items` for this response request. Input items and output + items from this response are automatically added to this conversation after this + response completes. + include: Specify additional output data to include in the model response. Currently supported values are: @@ -421,6 +434,7 @@ def create( previous_response_id: The unique ID of the previous response to the model. Use this to create multi-turn conversations. 
Learn more about
 [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ Cannot be used in conjunction with `conversation`.

 prompt: Reference to a prompt template and its variables.
 [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
@@ -532,6 +546,7 @@ def create(
 *,
 stream: bool,
 background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
 include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
 input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
 instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -588,6 +603,11 @@ def create(
 background: Whether to run the model response in the background.
 [Learn more](https://platform.openai.com/docs/guides/background).

+ conversation: The conversation that this response belongs to. Items from this conversation are
+ prepended to `input_items` for this response request. Input items and output
+ items from this response are automatically added to this conversation after this
+ response completes.
+
 include: Specify additional output data to include in the model response. Currently
 supported values are:

@@ -648,6 +668,7 @@ def create(
 previous_response_id: The unique ID of the previous response to the model. Use this to create
 multi-turn conversations. Learn more about
 [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ Cannot be used in conjunction with `conversation`.

 prompt: Reference to a prompt template and its variables.
 [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
@@ -757,6 +778,7 @@ def create(
 self,
 *,
 background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
 include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
 input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
 instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -794,6 +816,7 @@ def create(
 body=maybe_transform(
 {
 "background": background,
+ "conversation": conversation,
 "include": include,
 "input": input,
 "instructions": instructions,
@@ -866,7 +889,7 @@ def stream(
 store: Optional[bool] | NotGiven = NOT_GIVEN,
 stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
 temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
 tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
 top_p: Optional[float] | NotGiven = NOT_GIVEN,
 truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -1009,6 +1032,7 @@ def parse(
 *,
 text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
 background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
 include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
 input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
 instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1027,7 +1051,7 @@ def parse(
 stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
 stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
 temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
 tool_choice:
response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1065,6 +1089,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]: body=maybe_transform( { "background": background, + "conversation": conversation, "include": include, "input": input, "instructions": instructions, @@ -1440,6 +1465,7 @@ async def create( self, *, background: Optional[bool] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -1490,6 +1516,11 @@ async def create( background: Whether to run the model response in the background. [Learn more](https://platform.openai.com/docs/guides/background). + conversation: The conversation that this response belongs to. Items from this conversation are + prepended to `input_items` for this response request. Input items and output + items from this response are automatically added to this conversation after this + response completes. + include: Specify additional output data to include in the model response. Currently supported values are: @@ -1550,6 +1581,7 @@ async def create( previous_response_id: The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. prompt: Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). @@ -1668,6 +1700,7 @@ async def create( *, stream: Literal[True], background: Optional[bool] | NotGiven = NOT_GIVEN, + conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN, include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN, input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, @@ -1724,6 +1757,11 @@ async def create( background: Whether to run the model response in the background. [Learn more](https://platform.openai.com/docs/guides/background). + conversation: The conversation that this response belongs to. Items from this conversation are + prepended to `input_items` for this response request. Input items and output + items from this response are automatically added to this conversation after this + response completes. + include: Specify additional output data to include in the model response. Currently supported values are: @@ -1784,6 +1822,7 @@ async def create( previous_response_id: The unique ID of the previous response to the model. Use this to create multi-turn conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. prompt: Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). 
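The hunks above and below thread the new `conversation` parameter through each `create` and `parse` overload and note that it cannot be combined with `previous_response_id`. A short sketch of the intended call pattern, with a hypothetical conversation ID and an assumed model name:

from openai import OpenAI

client = OpenAI()

# Items already in the conversation are prepended to this request's input;
# the request's input and output items are appended to the conversation
# once the response completes.
response = client.responses.create(
    model="gpt-4.1",  # assumed model name
    input="Summarize our discussion so far.",
    conversation="conv_abc123",  # hypothetical ID; not combinable with previous_response_id
)
print(response.output_text)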
@@ -1895,6 +1934,7 @@ async def create(
 *,
 stream: bool,
 background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
 include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
 input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
 instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -1951,6 +1991,11 @@ async def create(
 background: Whether to run the model response in the background.
 [Learn more](https://platform.openai.com/docs/guides/background).

+ conversation: The conversation that this response belongs to. Items from this conversation are
+ prepended to `input_items` for this response request. Input items and output
+ items from this response are automatically added to this conversation after this
+ response completes.
+
 include: Specify additional output data to include in the model response. Currently
 supported values are:

@@ -2011,6 +2056,7 @@ async def create(
 previous_response_id: The unique ID of the previous response to the model. Use this to create
 multi-turn conversations. Learn more about
 [conversation state](https://platform.openai.com/docs/guides/conversation-state).
+ Cannot be used in conjunction with `conversation`.

 prompt: Reference to a prompt template and its variables.
 [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
@@ -2120,6 +2166,7 @@ async def create(
 self,
 *,
 background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
 include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
 input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
 instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -2157,6 +2204,7 @@ async def create(
 body=await async_maybe_transform(
 {
 "background": background,
+ "conversation": conversation,
 "include": include,
 "input": input,
 "instructions": instructions,
@@ -2229,7 +2277,7 @@ def stream(
 store: Optional[bool] | NotGiven = NOT_GIVEN,
 stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
 temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
 tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
 top_p: Optional[float] | NotGiven = NOT_GIVEN,
 truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -2261,7 +2309,7 @@ def stream(
 store: Optional[bool] | NotGiven = NOT_GIVEN,
 stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
 temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
 tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
 top_p: Optional[float] | NotGiven = NOT_GIVEN,
 truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
@@ -2376,6 +2424,7 @@ async def parse(
 *,
 text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
 background: Optional[bool] | NotGiven = NOT_GIVEN,
+ conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
 include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
 input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
 instructions: Optional[str] | NotGiven = NOT_GIVEN,
@@ -2394,7 +2443,7 @@ async def parse(
 stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
 temperature: Optional[float] | NotGiven = NOT_GIVEN,
- text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+ text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
 tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
 tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
 top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -2432,6 +2481,7 @@ def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
 body=maybe_transform(
 {
 "background": background,
+ "conversation": conversation,
 "include": include,
 "input": input,
 "instructions": instructions,
diff --git a/src/openai/types/conversations/__init__.py b/src/openai/types/conversations/__init__.py
new file mode 100644
index 0000000000..538966db4f
--- /dev/null
+++ b/src/openai/types/conversations/__init__.py
@@ -0,0 +1,27 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .message import Message as Message
+from .lob_prob import LobProb as LobProb
+from .conversation import Conversation as Conversation
+from .text_content import TextContent as TextContent
+from .top_log_prob import TopLogProb as TopLogProb
+from .refusal_content import RefusalContent as RefusalContent
+from .item_list_params import ItemListParams as ItemListParams
+from .conversation_item import ConversationItem as ConversationItem
+from .url_citation_body import URLCitationBody as URLCitationBody
+from .file_citation_body import FileCitationBody as FileCitationBody
+from .input_file_content import InputFileContent as InputFileContent
+from .input_text_content import InputTextContent as InputTextContent
+from .item_create_params import ItemCreateParams as ItemCreateParams
+from .input_image_content import InputImageContent as InputImageContent
+from .output_text_content import OutputTextContent as OutputTextContent
+from .item_retrieve_params import ItemRetrieveParams as ItemRetrieveParams
+from .summary_text_content import SummaryTextContent as SummaryTextContent
+from .conversation_item_list import ConversationItemList as ConversationItemList
+from .conversation_create_params import ConversationCreateParams as ConversationCreateParams
+from .conversation_update_params import ConversationUpdateParams as ConversationUpdateParams
+from .computer_screenshot_content import ComputerScreenshotContent as ComputerScreenshotContent
+from .container_file_citation_body import ContainerFileCitationBody as ContainerFileCitationBody
+from .conversation_deleted_resource import ConversationDeletedResource as ConversationDeletedResource
diff --git a/src/openai/types/conversations/computer_screenshot_content.py b/src/openai/types/conversations/computer_screenshot_content.py
new file mode 100644
index 0000000000..897b7ada0d
--- /dev/null
+++ b/src/openai/types/conversations/computer_screenshot_content.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ComputerScreenshotContent"]
+
+
+class ComputerScreenshotContent(BaseModel):
+ file_id: Optional[str] = None
+ """The identifier of an uploaded file that contains the screenshot."""
+
+ image_url: Optional[str] = None
+ """The URL of the screenshot image."""
+
+ type: Literal["computer_screenshot"]
+ """Specifies the event type.
+ + For a computer screenshot, this property is always set to `computer_screenshot`. + """ diff --git a/src/openai/types/conversations/container_file_citation_body.py b/src/openai/types/conversations/container_file_citation_body.py new file mode 100644 index 0000000000..ea460df2e2 --- /dev/null +++ b/src/openai/types/conversations/container_file_citation_body.py @@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ContainerFileCitationBody"] + + +class ContainerFileCitationBody(BaseModel): + container_id: str + """The ID of the container file.""" + + end_index: int + """The index of the last character of the container file citation in the message.""" + + file_id: str + """The ID of the file.""" + + filename: str + """The filename of the container file cited.""" + + start_index: int + """The index of the first character of the container file citation in the message.""" + + type: Literal["container_file_citation"] + """The type of the container file citation. Always `container_file_citation`.""" diff --git a/src/openai/types/conversations/conversation.py b/src/openai/types/conversations/conversation.py new file mode 100644 index 0000000000..ed63d40355 --- /dev/null +++ b/src/openai/types/conversations/conversation.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["Conversation"] + + +class Conversation(BaseModel): + id: str + """The unique ID of the conversation.""" + + created_at: int + """ + The time at which the conversation was created, measured in seconds since the + Unix epoch. + """ + + metadata: object + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. Keys are + strings with a maximum length of 64 characters. Values are strings with a + maximum length of 512 characters. + """ + + object: Literal["conversation"] + """The object type, which is always `conversation`.""" diff --git a/src/openai/types/conversations/conversation_create_params.py b/src/openai/types/conversations/conversation_create_params.py new file mode 100644 index 0000000000..7ad3f8ae2d --- /dev/null +++ b/src/openai/types/conversations/conversation_create_params.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Iterable, Optional +from typing_extensions import TypedDict + +from ..shared_params.metadata import Metadata +from ..responses.response_input_item_param import ResponseInputItemParam + +__all__ = ["ConversationCreateParams"] + + +class ConversationCreateParams(TypedDict, total=False): + items: Optional[Iterable[ResponseInputItemParam]] + """ + Initial items to include in the conversation context. You may add up to 20 items + at a time. + """ + + metadata: Optional[Metadata] + """Set of 16 key-value pairs that can be attached to an object. + + Useful for storing additional information about the object in a structured + format. 
+ """ diff --git a/src/openai/types/conversations/conversation_deleted_resource.py b/src/openai/types/conversations/conversation_deleted_resource.py new file mode 100644 index 0000000000..7abcb2448e --- /dev/null +++ b/src/openai/types/conversations/conversation_deleted_resource.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ConversationDeletedResource"] + + +class ConversationDeletedResource(BaseModel): + id: str + + deleted: bool + + object: Literal["conversation.deleted"] diff --git a/src/openai/types/conversations/conversation_item.py b/src/openai/types/conversations/conversation_item.py new file mode 100644 index 0000000000..a7cd355f36 --- /dev/null +++ b/src/openai/types/conversations/conversation_item.py @@ -0,0 +1,209 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from .message import Message +from ..._utils import PropertyInfo +from ..._models import BaseModel +from ..responses.response_reasoning_item import ResponseReasoningItem +from ..responses.response_custom_tool_call import ResponseCustomToolCall +from ..responses.response_computer_tool_call import ResponseComputerToolCall +from ..responses.response_function_web_search import ResponseFunctionWebSearch +from ..responses.response_file_search_tool_call import ResponseFileSearchToolCall +from ..responses.response_custom_tool_call_output import ResponseCustomToolCallOutput +from ..responses.response_function_tool_call_item import ResponseFunctionToolCallItem +from ..responses.response_code_interpreter_tool_call import ResponseCodeInterpreterToolCall +from ..responses.response_computer_tool_call_output_item import ResponseComputerToolCallOutputItem +from ..responses.response_function_tool_call_output_item import ResponseFunctionToolCallOutputItem + +__all__ = [ + "ConversationItem", + "ImageGenerationCall", + "LocalShellCall", + "LocalShellCallAction", + "LocalShellCallOutput", + "McpListTools", + "McpListToolsTool", + "McpApprovalRequest", + "McpApprovalResponse", + "McpCall", +] + + +class ImageGenerationCall(BaseModel): + id: str + """The unique ID of the image generation call.""" + + result: Optional[str] = None + """The generated image encoded in base64.""" + + status: Literal["in_progress", "completed", "generating", "failed"] + """The status of the image generation call.""" + + type: Literal["image_generation_call"] + """The type of the image generation call. Always `image_generation_call`.""" + + +class LocalShellCallAction(BaseModel): + command: List[str] + """The command to run.""" + + env: Dict[str, str] + """Environment variables to set for the command.""" + + type: Literal["exec"] + """The type of the local shell action. 
Always `exec`.""" + + timeout_ms: Optional[int] = None + """Optional timeout in milliseconds for the command.""" + + user: Optional[str] = None + """Optional user to run the command as.""" + + working_directory: Optional[str] = None + """Optional working directory to run the command in.""" + + +class LocalShellCall(BaseModel): + id: str + """The unique ID of the local shell call.""" + + action: LocalShellCallAction + """Execute a shell command on the server.""" + + call_id: str + """The unique ID of the local shell tool call generated by the model.""" + + status: Literal["in_progress", "completed", "incomplete"] + """The status of the local shell call.""" + + type: Literal["local_shell_call"] + """The type of the local shell call. Always `local_shell_call`.""" + + +class LocalShellCallOutput(BaseModel): + id: str + """The unique ID of the local shell tool call generated by the model.""" + + output: str + """A JSON string of the output of the local shell tool call.""" + + type: Literal["local_shell_call_output"] + """The type of the local shell tool call output. Always `local_shell_call_output`.""" + + status: Optional[Literal["in_progress", "completed", "incomplete"]] = None + """The status of the item. One of `in_progress`, `completed`, or `incomplete`.""" + + +class McpListToolsTool(BaseModel): + input_schema: object + """The JSON schema describing the tool's input.""" + + name: str + """The name of the tool.""" + + annotations: Optional[object] = None + """Additional annotations about the tool.""" + + description: Optional[str] = None + """The description of the tool.""" + + +class McpListTools(BaseModel): + id: str + """The unique ID of the list.""" + + server_label: str + """The label of the MCP server.""" + + tools: List[McpListToolsTool] + """The tools available on the server.""" + + type: Literal["mcp_list_tools"] + """The type of the item. Always `mcp_list_tools`.""" + + error: Optional[str] = None + """Error message if the server could not list tools.""" + + +class McpApprovalRequest(BaseModel): + id: str + """The unique ID of the approval request.""" + + arguments: str + """A JSON string of arguments for the tool.""" + + name: str + """The name of the tool to run.""" + + server_label: str + """The label of the MCP server making the request.""" + + type: Literal["mcp_approval_request"] + """The type of the item. Always `mcp_approval_request`.""" + + +class McpApprovalResponse(BaseModel): + id: str + """The unique ID of the approval response""" + + approval_request_id: str + """The ID of the approval request being answered.""" + + approve: bool + """Whether the request was approved.""" + + type: Literal["mcp_approval_response"] + """The type of the item. Always `mcp_approval_response`.""" + + reason: Optional[str] = None + """Optional reason for the decision.""" + + +class McpCall(BaseModel): + id: str + """The unique ID of the tool call.""" + + arguments: str + """A JSON string of the arguments passed to the tool.""" + + name: str + """The name of the tool that was run.""" + + server_label: str + """The label of the MCP server running the tool.""" + + type: Literal["mcp_call"] + """The type of the item. 
Always `mcp_call`.""" + + error: Optional[str] = None + """The error from the tool call, if any.""" + + output: Optional[str] = None + """The output from the tool call.""" + + +ConversationItem: TypeAlias = Annotated[ + Union[ + Message, + ResponseFunctionToolCallItem, + ResponseFunctionToolCallOutputItem, + ResponseFileSearchToolCall, + ResponseFunctionWebSearch, + ImageGenerationCall, + ResponseComputerToolCall, + ResponseComputerToolCallOutputItem, + ResponseReasoningItem, + ResponseCodeInterpreterToolCall, + LocalShellCall, + LocalShellCallOutput, + McpListTools, + McpApprovalRequest, + McpApprovalResponse, + McpCall, + ResponseCustomToolCall, + ResponseCustomToolCallOutput, + ], + PropertyInfo(discriminator="type"), +] diff --git a/src/openai/types/conversations/conversation_item_list.py b/src/openai/types/conversations/conversation_item_list.py new file mode 100644 index 0000000000..20091102cb --- /dev/null +++ b/src/openai/types/conversations/conversation_item_list.py @@ -0,0 +1,26 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List +from typing_extensions import Literal + +from ..._models import BaseModel +from .conversation_item import ConversationItem + +__all__ = ["ConversationItemList"] + + +class ConversationItemList(BaseModel): + data: List[ConversationItem] + """A list of conversation items.""" + + first_id: str + """The ID of the first item in the list.""" + + has_more: bool + """Whether there are more items available.""" + + last_id: str + """The ID of the last item in the list.""" + + object: Literal["list"] + """The type of object returned, must be `list`.""" diff --git a/src/openai/types/conversations/conversation_update_params.py b/src/openai/types/conversations/conversation_update_params.py new file mode 100644 index 0000000000..f2aa42d833 --- /dev/null +++ b/src/openai/types/conversations/conversation_update_params.py @@ -0,0 +1,19 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict +from typing_extensions import Required, TypedDict + +__all__ = ["ConversationUpdateParams"] + + +class ConversationUpdateParams(TypedDict, total=False): + metadata: Required[Dict[str, str]] + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. Keys are + strings with a maximum length of 64 characters. Values are strings with a + maximum length of 512 characters. + """ diff --git a/src/openai/types/conversations/file_citation_body.py b/src/openai/types/conversations/file_citation_body.py new file mode 100644 index 0000000000..ea90ae381d --- /dev/null +++ b/src/openai/types/conversations/file_citation_body.py @@ -0,0 +1,21 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["FileCitationBody"] + + +class FileCitationBody(BaseModel): + file_id: str + """The ID of the file.""" + + filename: str + """The filename of the file cited.""" + + index: int + """The index of the file in the list of files.""" + + type: Literal["file_citation"] + """The type of the file citation. 
Always `file_citation`.""" diff --git a/src/openai/types/conversations/input_file_content.py b/src/openai/types/conversations/input_file_content.py new file mode 100644 index 0000000000..6aef7a89d9 --- /dev/null +++ b/src/openai/types/conversations/input_file_content.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputFileContent"] + + +class InputFileContent(BaseModel): + file_id: Optional[str] = None + """The ID of the file to be sent to the model.""" + + type: Literal["input_file"] + """The type of the input item. Always `input_file`.""" + + file_url: Optional[str] = None + """The URL of the file to be sent to the model.""" + + filename: Optional[str] = None + """The name of the file to be sent to the model.""" diff --git a/src/openai/types/conversations/input_image_content.py b/src/openai/types/conversations/input_image_content.py new file mode 100644 index 0000000000..f2587e0adc --- /dev/null +++ b/src/openai/types/conversations/input_image_content.py @@ -0,0 +1,28 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputImageContent"] + + +class InputImageContent(BaseModel): + detail: Literal["low", "high", "auto"] + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + file_id: Optional[str] = None + """The ID of the file to be sent to the model.""" + + image_url: Optional[str] = None + """The URL of the image to be sent to the model. + + A fully qualified URL or base64 encoded image in a data URL. + """ + + type: Literal["input_image"] + """The type of the input item. Always `input_image`.""" diff --git a/src/openai/types/conversations/input_text_content.py b/src/openai/types/conversations/input_text_content.py new file mode 100644 index 0000000000..5e2daebdc5 --- /dev/null +++ b/src/openai/types/conversations/input_text_content.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["InputTextContent"] + + +class InputTextContent(BaseModel): + text: str + """The text input to the model.""" + + type: Literal["input_text"] + """The type of the input item. Always `input_text`.""" diff --git a/src/openai/types/conversations/item_create_params.py b/src/openai/types/conversations/item_create_params.py new file mode 100644 index 0000000000..9158b7167f --- /dev/null +++ b/src/openai/types/conversations/item_create_params.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List, Iterable +from typing_extensions import Required, TypedDict + +from ..responses.response_includable import ResponseIncludable +from ..responses.response_input_item_param import ResponseInputItemParam + +__all__ = ["ItemCreateParams"] + + +class ItemCreateParams(TypedDict, total=False): + items: Required[Iterable[ResponseInputItemParam]] + """The items to add to the conversation. You may add up to 20 items at a time.""" + + include: List[ResponseIncludable] + """Additional fields to include in the response. 
+ + See the `include` parameter for + [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + for more information. + """ diff --git a/src/openai/types/conversations/item_list_params.py b/src/openai/types/conversations/item_list_params.py new file mode 100644 index 0000000000..34bf43c559 --- /dev/null +++ b/src/openai/types/conversations/item_list_params.py @@ -0,0 +1,48 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Literal, TypedDict + +from ..responses.response_includable import ResponseIncludable + +__all__ = ["ItemListParams"] + + +class ItemListParams(TypedDict, total=False): + after: str + """An item ID to list items after, used in pagination.""" + + include: List[ResponseIncludable] + """Specify additional output data to include in the model response. + + Currently supported values are: + + - `code_interpreter_call.outputs`: Includes the outputs of python code execution + in code interpreter tool call items. + - `computer_call_output.output.image_url`: Include image urls from the computer + call output. + - `file_search_call.results`: Include the search results of the file search tool + call. + - `message.input_image.image_url`: Include image urls from the input message. + - `message.output_text.logprobs`: Include logprobs with assistant messages. + - `reasoning.encrypted_content`: Includes an encrypted version of reasoning + tokens in reasoning item outputs. This enables reasoning items to be used in + multi-turn conversations when using the Responses API statelessly (like when + the `store` parameter is set to `false`, or when an organization is enrolled + in the zero data retention program). + """ + + limit: int + """A limit on the number of objects to be returned. + + Limit can range between 1 and 100, and the default is 20. + """ + + order: Literal["asc", "desc"] + """The order to return the input items in. Default is `desc`. + + - `asc`: Return the input items in ascending order. + - `desc`: Return the input items in descending order. + """ diff --git a/src/openai/types/conversations/item_retrieve_params.py b/src/openai/types/conversations/item_retrieve_params.py new file mode 100644 index 0000000000..8c5db1e533 --- /dev/null +++ b/src/openai/types/conversations/item_retrieve_params.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import List +from typing_extensions import Required, TypedDict + +from ..responses.response_includable import ResponseIncludable + +__all__ = ["ItemRetrieveParams"] + + +class ItemRetrieveParams(TypedDict, total=False): + conversation_id: Required[str] + + include: List[ResponseIncludable] + """Additional fields to include in the response. + + See the `include` parameter for + [listing Conversation items above](https://platform.openai.com/docs/api-reference/conversations/list-items#conversations_list_items-include) + for more information. + """ diff --git a/src/openai/types/conversations/lob_prob.py b/src/openai/types/conversations/lob_prob.py new file mode 100644 index 0000000000..f7dcd62a5e --- /dev/null +++ b/src/openai/types/conversations/lob_prob.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import List + +from ..._models import BaseModel +from .top_log_prob import TopLogProb + +__all__ = ["LobProb"] + + +class LobProb(BaseModel): + token: str + + bytes: List[int] + + logprob: float + + top_logprobs: List[TopLogProb] diff --git a/src/openai/types/conversations/message.py b/src/openai/types/conversations/message.py new file mode 100644 index 0000000000..a070cf2869 --- /dev/null +++ b/src/openai/types/conversations/message.py @@ -0,0 +1,56 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel +from .text_content import TextContent +from .refusal_content import RefusalContent +from .input_file_content import InputFileContent +from .input_text_content import InputTextContent +from .input_image_content import InputImageContent +from .output_text_content import OutputTextContent +from .summary_text_content import SummaryTextContent +from .computer_screenshot_content import ComputerScreenshotContent + +__all__ = ["Message", "Content"] + +Content: TypeAlias = Annotated[ + Union[ + InputTextContent, + OutputTextContent, + TextContent, + SummaryTextContent, + RefusalContent, + InputImageContent, + ComputerScreenshotContent, + InputFileContent, + ], + PropertyInfo(discriminator="type"), +] + + +class Message(BaseModel): + id: str + """The unique ID of the message.""" + + content: List[Content] + """The content of the message""" + + role: Literal["unknown", "user", "assistant", "system", "critic", "discriminator", "developer", "tool"] + """The role of the message. + + One of `unknown`, `user`, `assistant`, `system`, `critic`, `discriminator`, + `developer`, or `tool`. + """ + + status: Literal["in_progress", "completed", "incomplete"] + """The status of item. + + One of `in_progress`, `completed`, or `incomplete`. Populated when items are + returned via API. + """ + + type: Literal["message"] + """The type of the message. Always set to `message`.""" diff --git a/src/openai/types/conversations/output_text_content.py b/src/openai/types/conversations/output_text_content.py new file mode 100644 index 0000000000..2ffee76526 --- /dev/null +++ b/src/openai/types/conversations/output_text_content.py @@ -0,0 +1,30 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from .lob_prob import LobProb +from ..._models import BaseModel +from .url_citation_body import URLCitationBody +from .file_citation_body import FileCitationBody +from .container_file_citation_body import ContainerFileCitationBody + +__all__ = ["OutputTextContent", "Annotation"] + +Annotation: TypeAlias = Annotated[ + Union[FileCitationBody, URLCitationBody, ContainerFileCitationBody], PropertyInfo(discriminator="type") +] + + +class OutputTextContent(BaseModel): + annotations: List[Annotation] + """The annotations of the text output.""" + + text: str + """The text output from the model.""" + + type: Literal["output_text"] + """The type of the output text. 
Always `output_text`.""" + + logprobs: Optional[List[LobProb]] = None diff --git a/src/openai/types/conversations/refusal_content.py b/src/openai/types/conversations/refusal_content.py new file mode 100644 index 0000000000..3c8bd5e35f --- /dev/null +++ b/src/openai/types/conversations/refusal_content.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["RefusalContent"] + + +class RefusalContent(BaseModel): + refusal: str + """The refusal explanation from the model.""" + + type: Literal["refusal"] + """The type of the refusal. Always `refusal`.""" diff --git a/src/openai/types/conversations/summary_text_content.py b/src/openai/types/conversations/summary_text_content.py new file mode 100644 index 0000000000..047769ed67 --- /dev/null +++ b/src/openai/types/conversations/summary_text_content.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["SummaryTextContent"] + + +class SummaryTextContent(BaseModel): + text: str + + type: Literal["summary_text"] diff --git a/src/openai/types/conversations/text_content.py b/src/openai/types/conversations/text_content.py new file mode 100644 index 0000000000..f1ae079597 --- /dev/null +++ b/src/openai/types/conversations/text_content.py @@ -0,0 +1,13 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["TextContent"] + + +class TextContent(BaseModel): + text: str + + type: Literal["text"] diff --git a/src/openai/types/conversations/top_log_prob.py b/src/openai/types/conversations/top_log_prob.py new file mode 100644 index 0000000000..fafca756ae --- /dev/null +++ b/src/openai/types/conversations/top_log_prob.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List + +from ..._models import BaseModel + +__all__ = ["TopLogProb"] + + +class TopLogProb(BaseModel): + token: str + + bytes: List[int] + + logprob: float diff --git a/src/openai/types/conversations/url_citation_body.py b/src/openai/types/conversations/url_citation_body.py new file mode 100644 index 0000000000..1becb44bc0 --- /dev/null +++ b/src/openai/types/conversations/url_citation_body.py @@ -0,0 +1,24 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["URLCitationBody"] + + +class URLCitationBody(BaseModel): + end_index: int + """The index of the last character of the URL citation in the message.""" + + start_index: int + """The index of the first character of the URL citation in the message.""" + + title: str + """The title of the web resource.""" + + type: Literal["url_citation"] + """The type of the URL citation. 
Always `url_citation`.""" + + url: str + """The URL of the web resource.""" diff --git a/src/openai/types/evals/create_eval_completions_run_data_source.py b/src/openai/types/evals/create_eval_completions_run_data_source.py index bb39d1d3e5..efcab9adb8 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source.py @@ -23,10 +23,10 @@ "InputMessages", "InputMessagesTemplate", "InputMessagesTemplateTemplate", - "InputMessagesTemplateTemplateMessage", - "InputMessagesTemplateTemplateMessageContent", - "InputMessagesTemplateTemplateMessageContentOutputText", - "InputMessagesTemplateTemplateMessageContentInputImage", + "InputMessagesTemplateTemplateEvalItem", + "InputMessagesTemplateTemplateEvalItemContent", + "InputMessagesTemplateTemplateEvalItemContentOutputText", + "InputMessagesTemplateTemplateEvalItemContentInputImage", "InputMessagesItemReference", "SamplingParams", "SamplingParamsResponseFormat", @@ -87,7 +87,7 @@ class SourceStoredCompletions(BaseModel): ] -class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel): +class InputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): text: str """The text output from the model.""" @@ -95,7 +95,7 @@ class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel): """The type of the output text. Always `output_text`.""" -class InputMessagesTemplateTemplateMessageContentInputImage(BaseModel): +class InputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): image_url: str """The URL of the image input.""" @@ -109,17 +109,17 @@ class InputMessagesTemplateTemplateMessageContentInputImage(BaseModel): """ -InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[ +InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ str, ResponseInputText, - InputMessagesTemplateTemplateMessageContentOutputText, - InputMessagesTemplateTemplateMessageContentInputImage, + InputMessagesTemplateTemplateEvalItemContentOutputText, + InputMessagesTemplateTemplateEvalItemContentInputImage, List[object], ] -class InputMessagesTemplateTemplateMessage(BaseModel): - content: InputMessagesTemplateTemplateMessageContent +class InputMessagesTemplateTemplateEvalItem(BaseModel): + content: InputMessagesTemplateTemplateEvalItemContent """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] @@ -132,9 +132,7 @@ class InputMessagesTemplateTemplateMessage(BaseModel): """The type of the message input. 
Always `message`.""" -InputMessagesTemplateTemplate: TypeAlias = Annotated[ - Union[EasyInputMessage, InputMessagesTemplateTemplateMessage], PropertyInfo(discriminator="type") -] +InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessage, InputMessagesTemplateTemplateEvalItem] class InputMessagesTemplate(BaseModel): diff --git a/src/openai/types/evals/create_eval_completions_run_data_source_param.py b/src/openai/types/evals/create_eval_completions_run_data_source_param.py index 7c71ecbe88..effa658452 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source_param.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source_param.py @@ -23,10 +23,10 @@ "InputMessages", "InputMessagesTemplate", "InputMessagesTemplateTemplate", - "InputMessagesTemplateTemplateMessage", - "InputMessagesTemplateTemplateMessageContent", - "InputMessagesTemplateTemplateMessageContentOutputText", - "InputMessagesTemplateTemplateMessageContentInputImage", + "InputMessagesTemplateTemplateEvalItem", + "InputMessagesTemplateTemplateEvalItemContent", + "InputMessagesTemplateTemplateEvalItemContentOutputText", + "InputMessagesTemplateTemplateEvalItemContentInputImage", "InputMessagesItemReference", "SamplingParams", "SamplingParamsResponseFormat", @@ -85,7 +85,7 @@ class SourceStoredCompletions(TypedDict, total=False): Source: TypeAlias = Union[SourceFileContent, SourceFileID, SourceStoredCompletions] -class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=False): +class InputMessagesTemplateTemplateEvalItemContentOutputText(TypedDict, total=False): text: Required[str] """The text output from the model.""" @@ -93,7 +93,7 @@ class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=Fal """The type of the output text. Always `output_text`.""" -class InputMessagesTemplateTemplateMessageContentInputImage(TypedDict, total=False): +class InputMessagesTemplateTemplateEvalItemContentInputImage(TypedDict, total=False): image_url: Required[str] """The URL of the image input.""" @@ -107,17 +107,17 @@ class InputMessagesTemplateTemplateMessageContentInputImage(TypedDict, total=Fal """ -InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[ +InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ str, ResponseInputTextParam, - InputMessagesTemplateTemplateMessageContentOutputText, - InputMessagesTemplateTemplateMessageContentInputImage, + InputMessagesTemplateTemplateEvalItemContentOutputText, + InputMessagesTemplateTemplateEvalItemContentInputImage, Iterable[object], ] -class InputMessagesTemplateTemplateMessage(TypedDict, total=False): - content: Required[InputMessagesTemplateTemplateMessageContent] +class InputMessagesTemplateTemplateEvalItem(TypedDict, total=False): + content: Required[InputMessagesTemplateTemplateEvalItemContent] """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] @@ -130,7 +130,7 @@ class InputMessagesTemplateTemplateMessage(TypedDict, total=False): """The type of the message input. 
Always `message`.""" -InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, InputMessagesTemplateTemplateMessage] +InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, InputMessagesTemplateTemplateEvalItem] class InputMessagesTemplate(TypedDict, total=False): diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py index 74d8688081..7c574ed315 100644 --- a/src/openai/types/responses/__init__.py +++ b/src/openai/types/responses/__init__.py @@ -79,6 +79,7 @@ from .response_text_config_param import ResponseTextConfigParam as ResponseTextConfigParam from .tool_choice_function_param import ToolChoiceFunctionParam as ToolChoiceFunctionParam from .response_computer_tool_call import ResponseComputerToolCall as ResponseComputerToolCall +from .response_conversation_param import ResponseConversationParam as ResponseConversationParam from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig from .response_function_tool_call import ResponseFunctionToolCall as ResponseFunctionToolCall from .response_input_message_item import ResponseInputMessageItem as ResponseInputMessageItem diff --git a/src/openai/types/responses/input_item_list_params.py b/src/openai/types/responses/input_item_list_params.py index 6a18d920cb..44a8dc5de3 100644 --- a/src/openai/types/responses/input_item_list_params.py +++ b/src/openai/types/responses/input_item_list_params.py @@ -14,9 +14,6 @@ class InputItemListParams(TypedDict, total=False): after: str """An item ID to list items after, used in pagination.""" - before: str - """An item ID to list items before, used in pagination.""" - include: List[ResponseIncludable] """Additional fields to include in the response. diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index 49f60bbc5c..ce9effd75e 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -22,7 +22,7 @@ from .tool_choice_function import ToolChoiceFunction from ..shared.responses_model import ResponsesModel -__all__ = ["Response", "IncompleteDetails", "ToolChoice"] +__all__ = ["Response", "IncompleteDetails", "ToolChoice", "Conversation"] class IncompleteDetails(BaseModel): @@ -35,6 +35,11 @@ class IncompleteDetails(BaseModel): ] +class Conversation(BaseModel): + id: str + """The unique ID of the conversation.""" + + class Response(BaseModel): id: str """Unique identifier for this Response.""" @@ -141,6 +146,13 @@ class Response(BaseModel): [Learn more](https://platform.openai.com/docs/guides/background). """ + conversation: Optional[Conversation] = None + """The conversation that this response belongs to. + + Input items and output items from this response are automatically added to this + conversation. + """ + max_output_tokens: Optional[int] = None """ An upper bound for the number of tokens that can be generated for a response, @@ -161,6 +173,7 @@ class Response(BaseModel): Use this to create multi-turn conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. 
""" prompt: Optional[ResponsePrompt] = None diff --git a/src/openai/types/responses/response_conversation_param.py b/src/openai/types/responses/response_conversation_param.py new file mode 100644 index 0000000000..067bdc7a31 --- /dev/null +++ b/src/openai/types/responses/response_conversation_param.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Required, TypedDict + +__all__ = ["ResponseConversationParam"] + + +class ResponseConversationParam(TypedDict, total=False): + id: Required[str] + """The unique ID of the conversation.""" diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index 0cd761fcf0..5129b8b771 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -18,10 +18,12 @@ from .tool_choice_allowed_param import ToolChoiceAllowedParam from .response_text_config_param import ResponseTextConfigParam from .tool_choice_function_param import ToolChoiceFunctionParam +from .response_conversation_param import ResponseConversationParam from ..shared_params.responses_model import ResponsesModel __all__ = [ "ResponseCreateParamsBase", + "Conversation", "StreamOptions", "ToolChoice", "ResponseCreateParamsNonStreaming", @@ -36,6 +38,14 @@ class ResponseCreateParamsBase(TypedDict, total=False): [Learn more](https://platform.openai.com/docs/guides/background). """ + conversation: Optional[Conversation] + """The conversation that this response belongs to. + + Items from this conversation are prepended to `input_items` for this response + request. Input items and output items from this response are automatically added + to this conversation after this response completes. + """ + include: Optional[List[ResponseIncludable]] """Specify additional output data to include in the model response. @@ -118,6 +128,7 @@ class ResponseCreateParamsBase(TypedDict, total=False): Use this to create multi-turn conversations. Learn more about [conversation state](https://platform.openai.com/docs/guides/conversation-state). + Cannot be used in conjunction with `conversation`. """ prompt: Optional[ResponsePromptParam] @@ -253,6 +264,9 @@ class ResponseCreateParamsBase(TypedDict, total=False): """ +Conversation: TypeAlias = Union[str, ResponseConversationParam] + + class StreamOptions(TypedDict, total=False): include_obfuscation: bool """When true, stream obfuscation will be enabled. diff --git a/src/openai/types/responses/tool.py b/src/openai/types/responses/tool.py index 455ba01666..d46f8cb0be 100644 --- a/src/openai/types/responses/tool.py +++ b/src/openai/types/responses/tool.py @@ -15,7 +15,7 @@ "Tool", "Mcp", "McpAllowedTools", - "McpAllowedToolsMcpAllowedToolsFilter", + "McpAllowedToolsMcpToolFilter", "McpRequireApproval", "McpRequireApprovalMcpToolApprovalFilter", "McpRequireApprovalMcpToolApprovalFilterAlways", @@ -29,30 +29,54 @@ ] -class McpAllowedToolsMcpAllowedToolsFilter(BaseModel): +class McpAllowedToolsMcpToolFilter(BaseModel): + read_only: Optional[bool] = None + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. 
+ """ + tool_names: Optional[List[str]] = None """List of allowed tool names.""" -McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpAllowedToolsFilter, None] +McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpToolFilter, None] class McpRequireApprovalMcpToolApprovalFilterAlways(BaseModel): + read_only: Optional[bool] = None + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + tool_names: Optional[List[str]] = None - """List of tools that require approval.""" + """List of allowed tool names.""" class McpRequireApprovalMcpToolApprovalFilterNever(BaseModel): + read_only: Optional[bool] = None + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + tool_names: Optional[List[str]] = None - """List of tools that do not require approval.""" + """List of allowed tool names.""" class McpRequireApprovalMcpToolApprovalFilter(BaseModel): always: Optional[McpRequireApprovalMcpToolApprovalFilterAlways] = None - """A list of tools that always require approval.""" + """A filter object to specify which tools are allowed.""" never: Optional[McpRequireApprovalMcpToolApprovalFilterNever] = None - """A list of tools that never require approval.""" + """A filter object to specify which tools are allowed.""" McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"], None] @@ -62,15 +86,49 @@ class Mcp(BaseModel): server_label: str """A label for this MCP server, used to identify it in tool calls.""" - server_url: str - """The URL for the MCP server.""" - type: Literal["mcp"] """The type of the MCP tool. Always `mcp`.""" allowed_tools: Optional[McpAllowedTools] = None """List of allowed tool names or a filter object.""" + authorization: Optional[str] = None + """ + An OAuth access token that can be used with a remote MCP server, either with a + custom MCP server URL or a service connector. Your application must handle the + OAuth authorization flow and provide the token here. + """ + + connector_id: Optional[ + Literal[ + "connector_dropbox", + "connector_gmail", + "connector_googlecalendar", + "connector_googledrive", + "connector_microsoftteams", + "connector_outlookcalendar", + "connector_outlookemail", + "connector_sharepoint", + ] + ] = None + """Identifier for service connectors, like those available in ChatGPT. + + One of `server_url` or `connector_id` must be provided. Learn more about service + connectors + [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). + + Currently supported `connector_id` values are: + + - Dropbox: `connector_dropbox` + - Gmail: `connector_gmail` + - Google Calendar: `connector_googlecalendar` + - Google Drive: `connector_googledrive` + - Microsoft Teams: `connector_microsoftteams` + - Outlook Calendar: `connector_outlookcalendar` + - Outlook Email: `connector_outlookemail` + - SharePoint: `connector_sharepoint` + """ + headers: Optional[Dict[str, str]] = None """Optional HTTP headers to send to the MCP server. 
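For orientation, the MCP tool shapes in this hunk translate into request payloads like the following minimal sketch; the server label, URL, and OAuth token are placeholders, and the model name is illustrative:

```python
from openai import OpenAI

client = OpenAI()

# Variant 1: a custom MCP server addressed by URL, restricted to read-only
# tools via the new `read_only` filter, with one tool exempted from approval.
response = client.responses.create(
    model="gpt-4.1",
    input="Summarize the open tickets.",
    tools=[
        {
            "type": "mcp",
            "server_label": "tickets",                    # placeholder label
            "server_url": "https://mcp.example.com/sse",  # placeholder URL
            "allowed_tools": {"read_only": True},
            "require_approval": {
                "never": {"tool_names": ["list_tickets"], "read_only": True},
            },
        }
    ],
)

# Variant 2: a service connector instead of a URL; exactly one of
# `server_url` or `connector_id` must be provided.
response = client.responses.create(
    model="gpt-4.1",
    input="What is on my calendar today?",
    tools=[
        {
            "type": "mcp",
            "server_label": "calendar",
            "connector_id": "connector_googlecalendar",
            "authorization": "<oauth-access-token>",  # supplied by your app
        }
    ],
)
```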
@@ -83,6 +141,12 @@ class Mcp(BaseModel): server_description: Optional[str] = None """Optional description of the MCP server, used to provide more context.""" + server_url: Optional[str] = None + """The URL for the MCP server. + + One of `server_url` or `connector_id` must be provided. + """ + class CodeInterpreterContainerCodeInterpreterToolAuto(BaseModel): type: Literal["auto"] diff --git a/src/openai/types/responses/tool_param.py b/src/openai/types/responses/tool_param.py index f91e758559..9dde42e294 100644 --- a/src/openai/types/responses/tool_param.py +++ b/src/openai/types/responses/tool_param.py @@ -16,7 +16,7 @@ "ToolParam", "Mcp", "McpAllowedTools", - "McpAllowedToolsMcpAllowedToolsFilter", + "McpAllowedToolsMcpToolFilter", "McpRequireApproval", "McpRequireApprovalMcpToolApprovalFilter", "McpRequireApprovalMcpToolApprovalFilterAlways", @@ -30,30 +30,54 @@ ] -class McpAllowedToolsMcpAllowedToolsFilter(TypedDict, total=False): +class McpAllowedToolsMcpToolFilter(TypedDict, total=False): + read_only: bool + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + tool_names: List[str] """List of allowed tool names.""" -McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpAllowedToolsFilter] +McpAllowedTools: TypeAlias = Union[List[str], McpAllowedToolsMcpToolFilter] class McpRequireApprovalMcpToolApprovalFilterAlways(TypedDict, total=False): + read_only: bool + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + tool_names: List[str] - """List of tools that require approval.""" + """List of allowed tool names.""" class McpRequireApprovalMcpToolApprovalFilterNever(TypedDict, total=False): + read_only: bool + """Indicates whether or not a tool modifies data or is read-only. + + If an MCP server is + [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint), + it will match this filter. + """ + tool_names: List[str] - """List of tools that do not require approval.""" + """List of allowed tool names.""" class McpRequireApprovalMcpToolApprovalFilter(TypedDict, total=False): always: McpRequireApprovalMcpToolApprovalFilterAlways - """A list of tools that always require approval.""" + """A filter object to specify which tools are allowed.""" never: McpRequireApprovalMcpToolApprovalFilterNever - """A list of tools that never require approval.""" + """A filter object to specify which tools are allowed.""" McpRequireApproval: TypeAlias = Union[McpRequireApprovalMcpToolApprovalFilter, Literal["always", "never"]] @@ -63,15 +87,47 @@ class Mcp(TypedDict, total=False): server_label: Required[str] """A label for this MCP server, used to identify it in tool calls.""" - server_url: Required[str] - """The URL for the MCP server.""" - type: Required[Literal["mcp"]] """The type of the MCP tool. Always `mcp`.""" allowed_tools: Optional[McpAllowedTools] """List of allowed tool names or a filter object.""" + authorization: str + """ + An OAuth access token that can be used with a remote MCP server, either with a + custom MCP server URL or a service connector. 
Your application must handle the + OAuth authorization flow and provide the token here. + """ + + connector_id: Literal[ + "connector_dropbox", + "connector_gmail", + "connector_googlecalendar", + "connector_googledrive", + "connector_microsoftteams", + "connector_outlookcalendar", + "connector_outlookemail", + "connector_sharepoint", + ] + """Identifier for service connectors, like those available in ChatGPT. + + One of `server_url` or `connector_id` must be provided. Learn more about service + connectors + [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors). + + Currently supported `connector_id` values are: + + - Dropbox: `connector_dropbox` + - Gmail: `connector_gmail` + - Google Calendar: `connector_googlecalendar` + - Google Drive: `connector_googledrive` + - Microsoft Teams: `connector_microsoftteams` + - Outlook Calendar: `connector_outlookcalendar` + - Outlook Email: `connector_outlookemail` + - SharePoint: `connector_sharepoint` + """ + headers: Optional[Dict[str, str]] """Optional HTTP headers to send to the MCP server. @@ -84,6 +140,12 @@ class Mcp(TypedDict, total=False): server_description: str """Optional description of the MCP server, used to provide more context.""" + server_url: str + """The URL for the MCP server. + + One of `server_url` or `connector_id` must be provided. + """ + class CodeInterpreterContainerCodeInterpreterToolAuto(TypedDict, total=False): type: Required[Literal["auto"]] diff --git a/tests/api_resources/conversations/__init__.py b/tests/api_resources/conversations/__init__.py new file mode 100644 index 0000000000..fd8019a9a1 --- /dev/null +++ b/tests/api_resources/conversations/__init__.py @@ -0,0 +1 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. diff --git a/tests/api_resources/conversations/test_items.py b/tests/api_resources/conversations/test_items.py new file mode 100644 index 0000000000..c308160543 --- /dev/null +++ b/tests/api_resources/conversations/test_items.py @@ -0,0 +1,491 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
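The new test file below exercises the conversation items resource end to end; for reference, the calls it covers look roughly like this (the IDs are the placeholders used in the tests):

```python
from openai import OpenAI

client = OpenAI()

# Add items to an existing conversation.
items = client.conversations.items.create(
    conversation_id="conv_123",
    items=[{"type": "message", "role": "user", "content": "Hello!"}],
)

# Fetch a single item, optionally with extra output data included.
item = client.conversations.items.retrieve(
    item_id="msg_abc",
    conversation_id="conv_123",
    include=["code_interpreter_call.outputs"],
)

# List items with cursor pagination (SyncConversationCursorPage is iterable).
for it in client.conversations.items.list(conversation_id="conv_123", order="asc"):
    print(it.type)

# Remove an item; the parent conversation is returned.
client.conversations.items.delete(item_id="msg_abc", conversation_id="conv_123")
```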
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from openai import OpenAI, AsyncOpenAI +from tests.utils import assert_matches_type +from openai.pagination import SyncConversationCursorPage, AsyncConversationCursorPage +from openai.types.conversations import ( + Conversation, + ConversationItem, + ConversationItemList, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestItems: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + def test_method_create(self, client: OpenAI) -> None: + item = client.conversations.items.create( + conversation_id="conv_123", + items=[ + { + "content": "string", + "role": "user", + } + ], + ) + assert_matches_type(ConversationItemList, item, path=["response"]) + + @parametrize + def test_method_create_with_all_params(self, client: OpenAI) -> None: + item = client.conversations.items.create( + conversation_id="conv_123", + items=[ + { + "content": "string", + "role": "user", + "type": "message", + } + ], + include=["code_interpreter_call.outputs"], + ) + assert_matches_type(ConversationItemList, item, path=["response"]) + + @parametrize + def test_raw_response_create(self, client: OpenAI) -> None: + response = client.conversations.items.with_raw_response.create( + conversation_id="conv_123", + items=[ + { + "content": "string", + "role": "user", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + item = response.parse() + assert_matches_type(ConversationItemList, item, path=["response"]) + + @parametrize + def test_streaming_response_create(self, client: OpenAI) -> None: + with client.conversations.items.with_streaming_response.create( + conversation_id="conv_123", + items=[ + { + "content": "string", + "role": "user", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + item = response.parse() + assert_matches_type(ConversationItemList, item, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_create(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"): + client.conversations.items.with_raw_response.create( + conversation_id="", + items=[ + { + "content": "string", + "role": "user", + } + ], + ) + + @parametrize + def test_method_retrieve(self, client: OpenAI) -> None: + item = client.conversations.items.retrieve( + item_id="msg_abc", + conversation_id="conv_123", + ) + assert_matches_type(ConversationItem, item, path=["response"]) + + @parametrize + def test_method_retrieve_with_all_params(self, client: OpenAI) -> None: + item = client.conversations.items.retrieve( + item_id="msg_abc", + conversation_id="conv_123", + include=["code_interpreter_call.outputs"], + ) + assert_matches_type(ConversationItem, item, path=["response"]) + + @parametrize + def test_raw_response_retrieve(self, client: OpenAI) -> None: + response = client.conversations.items.with_raw_response.retrieve( + item_id="msg_abc", + conversation_id="conv_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + item = response.parse() + assert_matches_type(ConversationItem, item, path=["response"]) + + @parametrize + def test_streaming_response_retrieve(self, client: 
OpenAI) -> None: + with client.conversations.items.with_streaming_response.retrieve( + item_id="msg_abc", + conversation_id="conv_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + item = response.parse() + assert_matches_type(ConversationItem, item, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_retrieve(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"): + client.conversations.items.with_raw_response.retrieve( + item_id="msg_abc", + conversation_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"): + client.conversations.items.with_raw_response.retrieve( + item_id="", + conversation_id="conv_123", + ) + + @parametrize + def test_method_list(self, client: OpenAI) -> None: + item = client.conversations.items.list( + conversation_id="conv_123", + ) + assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"]) + + @parametrize + def test_method_list_with_all_params(self, client: OpenAI) -> None: + item = client.conversations.items.list( + conversation_id="conv_123", + after="after", + include=["code_interpreter_call.outputs"], + limit=0, + order="asc", + ) + assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"]) + + @parametrize + def test_raw_response_list(self, client: OpenAI) -> None: + response = client.conversations.items.with_raw_response.list( + conversation_id="conv_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + item = response.parse() + assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"]) + + @parametrize + def test_streaming_response_list(self, client: OpenAI) -> None: + with client.conversations.items.with_streaming_response.list( + conversation_id="conv_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + item = response.parse() + assert_matches_type(SyncConversationCursorPage[ConversationItem], item, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_list(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"): + client.conversations.items.with_raw_response.list( + conversation_id="", + ) + + @parametrize + def test_method_delete(self, client: OpenAI) -> None: + item = client.conversations.items.delete( + item_id="msg_abc", + conversation_id="conv_123", + ) + assert_matches_type(Conversation, item, path=["response"]) + + @parametrize + def test_raw_response_delete(self, client: OpenAI) -> None: + response = client.conversations.items.with_raw_response.delete( + item_id="msg_abc", + conversation_id="conv_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + item = response.parse() + assert_matches_type(Conversation, item, path=["response"]) + + @parametrize + def test_streaming_response_delete(self, client: OpenAI) -> None: + with client.conversations.items.with_streaming_response.delete( + item_id="msg_abc", + conversation_id="conv_123", + ) as response: + assert not response.is_closed + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + + item = response.parse() + assert_matches_type(Conversation, item, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_delete(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"): + client.conversations.items.with_raw_response.delete( + item_id="msg_abc", + conversation_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"): + client.conversations.items.with_raw_response.delete( + item_id="", + conversation_id="conv_123", + ) + + +class TestAsyncItems: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @parametrize + async def test_method_create(self, async_client: AsyncOpenAI) -> None: + item = await async_client.conversations.items.create( + conversation_id="conv_123", + items=[ + { + "content": "string", + "role": "user", + } + ], + ) + assert_matches_type(ConversationItemList, item, path=["response"]) + + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: + item = await async_client.conversations.items.create( + conversation_id="conv_123", + items=[ + { + "content": "string", + "role": "user", + "type": "message", + } + ], + include=["code_interpreter_call.outputs"], + ) + assert_matches_type(ConversationItemList, item, path=["response"]) + + @parametrize + async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: + response = await async_client.conversations.items.with_raw_response.create( + conversation_id="conv_123", + items=[ + { + "content": "string", + "role": "user", + } + ], + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + item = response.parse() + assert_matches_type(ConversationItemList, item, path=["response"]) + + @parametrize + async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: + async with async_client.conversations.items.with_streaming_response.create( + conversation_id="conv_123", + items=[ + { + "content": "string", + "role": "user", + } + ], + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + item = await response.parse() + assert_matches_type(ConversationItemList, item, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_create(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"): + await async_client.conversations.items.with_raw_response.create( + conversation_id="", + items=[ + { + "content": "string", + "role": "user", + } + ], + ) + + @parametrize + async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: + item = await async_client.conversations.items.retrieve( + item_id="msg_abc", + conversation_id="conv_123", + ) + assert_matches_type(ConversationItem, item, path=["response"]) + + @parametrize + async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None: + item = await async_client.conversations.items.retrieve( + item_id="msg_abc", + conversation_id="conv_123", + include=["code_interpreter_call.outputs"], + ) + 
assert_matches_type(ConversationItem, item, path=["response"]) + + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: + response = await async_client.conversations.items.with_raw_response.retrieve( + item_id="msg_abc", + conversation_id="conv_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + item = response.parse() + assert_matches_type(ConversationItem, item, path=["response"]) + + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: + async with async_client.conversations.items.with_streaming_response.retrieve( + item_id="msg_abc", + conversation_id="conv_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + item = await response.parse() + assert_matches_type(ConversationItem, item, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"): + await async_client.conversations.items.with_raw_response.retrieve( + item_id="msg_abc", + conversation_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"): + await async_client.conversations.items.with_raw_response.retrieve( + item_id="", + conversation_id="conv_123", + ) + + @parametrize + async def test_method_list(self, async_client: AsyncOpenAI) -> None: + item = await async_client.conversations.items.list( + conversation_id="conv_123", + ) + assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"]) + + @parametrize + async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: + item = await async_client.conversations.items.list( + conversation_id="conv_123", + after="after", + include=["code_interpreter_call.outputs"], + limit=0, + order="asc", + ) + assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"]) + + @parametrize + async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: + response = await async_client.conversations.items.with_raw_response.list( + conversation_id="conv_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + item = response.parse() + assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"]) + + @parametrize + async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: + async with async_client.conversations.items.with_streaming_response.list( + conversation_id="conv_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + item = await response.parse() + assert_matches_type(AsyncConversationCursorPage[ConversationItem], item, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"): + await async_client.conversations.items.with_raw_response.list( + conversation_id="", + ) + + @parametrize + async def test_method_delete(self, async_client: AsyncOpenAI) -> None: + item = await 
async_client.conversations.items.delete( + item_id="msg_abc", + conversation_id="conv_123", + ) + assert_matches_type(Conversation, item, path=["response"]) + + @parametrize + async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: + response = await async_client.conversations.items.with_raw_response.delete( + item_id="msg_abc", + conversation_id="conv_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + item = response.parse() + assert_matches_type(Conversation, item, path=["response"]) + + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: + async with async_client.conversations.items.with_streaming_response.delete( + item_id="msg_abc", + conversation_id="conv_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + item = await response.parse() + assert_matches_type(Conversation, item, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"): + await async_client.conversations.items.with_raw_response.delete( + item_id="msg_abc", + conversation_id="", + ) + + with pytest.raises(ValueError, match=r"Expected a non-empty value for `item_id` but received ''"): + await async_client.conversations.items.with_raw_response.delete( + item_id="", + conversation_id="conv_123", + ) diff --git a/tests/api_resources/responses/test_input_items.py b/tests/api_resources/responses/test_input_items.py index e8e3893bad..eda20c9a0b 100644 --- a/tests/api_resources/responses/test_input_items.py +++ b/tests/api_resources/responses/test_input_items.py @@ -30,7 +30,6 @@ def test_method_list_with_all_params(self, client: OpenAI) -> None: input_item = client.responses.input_items.list( response_id="response_id", after="after", - before="before", include=["code_interpreter_call.outputs"], limit=0, order="asc", @@ -86,7 +85,6 @@ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> N input_item = await async_client.responses.input_items.list( response_id="response_id", after="after", - before="before", include=["code_interpreter_call.outputs"], limit=0, order="asc", diff --git a/tests/api_resources/test_conversations.py b/tests/api_resources/test_conversations.py new file mode 100644 index 0000000000..d21e685a04 --- /dev/null +++ b/tests/api_resources/test_conversations.py @@ -0,0 +1,341 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
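The tests below cover the conversation lifecycle itself; combined with the `conversation` parameter added to `responses.create()` earlier in this patch, a typical flow is sketched here (the model name and metadata values are illustrative):

```python
from openai import OpenAI

client = OpenAI()

# Create a conversation, optionally seeding it with initial items.
conv = client.conversations.create(
    items=[{"type": "message", "role": "user", "content": "Hi there"}],
    metadata={"topic": "demo"},
)

# Attach a response to it; input and output items are added to the
# conversation automatically. Cannot be combined with `previous_response_id`.
response = client.responses.create(
    model="gpt-4.1",
    conversation=conv.id,
    input="What did I just say?",
)

# Update metadata, then delete the conversation.
client.conversations.update(conversation_id=conv.id, metadata={"topic": "demo-2"})
client.conversations.delete(conv.id)
```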
+ +from __future__ import annotations + +import os +from typing import Any, cast + +import pytest + +from openai import OpenAI, AsyncOpenAI +from tests.utils import assert_matches_type +from openai.types.conversations import ( + Conversation, + ConversationDeletedResource, +) + +base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") + + +class TestConversations: + parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) + + @parametrize + def test_method_create(self, client: OpenAI) -> None: + conversation = client.conversations.create() + assert_matches_type(Conversation, conversation, path=["response"]) + + @parametrize + def test_method_create_with_all_params(self, client: OpenAI) -> None: + conversation = client.conversations.create( + items=[ + { + "content": "string", + "role": "user", + "type": "message", + } + ], + metadata={"foo": "string"}, + ) + assert_matches_type(Conversation, conversation, path=["response"]) + + @parametrize + def test_raw_response_create(self, client: OpenAI) -> None: + response = client.conversations.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + conversation = response.parse() + assert_matches_type(Conversation, conversation, path=["response"]) + + @parametrize + def test_streaming_response_create(self, client: OpenAI) -> None: + with client.conversations.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + conversation = response.parse() + assert_matches_type(Conversation, conversation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_method_retrieve(self, client: OpenAI) -> None: + conversation = client.conversations.retrieve( + "conv_123", + ) + assert_matches_type(Conversation, conversation, path=["response"]) + + @parametrize + def test_raw_response_retrieve(self, client: OpenAI) -> None: + response = client.conversations.with_raw_response.retrieve( + "conv_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + conversation = response.parse() + assert_matches_type(Conversation, conversation, path=["response"]) + + @parametrize + def test_streaming_response_retrieve(self, client: OpenAI) -> None: + with client.conversations.with_streaming_response.retrieve( + "conv_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + conversation = response.parse() + assert_matches_type(Conversation, conversation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_retrieve(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"): + client.conversations.with_raw_response.retrieve( + "", + ) + + @parametrize + def test_method_update(self, client: OpenAI) -> None: + conversation = client.conversations.update( + conversation_id="conv_123", + metadata={"foo": "string"}, + ) + assert_matches_type(Conversation, conversation, path=["response"]) + + @parametrize + def test_raw_response_update(self, client: OpenAI) -> None: + response = client.conversations.with_raw_response.update( + conversation_id="conv_123", + metadata={"foo": "string"}, + ) + + assert response.is_closed is True + assert 
response.http_request.headers.get("X-Stainless-Lang") == "python" + conversation = response.parse() + assert_matches_type(Conversation, conversation, path=["response"]) + + @parametrize + def test_streaming_response_update(self, client: OpenAI) -> None: + with client.conversations.with_streaming_response.update( + conversation_id="conv_123", + metadata={"foo": "string"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + conversation = response.parse() + assert_matches_type(Conversation, conversation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_update(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"): + client.conversations.with_raw_response.update( + conversation_id="", + metadata={"foo": "string"}, + ) + + @parametrize + def test_method_delete(self, client: OpenAI) -> None: + conversation = client.conversations.delete( + "conv_123", + ) + assert_matches_type(ConversationDeletedResource, conversation, path=["response"]) + + @parametrize + def test_raw_response_delete(self, client: OpenAI) -> None: + response = client.conversations.with_raw_response.delete( + "conv_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + conversation = response.parse() + assert_matches_type(ConversationDeletedResource, conversation, path=["response"]) + + @parametrize + def test_streaming_response_delete(self, client: OpenAI) -> None: + with client.conversations.with_streaming_response.delete( + "conv_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + conversation = response.parse() + assert_matches_type(ConversationDeletedResource, conversation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + def test_path_params_delete(self, client: OpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"): + client.conversations.with_raw_response.delete( + "", + ) + + +class TestAsyncConversations: + parametrize = pytest.mark.parametrize( + "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"] + ) + + @parametrize + async def test_method_create(self, async_client: AsyncOpenAI) -> None: + conversation = await async_client.conversations.create() + assert_matches_type(Conversation, conversation, path=["response"]) + + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: + conversation = await async_client.conversations.create( + items=[ + { + "content": "string", + "role": "user", + "type": "message", + } + ], + metadata={"foo": "string"}, + ) + assert_matches_type(Conversation, conversation, path=["response"]) + + @parametrize + async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: + response = await async_client.conversations.with_raw_response.create() + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + conversation = response.parse() + assert_matches_type(Conversation, conversation, path=["response"]) + + @parametrize + async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: + async with 
async_client.conversations.with_streaming_response.create() as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + conversation = await response.parse() + assert_matches_type(Conversation, conversation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: + conversation = await async_client.conversations.retrieve( + "conv_123", + ) + assert_matches_type(Conversation, conversation, path=["response"]) + + @parametrize + async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: + response = await async_client.conversations.with_raw_response.retrieve( + "conv_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + conversation = response.parse() + assert_matches_type(Conversation, conversation, path=["response"]) + + @parametrize + async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: + async with async_client.conversations.with_streaming_response.retrieve( + "conv_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + conversation = await response.parse() + assert_matches_type(Conversation, conversation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"): + await async_client.conversations.with_raw_response.retrieve( + "", + ) + + @parametrize + async def test_method_update(self, async_client: AsyncOpenAI) -> None: + conversation = await async_client.conversations.update( + conversation_id="conv_123", + metadata={"foo": "string"}, + ) + assert_matches_type(Conversation, conversation, path=["response"]) + + @parametrize + async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: + response = await async_client.conversations.with_raw_response.update( + conversation_id="conv_123", + metadata={"foo": "string"}, + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + conversation = response.parse() + assert_matches_type(Conversation, conversation, path=["response"]) + + @parametrize + async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None: + async with async_client.conversations.with_streaming_response.update( + conversation_id="conv_123", + metadata={"foo": "string"}, + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + conversation = await response.parse() + assert_matches_type(Conversation, conversation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_update(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"): + await async_client.conversations.with_raw_response.update( + conversation_id="", + metadata={"foo": "string"}, + ) + + @parametrize + async def test_method_delete(self, async_client: AsyncOpenAI) -> None: + conversation = await async_client.conversations.delete( + "conv_123", + ) + assert_matches_type(ConversationDeletedResource, conversation, 
path=["response"]) + + @parametrize + async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: + response = await async_client.conversations.with_raw_response.delete( + "conv_123", + ) + + assert response.is_closed is True + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + conversation = response.parse() + assert_matches_type(ConversationDeletedResource, conversation, path=["response"]) + + @parametrize + async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: + async with async_client.conversations.with_streaming_response.delete( + "conv_123", + ) as response: + assert not response.is_closed + assert response.http_request.headers.get("X-Stainless-Lang") == "python" + + conversation = await response.parse() + assert_matches_type(ConversationDeletedResource, conversation, path=["response"]) + + assert cast(Any, response.is_closed) is True + + @parametrize + async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: + with pytest.raises(ValueError, match=r"Expected a non-empty value for `conversation_id` but received ''"): + await async_client.conversations.with_raw_response.delete( + "", + ) diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py index 310800b87e..0cc20e926b 100644 --- a/tests/api_resources/test_responses.py +++ b/tests/api_resources/test_responses.py @@ -29,6 +29,7 @@ def test_method_create_overload_1(self, client: OpenAI) -> None: def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: response = client.responses.create( background=True, + conversation="string", include=["code_interpreter_call.outputs"], input="string", instructions="instructions", @@ -108,6 +109,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: response_stream = client.responses.create( stream=True, background=True, + conversation="string", include=["code_interpreter_call.outputs"], input="string", instructions="instructions", @@ -380,6 +382,7 @@ async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None: response = await async_client.responses.create( background=True, + conversation="string", include=["code_interpreter_call.outputs"], input="string", instructions="instructions", @@ -459,6 +462,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn response_stream = await async_client.responses.create( stream=True, background=True, + conversation="string", include=["code_interpreter_call.outputs"], input="string", instructions="instructions", From 9fd9df51bb12956598d6e12b50a3330aa0e56272 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 25 Aug 2025 22:24:33 +0000 Subject: [PATCH 86/90] chore(internal): change ci workflow machines --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5e56aae09a..4c617a6f19 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -42,7 +42,7 @@ jobs: permissions: contents: read id-token: write - runs-on: depot-ubuntu-24.04 + runs-on: ${{ github.repository == 'stainless-sdks/openai-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v4 From 7325cdbbaf88078d00fefdb830f5040272b35dda Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" 
<142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 26 Aug 2025 13:56:43 +0000 Subject: [PATCH 87/90] chore(internal): codegen related update --- requirements-dev.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index e619cb6b64..e8bea53014 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -70,7 +70,7 @@ filelock==3.12.4 frozenlist==1.7.0 # via aiohttp # via aiosignal -griffe==1.12.1 +griffe==1.13.0 h11==0.16.0 # via httpcore httpcore==1.0.9 From 3f21bcd0b993641402e28d21621b794db0b34cc2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 26 Aug 2025 16:02:14 +0000 Subject: [PATCH 88/90] fix: avoid newer type syntax --- src/openai/_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/openai/_models.py b/src/openai/_models.py index d84d51d913..50eb0af751 100644 --- a/src/openai/_models.py +++ b/src/openai/_models.py @@ -329,7 +329,7 @@ def model_dump( exclude_none=exclude_none, ) - return cast(dict[str, Any], json_safe(dumped)) if mode == "json" else dumped + return cast("dict[str, Any]", json_safe(dumped)) if mode == "json" else dumped @override def model_dump_json( From af5f9c4e9d26777364154c2961dce7a047a2b42d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 26 Aug 2025 20:42:47 +0000 Subject: [PATCH 89/90] feat(api): add web search filters --- .stats.yml | 4 +- .../resources/conversations/conversations.py | 4 +- src/openai/resources/conversations/items.py | 4 ++ src/openai/resources/responses/responses.py | 12 ++++ .../types/conversations/item_list_params.py | 2 + .../types/responses/response_create_params.py | 2 + .../responses/response_function_web_search.py | 15 ++++- .../response_function_web_search_param.py | 22 ++++++- src/openai/types/responses/tool.py | 63 ++++++++++++++++++- src/openai/types/responses/tool_param.py | 61 +++++++++++++++++- 10 files changed, 178 insertions(+), 11 deletions(-) diff --git a/.stats.yml b/.stats.yml index f2d5304a5b..5ad90ac5ab 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 119 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-ddbdf9343316047e8a773c54fb24e4a8d225955e202a1888fde6f9c8898ebf98.yml -openapi_spec_hash: 9802f6dd381558466c897f6e387e06ca +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-8517ffa1004e31ca2523d617629e64be6fe4f13403ddfd9db5b3be002656cbde.yml +openapi_spec_hash: b64dd8c8b23082a7aa2a3e5c5fffd8bd config_hash: fe0ea26680ac2075a6cd66416aefe7db diff --git a/src/openai/resources/conversations/conversations.py b/src/openai/resources/conversations/conversations.py index 13bc1fb1ce..802620e6ad 100644 --- a/src/openai/resources/conversations/conversations.py +++ b/src/openai/resources/conversations/conversations.py @@ -67,7 +67,7 @@ def create( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Conversation: """ - Create a conversation with the given ID. + Create a conversation. Args: items: Initial items to include in the conversation context. You may add up to 20 items @@ -244,7 +244,7 @@ async def create( timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Conversation: """ - Create a conversation with the given ID. + Create a conversation. Args: items: Initial items to include in the conversation context. 
You may add up to 20 items diff --git a/src/openai/resources/conversations/items.py b/src/openai/resources/conversations/items.py index 1e696a79ed..01811f956b 100644 --- a/src/openai/resources/conversations/items.py +++ b/src/openai/resources/conversations/items.py @@ -163,6 +163,8 @@ def list( include: Specify additional output data to include in the model response. Currently supported values are: + - `web_search_call.action.sources`: Include the sources of the web search tool + call. - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code interpreter tool call items. - `computer_call_output.output.image_url`: Include image urls from the computer @@ -391,6 +393,8 @@ def list( include: Specify additional output data to include in the model response. Currently supported values are: + - `web_search_call.action.sources`: Include the sources of the web search tool + call. - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code interpreter tool call items. - `computer_call_output.output.image_url`: Include image urls from the computer diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index d0862f5d76..062fd491f2 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -136,6 +136,8 @@ def create( include: Specify additional output data to include in the model response. Currently supported values are: + - `web_search_call.action.sources`: Include the sources of the web search tool + call. - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code interpreter tool call items. - `computer_call_output.output.image_url`: Include image urls from the computer @@ -377,6 +379,8 @@ def create( include: Specify additional output data to include in the model response. Currently supported values are: + - `web_search_call.action.sources`: Include the sources of the web search tool + call. - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code interpreter tool call items. - `computer_call_output.output.image_url`: Include image urls from the computer @@ -611,6 +615,8 @@ def create( include: Specify additional output data to include in the model response. Currently supported values are: + - `web_search_call.action.sources`: Include the sources of the web search tool + call. - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code interpreter tool call items. - `computer_call_output.output.image_url`: Include image urls from the computer @@ -1524,6 +1530,8 @@ async def create( include: Specify additional output data to include in the model response. Currently supported values are: + - `web_search_call.action.sources`: Include the sources of the web search tool + call. - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code interpreter tool call items. - `computer_call_output.output.image_url`: Include image urls from the computer @@ -1765,6 +1773,8 @@ async def create( include: Specify additional output data to include in the model response. Currently supported values are: + - `web_search_call.action.sources`: Include the sources of the web search tool + call. - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code interpreter tool call items. 
- `computer_call_output.output.image_url`: Include image urls from the computer @@ -1999,6 +2009,8 @@ async def create( include: Specify additional output data to include in the model response. Currently supported values are: + - `web_search_call.action.sources`: Include the sources of the web search tool + call. - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code interpreter tool call items. - `computer_call_output.output.image_url`: Include image urls from the computer diff --git a/src/openai/types/conversations/item_list_params.py b/src/openai/types/conversations/item_list_params.py index 34bf43c559..a4dd61f399 100644 --- a/src/openai/types/conversations/item_list_params.py +++ b/src/openai/types/conversations/item_list_params.py @@ -19,6 +19,8 @@ class ItemListParams(TypedDict, total=False): Currently supported values are: + - `web_search_call.action.sources`: Include the sources of the web search tool + call. - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code interpreter tool call items. - `computer_call_output.output.image_url`: Include image urls from the computer diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index 5129b8b771..ff28c05816 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -51,6 +51,8 @@ class ResponseCreateParamsBase(TypedDict, total=False): Currently supported values are: + - `web_search_call.action.sources`: Include the sources of the web search tool + call. - `code_interpreter_call.outputs`: Includes the outputs of python code execution in code interpreter tool call items. - `computer_call_output.output.image_url`: Include image urls from the computer diff --git a/src/openai/types/responses/response_function_web_search.py b/src/openai/types/responses/response_function_web_search.py index a3252956e9..f3e80e6a8f 100644 --- a/src/openai/types/responses/response_function_web_search.py +++ b/src/openai/types/responses/response_function_web_search.py @@ -1,12 +1,20 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Union +from typing import List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from ..._utils import PropertyInfo from ..._models import BaseModel -__all__ = ["ResponseFunctionWebSearch", "Action", "ActionSearch", "ActionOpenPage", "ActionFind"] +__all__ = ["ResponseFunctionWebSearch", "Action", "ActionSearch", "ActionSearchSource", "ActionOpenPage", "ActionFind"] + + +class ActionSearchSource(BaseModel): + type: Literal["url"] + """The type of source. 
Always `url`.""" + + url: str + """The URL of the source.""" class ActionSearch(BaseModel): @@ -16,6 +24,9 @@ class ActionSearch(BaseModel): type: Literal["search"] """The action type.""" + sources: Optional[List[ActionSearchSource]] = None + """The sources used in the search.""" + class ActionOpenPage(BaseModel): type: Literal["open_page"] diff --git a/src/openai/types/responses/response_function_web_search_param.py b/src/openai/types/responses/response_function_web_search_param.py index 4a06132cf4..fc019d3eb7 100644 --- a/src/openai/types/responses/response_function_web_search_param.py +++ b/src/openai/types/responses/response_function_web_search_param.py @@ -2,10 +2,25 @@ from __future__ import annotations -from typing import Union +from typing import Union, Iterable from typing_extensions import Literal, Required, TypeAlias, TypedDict -__all__ = ["ResponseFunctionWebSearchParam", "Action", "ActionSearch", "ActionOpenPage", "ActionFind"] +__all__ = [ + "ResponseFunctionWebSearchParam", + "Action", + "ActionSearch", + "ActionSearchSource", + "ActionOpenPage", + "ActionFind", +] + + +class ActionSearchSource(TypedDict, total=False): + type: Required[Literal["url"]] + """The type of source. Always `url`.""" + + url: Required[str] + """The URL of the source.""" class ActionSearch(TypedDict, total=False): @@ -15,6 +30,9 @@ class ActionSearch(TypedDict, total=False): type: Required[Literal["search"]] """The action type.""" + sources: Iterable[ActionSearchSource] + """The sources used in the search.""" + class ActionOpenPage(TypedDict, total=False): type: Required[Literal["open_page"]] diff --git a/src/openai/types/responses/tool.py b/src/openai/types/responses/tool.py index d46f8cb0be..0fe7133804 100644 --- a/src/openai/types/responses/tool.py +++ b/src/openai/types/responses/tool.py @@ -3,16 +3,19 @@ from typing import Dict, List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias +from . import web_search_tool from ..._utils import PropertyInfo from ..._models import BaseModel from .custom_tool import CustomTool from .computer_tool import ComputerTool from .function_tool import FunctionTool -from .web_search_tool import WebSearchTool from .file_search_tool import FileSearchTool __all__ = [ "Tool", + "WebSearchTool", + "WebSearchToolFilters", + "WebSearchToolUserLocation", "Mcp", "McpAllowedTools", "McpAllowedToolsMcpToolFilter", @@ -29,6 +32,61 @@ ] +class WebSearchToolFilters(BaseModel): + allowed_domains: Optional[List[str]] = None + """Allowed domains for the search. + + If not provided, all domains are allowed. Subdomains of the provided domains are + allowed as well. + + Example: `["pubmed.ncbi.nlm.nih.gov"]` + """ + + +class WebSearchToolUserLocation(BaseModel): + city: Optional[str] = None + """Free text input for the city of the user, e.g. `San Francisco`.""" + + country: Optional[str] = None + """ + The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + the user, e.g. `US`. + """ + + region: Optional[str] = None + """Free text input for the region of the user, e.g. `California`.""" + + timezone: Optional[str] = None + """ + The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + user, e.g. `America/Los_Angeles`. + """ + + type: Optional[Literal["approximate"]] = None + """The type of location approximation. Always `approximate`.""" + + +class WebSearchTool(BaseModel): + type: Literal["web_search", "web_search_2025_08_26"] + """The type of the web search tool. + + One of `web_search` or `web_search_2025_08_26`. 
+ """ + + filters: Optional[WebSearchToolFilters] = None + """Filters for the search.""" + + search_context_size: Optional[Literal["low", "medium", "high"]] = None + """High level guidance for the amount of context window space to use for the + search. + + One of `low`, `medium`, or `high`. `medium` is the default. + """ + + user_location: Optional[WebSearchToolUserLocation] = None + """The approximate location of the user.""" + + class McpAllowedToolsMcpToolFilter(BaseModel): read_only: Optional[bool] = None """Indicates whether or not a tool modifies data or is read-only. @@ -245,13 +303,14 @@ class LocalShell(BaseModel): Union[ FunctionTool, FileSearchTool, - WebSearchTool, ComputerTool, + WebSearchTool, Mcp, CodeInterpreter, ImageGeneration, LocalShell, CustomTool, + web_search_tool.WebSearchTool, ], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/responses/tool_param.py b/src/openai/types/responses/tool_param.py index 9dde42e294..aff9359efa 100644 --- a/src/openai/types/responses/tool_param.py +++ b/src/openai/types/responses/tool_param.py @@ -14,6 +14,9 @@ __all__ = [ "ToolParam", + "WebSearchTool", + "WebSearchToolFilters", + "WebSearchToolUserLocation", "Mcp", "McpAllowedTools", "McpAllowedToolsMcpToolFilter", @@ -30,6 +33,61 @@ ] +class WebSearchToolFilters(TypedDict, total=False): + allowed_domains: Optional[List[str]] + """Allowed domains for the search. + + If not provided, all domains are allowed. Subdomains of the provided domains are + allowed as well. + + Example: `["pubmed.ncbi.nlm.nih.gov"]` + """ + + +class WebSearchToolUserLocation(TypedDict, total=False): + city: Optional[str] + """Free text input for the city of the user, e.g. `San Francisco`.""" + + country: Optional[str] + """ + The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of + the user, e.g. `US`. + """ + + region: Optional[str] + """Free text input for the region of the user, e.g. `California`.""" + + timezone: Optional[str] + """ + The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the + user, e.g. `America/Los_Angeles`. + """ + + type: Literal["approximate"] + """The type of location approximation. Always `approximate`.""" + + +class WebSearchTool(TypedDict, total=False): + type: Required[Literal["web_search", "web_search_2025_08_26"]] + """The type of the web search tool. + + One of `web_search` or `web_search_2025_08_26`. + """ + + filters: Optional[WebSearchToolFilters] + """Filters for the search.""" + + search_context_size: Literal["low", "medium", "high"] + """High level guidance for the amount of context window space to use for the + search. + + One of `low`, `medium`, or `high`. `medium` is the default. + """ + + user_location: Optional[WebSearchToolUserLocation] + """The approximate location of the user.""" + + class McpAllowedToolsMcpToolFilter(TypedDict, total=False): read_only: bool """Indicates whether or not a tool modifies data or is read-only. 
@@ -243,13 +301,14 @@ class LocalShell(TypedDict, total=False): ToolParam: TypeAlias = Union[ FunctionToolParam, FileSearchToolParam, - WebSearchToolParam, ComputerToolParam, + WebSearchTool, Mcp, CodeInterpreter, ImageGeneration, LocalShell, CustomToolParam, + WebSearchToolParam, ] From 3154a78ac8cb404d64707d63cdfe72d3db8a45be Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 26 Aug 2025 20:43:47 +0000 Subject: [PATCH 90/90] release: 1.102.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 22 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 070375331a..98411f0f2b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.101.0" + ".": "1.102.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 44b25e0a4c..26ca1c5cb2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 1.102.0 (2025-08-26) + +Full Changelog: [v1.101.0...v1.102.0](https://github.com/openai/openai-python/compare/v1.101.0...v1.102.0) + +### Features + +* **api:** add web search filters ([1c199a8](https://github.com/openai/openai-python/commit/1c199a8dc85f773ae656fe850fdfb80b91f8f6b1)) + + +### Bug Fixes + +* avoid newer type syntax ([bd0c668](https://github.com/openai/openai-python/commit/bd0c668d754b89c78c2c9ad2e081258c04aaece6)) + + +### Chores + +* **internal:** change ci workflow machines ([3e129d5](https://github.com/openai/openai-python/commit/3e129d5e49f6391dea7497132cb3cfed8e5dd8ee)) +* **internal:** codegen related update ([b6dc170](https://github.com/openai/openai-python/commit/b6dc170832d719fc5028cfe234748c22e6e168aa)) + ## 1.101.0 (2025-08-21) Full Changelog: [v1.100.3...v1.101.0](https://github.com/openai/openai-python/compare/v1.100.3...v1.101.0) diff --git a/pyproject.toml b/pyproject.toml index 8198b178be..6736c1ad9e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.101.0" +version = "1.102.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 802084af5d..b2d62263ff 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.101.0" # x-release-please-version +__version__ = "1.102.0" # x-release-please-version
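
The one-character fix in PATCH 88 is worth unpacking. `typing.cast` is an ordinary function call, so its first argument is evaluated at runtime; a subscripted builtin such as `dict[str, Any]` only evaluates cleanly on Python 3.9+, while the quoted form is an inert string that `cast` never inspects. A minimal sketch of the difference (the failing case assumes a Python 3.8 interpreter, which this library still supported at the time):

    # Sketch: the runtime difference behind "fix: avoid newer type syntax".
    from typing import Any, cast

    dumped = {"id": "resp_123", "usage": None}

    # The quoted type is never evaluated, so this works on every supported
    # Python version; cast() simply returns its second argument unchanged.
    safe = cast("dict[str, Any]", dumped)

    # The unquoted form evaluates dict[str, Any] before cast() is even
    # called, which raises "TypeError: 'type' object is not subscriptable"
    # on Python 3.8:
    # broken = cast(dict[str, Any], dumped)

    print(safe["id"])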
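
And a hedged usage sketch for the web search filters added in PATCH 89, tying together the new `filters.allowed_domains` field on the tool and the new `web_search_call.action.sources` include value, both visible in the type definitions above. The model name and query are placeholders, and the call assumes OPENAI_API_KEY is set and the chosen model supports web search:

    # Sketch (untested): domain-filtered web search with source reporting.
    from openai import OpenAI

    client = OpenAI()

    response = client.responses.create(
        model="gpt-5",  # placeholder; any web-search-capable model
        input="Summarize this month's hypertension research.",
        tools=[
            {
                "type": "web_search",
                # Restrict results to this domain and its subdomains.
                "filters": {"allowed_domains": ["pubmed.ncbi.nlm.nih.gov"]},
                "search_context_size": "medium",
                "user_location": {"type": "approximate", "country": "US"},
            }
        ],
        # Ask the API to attach the consulted sources to each search action.
        include=["web_search_call.action.sources"],
    )

    # Search actions may now carry a `sources` list of
    # {type: "url", url: ...} objects, per ActionSearchSource above.
    for item in response.output:
        if item.type == "web_search_call" and item.action.type == "search":
            for source in item.action.sources or []:
                print(source.url)

Note the union reshuffle at the bottom of tool.py and tool_param.py: the legacy `web_search_tool.WebSearchTool` (the `web_search_preview` variant) stays in the `Tool` union, so existing preview-tool payloads keep deserializing alongside the new `web_search` type.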