From e3dfa7c417b8c750ff62d98650e75e72ad9b1477 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 15 Aug 2025 19:11:41 +0000 Subject: [PATCH 1/2] feat(api): add new text parameters, expiration options --- .stats.yml | 6 +- src/openai/resources/batches.py | 10 ++ .../resources/beta/realtime/realtime.py | 8 +- .../resources/beta/realtime/sessions.py | 4 +- .../beta/realtime/transcription_sessions.py | 4 +- .../resources/beta/threads/runs/runs.py | 12 +- src/openai/resources/beta/threads/threads.py | 12 +- .../resources/chat/completions/completions.py | 48 +++++--- src/openai/resources/files.py | 14 ++- src/openai/resources/responses/responses.py | 107 ++++++------------ src/openai/resources/uploads/uploads.py | 10 ++ src/openai/types/batch_create_params.py | 23 +++- src/openai/types/beta/realtime/session.py | 2 +- .../beta/realtime/session_create_params.py | 2 +- .../beta/realtime/session_update_event.py | 2 +- .../realtime/session_update_event_param.py | 2 +- .../transcription_session_create_params.py | 2 +- .../realtime/transcription_session_update.py | 2 +- .../transcription_session_update_param.py | 2 +- .../beta/thread_create_and_run_params.py | 2 +- src/openai/types/beta/threads/run.py | 2 +- .../types/beta/threads/run_create_params.py | 2 +- src/openai/types/chat/chat_completion.py | 5 +- .../types/chat/chat_completion_chunk.py | 5 +- .../types/chat/completion_create_params.py | 18 ++- src/openai/types/file_create_params.py | 25 +++- src/openai/types/responses/__init__.py | 2 - src/openai/types/responses/response.py | 46 +++++--- .../types/responses/response_create_params.py | 45 +++++--- .../types/responses/response_text_config.py | 35 ------ .../responses/response_text_config_param.py | 36 ------ src/openai/types/upload_create_params.py | 25 +++- tests/api_resources/chat/test_completions.py | 4 + tests/api_resources/test_batches.py | 8 ++ tests/api_resources/test_files.py | 24 ++++ tests/api_resources/test_responses.py | 4 +- tests/api_resources/test_uploads.py | 28 +++++ 37 files changed, 343 insertions(+), 245 deletions(-) delete mode 100644 src/openai/types/responses/response_text_config.py delete mode 100644 src/openai/types/responses/response_text_config_param.py diff --git a/.stats.yml b/.stats.yml index a098c3d40d..66c46e7730 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-9cadfad609f94f20ebf74fdc06a80302f1a324dc69700a309a8056aabca82fd2.yml -openapi_spec_hash: 3eb8d86c06f0bb5e1190983e5acfc9ba -config_hash: 68337b532875626269c304372a669f67 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-24be531010b354303d741fc9247c1f84f75978f9f7de68aca92cb4f240a04722.yml +openapi_spec_hash: 3e46f439f6a863beadc71577eb4efa15 +config_hash: ed87b9139ac595a04a2162d754df2fed diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py index 26ea498b31..2340bd2e32 100644 --- a/src/openai/resources/batches.py +++ b/src/openai/resources/batches.py @@ -49,6 +49,7 @@ def create( endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"], input_file_id: str, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, + output_expires_after: batch_create_params.OutputExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
@@ -85,6 +86,9 @@ def create(
              Keys are strings with a maximum length of 64 characters. Values are strings
              with a maximum length of 512 characters.

+          output_expires_after: The expiration policy for the output and/or error files that are generated
+              for a batch.
+
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request
@@ -101,6 +105,7 @@ def create(
                    "endpoint": endpoint,
                    "input_file_id": input_file_id,
                    "metadata": metadata,
+                    "output_expires_after": output_expires_after,
                },
                batch_create_params.BatchCreateParams,
            ),
@@ -259,6 +264,7 @@ async def create(
        endpoint: Literal["/v1/responses", "/v1/chat/completions", "/v1/embeddings", "/v1/completions"],
        input_file_id: str,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
+        output_expires_after: batch_create_params.OutputExpiresAfter | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
@@ -295,6 +301,9 @@ async def create(
              Keys are strings with a maximum length of 64 characters. Values are strings
              with a maximum length of 512 characters.

+          output_expires_after: The expiration policy for the output and/or error files that are generated
+              for a batch.
+
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request
@@ -311,6 +320,7 @@ async def create(
                    "endpoint": endpoint,
                    "input_file_id": input_file_id,
                    "metadata": metadata,
+                    "output_expires_after": output_expires_after,
                },
                batch_create_params.BatchCreateParams,
            ),
diff --git a/src/openai/resources/beta/realtime/realtime.py b/src/openai/resources/beta/realtime/realtime.py
index 8e1b558cf3..7b99c7f6c4 100644
--- a/src/openai/resources/beta/realtime/realtime.py
+++ b/src/openai/resources/beta/realtime/realtime.py
@@ -652,8 +652,8 @@ def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str | Not
        """Send this event to cancel an in-progress response.

        The server will respond
-        with a `response.cancelled` event or an error if there is no response to
-        cancel.
+        with a `response.done` event with `response.status` set to `cancelled`. If
+        there is no response to cancel, the server will respond with an error.
        """
        self._connection.send(
            cast(
@@ -904,8 +904,8 @@ async def cancel(self, *, event_id: str | NotGiven = NOT_GIVEN, response_id: str
        """Send this event to cancel an in-progress response.

        The server will respond
-        with a `response.cancelled` event or an error if there is no response to
-        cancel.
+        with a `response.done` event with `response.status` set to `cancelled`. If
+        there is no response to cancel, the server will respond with an error.
        """
        await self._connection.send(
            cast(
diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py
index e639c0ba43..eaddb384ce 100644
--- a/src/openai/resources/beta/realtime/sessions.py
+++ b/src/openai/resources/beta/realtime/sessions.py
@@ -152,7 +152,7 @@ def create(
          set to `null` to turn off, in which case the client must manually trigger model
          response. Server VAD means that the model will detect the start and end of
          speech based on audio volume and respond at the end of user speech.
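(Aside, not part of the diff: a minimal usage sketch for the `output_expires_after` parameter added to `batches.create` in the hunks above. The file id and values are illustrative; the policy shape comes from `batch_create_params.OutputExpiresAfter`.)

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

# `input_file_id` must reference an already-uploaded JSONL file with
# purpose="batch"; "file-abc123" is a placeholder.
batch = client.batches.create(
    endpoint="/v1/chat/completions",
    input_file_id="file-abc123",
    completion_window="24h",
    # New parameter: the generated output/error files expire 7 days
    # (604800 seconds) after *their* creation time, not the batch's.
    output_expires_after={"anchor": "created_at", "seconds": 604800},
)
print(batch.id, batch.status)
```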
Semantic VAD - is more advanced and uses a turn detection model (in conjuction with VAD) to + is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and wait longer @@ -334,7 +334,7 @@ async def create( set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. Semantic VAD - is more advanced and uses a turn detection model (in conjuction with VAD) to + is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and wait longer diff --git a/src/openai/resources/beta/realtime/transcription_sessions.py b/src/openai/resources/beta/realtime/transcription_sessions.py index 5f97b3c8e3..54fe7d5a6c 100644 --- a/src/openai/resources/beta/realtime/transcription_sessions.py +++ b/src/openai/resources/beta/realtime/transcription_sessions.py @@ -96,7 +96,7 @@ def create( set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. Semantic VAD - is more advanced and uses a turn detection model (in conjuction with VAD) to + is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and wait longer @@ -209,7 +209,7 @@ async def create( set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. Semantic VAD - is more advanced and uses a turn detection model (in conjuction with VAD) to + is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and wait longer diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index 01246d7c12..07b43e6471 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -220,7 +220,7 @@ def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -370,7 +370,7 @@ def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. 
+ control the initial context window of the run. extra_headers: Send extra headers @@ -520,7 +520,7 @@ def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1650,7 +1650,7 @@ async def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1800,7 +1800,7 @@ async def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1950,7 +1950,7 @@ async def create( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index ff2a41155d..dbe47d2d0e 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -393,7 +393,7 @@ def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -527,7 +527,7 @@ def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -661,7 +661,7 @@ def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1251,7 +1251,7 @@ async def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1385,7 +1385,7 @@ async def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers @@ -1519,7 +1519,7 @@ async def create_and_run( We generally recommend altering this or temperature but not both. truncation_strategy: Controls for how a thread will be truncated prior to the run. 
Use this to - control the intial context window of the run. + control the initial context window of the run. extra_headers: Send extra headers diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py index 9404d85192..bc5fe0fc05 100644 --- a/src/openai/resources/chat/completions/completions.py +++ b/src/openai/resources/chat/completions/completions.py @@ -103,6 +103,7 @@ def parse( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -203,6 +204,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": _type_to_response_format(response_format), + "text": text, "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, @@ -265,6 +267,7 @@ def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -438,9 +441,8 @@ def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -554,6 +556,7 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -736,9 +739,8 @@ def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. 
When the `service_tier` parameter is set, the response body will include the @@ -843,6 +845,7 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1025,9 +1028,8 @@ def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -1132,6 +1134,7 @@ def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1178,6 +1181,7 @@ def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, + "text": text, "tool_choice": tool_choice, "tools": tools, "top_logprobs": top_logprobs, @@ -1400,6 +1404,7 @@ def stream( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -1470,6 +1475,7 @@ def stream( presence_penalty=presence_penalty, prompt_cache_key=prompt_cache_key, reasoning_effort=reasoning_effort, + text=text, safety_identifier=safety_identifier, seed=seed, service_tier=service_tier, @@ -1542,6 +1548,7 @@ async def parse( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -1642,6 +1649,7 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": _type_to_response_format(response_format), + "text": text, "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, @@ -1704,6 +1712,7 @@ async def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: 
Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1877,9 +1886,8 @@ async def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -1993,6 +2001,7 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2175,9 +2184,8 @@ async def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -2282,6 +2290,7 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2464,9 +2473,8 @@ async def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. 
When the `service_tier` parameter is set, the response body will include the @@ -2571,6 +2579,7 @@ async def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2617,6 +2626,7 @@ async def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, + "text": text, "tool_choice": tool_choice, "tools": tools, "top_logprobs": top_logprobs, @@ -2839,6 +2849,7 @@ def stream( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, + text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -2910,6 +2921,7 @@ def stream( presence_penalty=presence_penalty, prompt_cache_key=prompt_cache_key, reasoning_effort=reasoning_effort, + text=text, safety_identifier=safety_identifier, seed=seed, service_tier=service_tier, diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py index 179af870ba..b45b8f303f 100644 --- a/src/openai/resources/files.py +++ b/src/openai/resources/files.py @@ -57,6 +57,7 @@ def create( *, file: FileTypes, purpose: FilePurpose, + expires_after: file_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -68,7 +69,7 @@ def create( Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up - to 100 GB. + to 1 TB. The Assistants API supports files up to 2 million tokens and of specific file types. See the @@ -96,6 +97,9 @@ def create( fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used for eval data sets + expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire + after 30 days and all other files are persisted until they are manually deleted. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -108,6 +112,7 @@ def create( { "file": file, "purpose": purpose, + "expires_after": expires_after, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) @@ -369,6 +374,7 @@ async def create( *, file: FileTypes, purpose: FilePurpose, + expires_after: file_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -380,7 +386,7 @@ async def create( Individual files can be up to 512 MB, and the size of all files uploaded by one organization can be up - to 100 GB. + to 1 TB. 
The Assistants API supports files up to 2 million tokens and of specific file types. See the @@ -408,6 +414,9 @@ async def create( fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used for eval data sets + expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire + after 30 days and all other files are persisted until they are manually deleted. + extra_headers: Send extra headers extra_query: Add additional query parameters to the request @@ -420,6 +429,7 @@ async def create( { "file": file, "purpose": purpose, + "expires_after": expires_after, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index 8983daf278..97ad0faa94 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -43,7 +43,6 @@ from ...types.responses.response_input_param import ResponseInputParam from ...types.responses.response_prompt_param import ResponsePromptParam from ...types.responses.response_stream_event import ResponseStreamEvent -from ...types.responses.response_text_config_param import ResponseTextConfigParam __all__ = ["Responses", "AsyncResponses"] @@ -95,7 +94,7 @@ def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -195,7 +194,7 @@ def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning: **o-series models only** + reasoning: **gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -214,9 +213,8 @@ def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -240,12 +238,6 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - text: Configuration options for a text response from the model. Can be plain text or - structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. 
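(Aside, not part of the diff: a sketch of the `expires_after` parameter that the files.py hunks above add to `files.create`; `uploads.create` later in this patch reuses the same shape. The local filename is illustrative.)

```python
from openai import OpenAI

client = OpenAI()

# Upload a batch input file that is deleted automatically after one day;
# 86400 seconds sits inside the documented 3600-2592000 range.
with open("batch_input.jsonl", "rb") as f:
    uploaded = client.files.create(
        file=f,
        purpose="batch",
        expires_after={"anchor": "created_at", "seconds": 86400},
    )
print(uploaded.id, uploaded.expires_at)
```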
@@ -323,7 +315,7 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -430,7 +422,7 @@ def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning: **o-series models only** + reasoning: **gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -449,9 +441,8 @@ def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -468,12 +459,6 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - text: Configuration options for a text response from the model. Can be plain text or - structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. @@ -551,7 +536,7 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -658,7 +643,7 @@ def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning: **o-series models only** + reasoning: **gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -677,9 +662,8 @@ def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. 
When the `service_tier` parameter is set, the response body will include the @@ -696,12 +680,6 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - text: Configuration options for a text response from the model. Can be plain text or - structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. @@ -778,7 +756,7 @@ def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -869,7 +847,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -901,7 +879,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -1030,7 +1008,7 @@ def parse( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1461,7 +1439,7 @@ async def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1561,7 +1539,7 @@ async def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). 
- reasoning: **o-series models only** + reasoning: **gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -1580,9 +1558,8 @@ async def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -1606,12 +1583,6 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - text: Configuration options for a text response from the model. Can be plain text or - structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. @@ -1689,7 +1660,7 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1796,7 +1767,7 @@ async def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning: **o-series models only** + reasoning: **gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -1815,9 +1786,8 @@ async def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -1834,12 +1804,6 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - text: Configuration options for a text response from the model. Can be plain text or - structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - tool_choice: How the model should select which tool (or tools) to use when generating a response. 
See the `tools` parameter to see how to specify which tools the model can call. @@ -1917,7 +1881,7 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2024,7 +1988,7 @@ async def create( hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching). - reasoning: **o-series models only** + reasoning: **gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -2043,9 +2007,8 @@ async def create( - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -2062,12 +2025,6 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. - text: Configuration options for a text response from the model. Can be plain text or - structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. 
@@ -2144,7 +2101,7 @@ async def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2235,7 +2192,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -2267,7 +2224,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -2400,7 +2357,7 @@ async def parse( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, + text: response_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, diff --git a/src/openai/resources/uploads/uploads.py b/src/openai/resources/uploads/uploads.py index ecfcee4800..125a45e33c 100644 --- a/src/openai/resources/uploads/uploads.py +++ b/src/openai/resources/uploads/uploads.py @@ -170,6 +170,7 @@ def create( filename: str, mime_type: str, purpose: FilePurpose, + expires_after: upload_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -213,6 +214,9 @@ def create( See the [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). + expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire + after 30 days and all other files are persisted until they are manually deleted. 
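(Aside, not part of the diff: the matching `expires_after` on `uploads.create`, documented in the hunk above. Sizes and names are illustrative; the policy applies to the File object the completed upload produces.)

```python
from openai import OpenAI

client = OpenAI()

upload = client.uploads.create(
    bytes=2_097_152,  # total size of the file to be uploaded, in bytes
    filename="training_data.jsonl",
    mime_type="text/jsonl",
    purpose="fine-tune",
    expires_after={"anchor": "created_at", "seconds": 604800},  # 7 days
)
print(upload.id, upload.status)
```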
+
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request
@@ -229,6 +233,7 @@ def create(
                    "filename": filename,
                    "mime_type": mime_type,
                    "purpose": purpose,
+                    "expires_after": expires_after,
                },
                upload_create_params.UploadCreateParams,
            ),
@@ -473,6 +478,7 @@ async def create(
        filename: str,
        mime_type: str,
        purpose: FilePurpose,
+        expires_after: upload_create_params.ExpiresAfter | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
@@ -516,6 +522,9 @@ async def create(
          See the
          [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).

+          expires_after: The expiration policy for a file. By default, files with `purpose=batch` expire
+              after 30 days and all other files are persisted until they are manually deleted.
+
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request
@@ -532,6 +541,7 @@ async def create(
                    "filename": filename,
                    "mime_type": mime_type,
                    "purpose": purpose,
+                    "expires_after": expires_after,
                },
                upload_create_params.UploadCreateParams,
            ),
diff --git a/src/openai/types/batch_create_params.py b/src/openai/types/batch_create_params.py
index cc95afd3ba..c0f9034d5e 100644
--- a/src/openai/types/batch_create_params.py
+++ b/src/openai/types/batch_create_params.py
@@ -7,7 +7,7 @@
 from .shared_params.metadata import Metadata

-__all__ = ["BatchCreateParams"]
+__all__ = ["BatchCreateParams", "OutputExpiresAfter"]


 class BatchCreateParams(TypedDict, total=False):
@@ -47,3 +47,24 @@ class BatchCreateParams(TypedDict, total=False):
     Keys are strings with a maximum length of 64 characters. Values are strings
     with a maximum length of 512 characters.
     """
+
+    output_expires_after: OutputExpiresAfter
+    """
+    The expiration policy for the output and/or error files that are generated for
+    a batch.
+    """
+
+
+class OutputExpiresAfter(TypedDict, total=False):
+    anchor: Required[Literal["created_at"]]
+    """Anchor timestamp after which the expiration policy applies.
+
+    Supported anchors: `created_at`. Note that the anchor is the file creation time,
+    not the time the batch is created.
+    """
+
+    seconds: Required[int]
+    """The number of seconds after the anchor time that the file will expire.
+
+    Must be between 3600 (1 hour) and 2592000 (30 days).
+    """
diff --git a/src/openai/types/beta/realtime/session.py b/src/openai/types/beta/realtime/session.py
index f84b3ee4a0..f478a92fbb 100644
--- a/src/openai/types/beta/realtime/session.py
+++ b/src/openai/types/beta/realtime/session.py
@@ -260,7 +260,7 @@ class Session(BaseModel):
    This can be set to `null` to turn off, in which case the client must manually
    trigger model response. Server VAD means that the model will detect the start
    and end of speech based on audio volume and respond at the end of user speech.
-    Semantic VAD is more advanced and uses a turn detection model (in conjuction
+    Semantic VAD is more advanced and uses a turn detection model (in conjunction
    with VAD) to semantically estimate whether the user has finished speaking, then
    dynamically sets a timeout based on this probability.
For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py index 6be09d8bae..8a477f9843 100644 --- a/src/openai/types/beta/realtime/session_create_params.py +++ b/src/openai/types/beta/realtime/session_create_params.py @@ -137,7 +137,7 @@ class SessionCreateParams(TypedDict, total=False): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py index 5b4185dbf6..11929ab376 100644 --- a/src/openai/types/beta/realtime/session_update_event.py +++ b/src/openai/types/beta/realtime/session_update_event.py @@ -282,7 +282,7 @@ class Session(BaseModel): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py index 3063449bfd..e939f4cc79 100644 --- a/src/openai/types/beta/realtime/session_update_event_param.py +++ b/src/openai/types/beta/realtime/session_update_event_param.py @@ -280,7 +280,7 @@ class Session(TypedDict, total=False): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. 
For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/src/openai/types/beta/realtime/transcription_session_create_params.py b/src/openai/types/beta/realtime/transcription_session_create_params.py index 15b2f14c14..3ac3af4fa9 100644 --- a/src/openai/types/beta/realtime/transcription_session_create_params.py +++ b/src/openai/types/beta/realtime/transcription_session_create_params.py @@ -61,7 +61,7 @@ class TranscriptionSessionCreateParams(TypedDict, total=False): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/src/openai/types/beta/realtime/transcription_session_update.py b/src/openai/types/beta/realtime/transcription_session_update.py index 73253b6848..5ae1ad226d 100644 --- a/src/openai/types/beta/realtime/transcription_session_update.py +++ b/src/openai/types/beta/realtime/transcription_session_update.py @@ -165,7 +165,7 @@ class Session(BaseModel): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/src/openai/types/beta/realtime/transcription_session_update_param.py b/src/openai/types/beta/realtime/transcription_session_update_param.py index 6b38a9af39..d7065f61c7 100644 --- a/src/openai/types/beta/realtime/transcription_session_update_param.py +++ b/src/openai/types/beta/realtime/transcription_session_update_param.py @@ -165,7 +165,7 @@ class Session(TypedDict, total=False): This can be set to `null` to turn off, in which case the client must manually trigger model response. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech. - Semantic VAD is more advanced and uses a turn detection model (in conjuction + Semantic VAD is more advanced and uses a turn detection model (in conjunction with VAD) to semantically estimate whether the user has finished speaking, then dynamically sets a timeout based on this probability. 
For example, if user audio trails off with "uhhm", the model will score a low probability of turn end and diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index d813710579..ad148d693a 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -169,7 +169,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): truncation_strategy: Optional[TruncationStrategy] """Controls for how a thread will be truncated prior to the run. - Use this to control the intial context window of the run. + Use this to control the initial context window of the run. """ diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py index da9418d6f9..c545cc3759 100644 --- a/src/openai/types/beta/threads/run.py +++ b/src/openai/types/beta/threads/run.py @@ -228,7 +228,7 @@ class Run(BaseModel): truncation_strategy: Optional[TruncationStrategy] = None """Controls for how a thread will be truncated prior to the run. - Use this to control the intial context window of the run. + Use this to control the initial context window of the run. """ usage: Optional[Usage] = None diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index f9defcb19c..cfd272f5ad 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -176,7 +176,7 @@ class RunCreateParamsBase(TypedDict, total=False): truncation_strategy: Optional[TruncationStrategy] """Controls for how a thread will be truncated prior to the run. - Use this to control the intial context window of the run. + Use this to control the initial context window of the run. """ diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py index 42463f7ec8..6bc4bafe79 100644 --- a/src/openai/types/chat/chat_completion.py +++ b/src/openai/types/chat/chat_completion.py @@ -68,9 +68,8 @@ class ChatCompletion(BaseModel): - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py index 082bb6cc19..ea32d157ef 100644 --- a/src/openai/types/chat/chat_completion_chunk.py +++ b/src/openai/types/chat/chat_completion_chunk.py @@ -137,9 +137,8 @@ class ChatCompletionChunk(BaseModel): - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. 
+ '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index a3bc90b0a2..3ebab45b56 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -25,6 +25,7 @@ "FunctionCall", "Function", "ResponseFormat", + "Text", "WebSearchOptions", "WebSearchOptionsUserLocation", "WebSearchOptionsUserLocationApproximate", @@ -233,9 +234,8 @@ class CompletionCreateParamsBase(TypedDict, total=False): - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -271,6 +271,8 @@ class CompletionCreateParamsBase(TypedDict, total=False): this or `top_p` but not both. """ + text: Text + tool_choice: ChatCompletionToolChoiceOptionParam """ Controls which (if any) tool is called by the model. `none` means the model will @@ -365,6 +367,16 @@ class Function(TypedDict, total=False): ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject] +class Text(TypedDict, total=False): + verbosity: Optional[Literal["low", "medium", "high"]] + """Constrains the verbosity of the model's response. + + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. + """ + + class WebSearchOptionsUserLocationApproximate(TypedDict, total=False): city: str """Free text input for the city of the user, e.g. `San Francisco`.""" diff --git a/src/openai/types/file_create_params.py b/src/openai/types/file_create_params.py index 728dfd350f..f4583b16a3 100644 --- a/src/openai/types/file_create_params.py +++ b/src/openai/types/file_create_params.py @@ -2,12 +2,12 @@ from __future__ import annotations -from typing_extensions import Required, TypedDict +from typing_extensions import Literal, Required, TypedDict from .._types import FileTypes from .file_purpose import FilePurpose -__all__ = ["FileCreateParams"] +__all__ = ["FileCreateParams", "ExpiresAfter"] class FileCreateParams(TypedDict, total=False): @@ -22,3 +22,24 @@ class FileCreateParams(TypedDict, total=False): fine-tuning - `user_data`: Flexible file type for any purpose - `evals`: Used for eval data sets """ + + expires_after: ExpiresAfter + """The expiration policy for a file. + + By default, files with `purpose=batch` expire after 30 days and all other files + are persisted until they are manually deleted. + """ + + +class ExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["created_at"]] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `created_at`. 
+ """ + + seconds: Required[int] + """The number of seconds after the anchor time that the file will expire. + + Must be between 3600 (1 hour) and 2592000 (30 days). + """ diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py index 74d8688081..72ec741f91 100644 --- a/src/openai/types/responses/__init__.py +++ b/src/openai/types/responses/__init__.py @@ -42,7 +42,6 @@ from .response_input_param import ResponseInputParam as ResponseInputParam from .response_output_item import ResponseOutputItem as ResponseOutputItem from .response_output_text import ResponseOutputText as ResponseOutputText -from .response_text_config import ResponseTextConfig as ResponseTextConfig from .tool_choice_function import ToolChoiceFunction as ToolChoiceFunction from .response_failed_event import ResponseFailedEvent as ResponseFailedEvent from .response_prompt_param import ResponsePromptParam as ResponsePromptParam @@ -76,7 +75,6 @@ from .response_in_progress_event import ResponseInProgressEvent as ResponseInProgressEvent from .response_input_image_param import ResponseInputImageParam as ResponseInputImageParam from .response_output_text_param import ResponseOutputTextParam as ResponseOutputTextParam -from .response_text_config_param import ResponseTextConfigParam as ResponseTextConfigParam from .tool_choice_function_param import ToolChoiceFunctionParam as ToolChoiceFunctionParam from .response_computer_tool_call import ResponseComputerToolCall as ResponseComputerToolCall from .response_format_text_config import ResponseFormatTextConfig as ResponseFormatTextConfig diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index 5ebb18fda4..49e38a46fe 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -18,11 +18,11 @@ from .tool_choice_allowed import ToolChoiceAllowed from .tool_choice_options import ToolChoiceOptions from .response_output_item import ResponseOutputItem -from .response_text_config import ResponseTextConfig from .tool_choice_function import ToolChoiceFunction from ..shared.responses_model import ResponsesModel +from .response_format_text_config import ResponseFormatTextConfig -__all__ = ["Response", "IncompleteDetails", "ToolChoice"] +__all__ = ["Response", "IncompleteDetails", "ToolChoice", "Text"] class IncompleteDetails(BaseModel): @@ -35,6 +35,32 @@ class IncompleteDetails(BaseModel): ] +class Text(BaseModel): + format: Optional[ResponseFormatTextConfig] = None + """An object specifying the format that the model must output. + + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ + + verbosity: Optional[Literal["low", "medium", "high"]] = None + """Constrains the verbosity of the model's response. + + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. 
+ """ + + class Response(BaseModel): id: str """Unique identifier for this Response.""" @@ -177,7 +203,7 @@ class Response(BaseModel): """ reasoning: Optional[Reasoning] = None - """**o-series models only** + """**gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -201,9 +227,8 @@ class Response(BaseModel): - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -219,14 +244,7 @@ class Response(BaseModel): `incomplete`. """ - text: Optional[ResponseTextConfig] = None - """Configuration options for a text response from the model. - - Can be plain text or structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - """ + text: Optional[Text] = None top_logprobs: Optional[int] = None """ diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index ea91fa1265..89afccf06b 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -16,13 +16,14 @@ from ..shared_params.reasoning import Reasoning from .tool_choice_custom_param import ToolChoiceCustomParam from .tool_choice_allowed_param import ToolChoiceAllowedParam -from .response_text_config_param import ResponseTextConfigParam from .tool_choice_function_param import ToolChoiceFunctionParam from ..shared_params.responses_model import ResponsesModel +from .response_format_text_config_param import ResponseFormatTextConfigParam __all__ = [ "ResponseCreateParamsBase", "StreamOptions", + "Text", "ToolChoice", "ResponseCreateParamsNonStreaming", "ResponseCreateParamsStreaming", @@ -134,7 +135,7 @@ class ResponseCreateParamsBase(TypedDict, total=False): """ reasoning: Optional[Reasoning] - """**o-series models only** + """**gpt-5 and o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). @@ -158,9 +159,8 @@ class ResponseCreateParamsBase(TypedDict, total=False): - If set to 'default', then the request will be processed with the standard pricing and performance for the selected model. - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or - 'priority', then the request will be processed with the corresponding service - tier. [Contact sales](https://openai.com/contact-sales) to learn more about - Priority processing. + '[priority](https://openai.com/api-priority-processing/)', then the request + will be processed with the corresponding service tier. - When not set, the default behavior is 'auto'. When the `service_tier` parameter is set, the response body will include the @@ -183,14 +183,7 @@ class ResponseCreateParamsBase(TypedDict, total=False): this or `top_p` but not both. 
""" - text: ResponseTextConfigParam - """Configuration options for a text response from the model. - - Can be plain text or structured JSON data. Learn more: - - - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) - - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) - """ + text: Text tool_choice: ToolChoice """ @@ -267,6 +260,32 @@ class StreamOptions(TypedDict, total=False): """ +class Text(TypedDict, total=False): + format: ResponseFormatTextConfigParam + """An object specifying the format that the model must output. + + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ + + verbosity: Optional[Literal["low", "medium", "high"]] + """Constrains the verbosity of the model's response. + + Lower values will result in more concise responses, while higher values will + result in more verbose responses. Currently supported values are `low`, + `medium`, and `high`. + """ + + ToolChoice: TypeAlias = Union[ ToolChoiceOptions, ToolChoiceAllowedParam, diff --git a/src/openai/types/responses/response_text_config.py b/src/openai/types/responses/response_text_config.py deleted file mode 100644 index c53546da6d..0000000000 --- a/src/openai/types/responses/response_text_config.py +++ /dev/null @@ -1,35 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel -from .response_format_text_config import ResponseFormatTextConfig - -__all__ = ["ResponseTextConfig"] - - -class ResponseTextConfig(BaseModel): - format: Optional[ResponseFormatTextConfig] = None - """An object specifying the format that the model must output. - - Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - ensures the model will match your supplied JSON schema. Learn more in the - [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - - The default format is `{ "type": "text" }` with no additional options. - - **Not recommended for gpt-4o and newer models:** - - Setting to `{ "type": "json_object" }` enables the older JSON mode, which - ensures the message the model generates is valid JSON. Using `json_schema` is - preferred for models that support it. - """ - - verbosity: Optional[Literal["low", "medium", "high"]] = None - """Constrains the verbosity of the model's response. - - Lower values will result in more concise responses, while higher values will - result in more verbose responses. Currently supported values are `low`, - `medium`, and `high`. - """ diff --git a/src/openai/types/responses/response_text_config_param.py b/src/openai/types/responses/response_text_config_param.py deleted file mode 100644 index 1229fce35b..0000000000 --- a/src/openai/types/responses/response_text_config_param.py +++ /dev/null @@ -1,36 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from __future__ import annotations - -from typing import Optional -from typing_extensions import Literal, TypedDict - -from .response_format_text_config_param import ResponseFormatTextConfigParam - -__all__ = ["ResponseTextConfigParam"] - - -class ResponseTextConfigParam(TypedDict, total=False): - format: ResponseFormatTextConfigParam - """An object specifying the format that the model must output. - - Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - ensures the model will match your supplied JSON schema. Learn more in the - [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - - The default format is `{ "type": "text" }` with no additional options. - - **Not recommended for gpt-4o and newer models:** - - Setting to `{ "type": "json_object" }` enables the older JSON mode, which - ensures the message the model generates is valid JSON. Using `json_schema` is - preferred for models that support it. - """ - - verbosity: Optional[Literal["low", "medium", "high"]] - """Constrains the verbosity of the model's response. - - Lower values will result in more concise responses, while higher values will - result in more verbose responses. Currently supported values are `low`, - `medium`, and `high`. - """ diff --git a/src/openai/types/upload_create_params.py b/src/openai/types/upload_create_params.py index 2ebabe6c66..ab4cded81d 100644 --- a/src/openai/types/upload_create_params.py +++ b/src/openai/types/upload_create_params.py @@ -2,11 +2,11 @@ from __future__ import annotations -from typing_extensions import Required, TypedDict +from typing_extensions import Literal, Required, TypedDict from .file_purpose import FilePurpose -__all__ = ["UploadCreateParams"] +__all__ = ["UploadCreateParams", "ExpiresAfter"] class UploadCreateParams(TypedDict, total=False): @@ -29,3 +29,24 @@ class UploadCreateParams(TypedDict, total=False): See the [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose). """ + + expires_after: ExpiresAfter + """The expiration policy for a file. + + By default, files with `purpose=batch` expire after 30 days and all other files + are persisted until they are manually deleted. + """ + + +class ExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["created_at"]] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `created_at`. + """ + + seconds: Required[int] + """The number of seconds after the anchor time that the file will expire. + + Must be between 3600 (1 hour) and 2592000 (30 days). 
+ """ diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 358ea18cbb..885c3bd9a6 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -86,6 +86,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "include_usage": True, }, temperature=1, + text={"verbosity": "low"}, tool_choice="none", tools=[ { @@ -218,6 +219,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "include_usage": True, }, temperature=1, + text={"verbosity": "low"}, tool_choice="none", tools=[ { @@ -527,6 +529,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "include_usage": True, }, temperature=1, + text={"verbosity": "low"}, tool_choice="none", tools=[ { @@ -659,6 +662,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "include_usage": True, }, temperature=1, + text={"verbosity": "low"}, tool_choice="none", tools=[ { diff --git a/tests/api_resources/test_batches.py b/tests/api_resources/test_batches.py index 6775094a58..95b94c4846 100644 --- a/tests/api_resources/test_batches.py +++ b/tests/api_resources/test_batches.py @@ -34,6 +34,10 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: endpoint="/v1/responses", input_file_id="string", metadata={"foo": "string"}, + output_expires_after={ + "anchor": "created_at", + "seconds": 3600, + }, ) assert_matches_type(Batch, batch, path=["response"]) @@ -196,6 +200,10 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> endpoint="/v1/responses", input_file_id="string", metadata={"foo": "string"}, + output_expires_after={ + "anchor": "created_at", + "seconds": 3600, + }, ) assert_matches_type(Batch, batch, path=["response"]) diff --git a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py index fc4bb4a18e..67c809f155 100644 --- a/tests/api_resources/test_files.py +++ b/tests/api_resources/test_files.py @@ -31,6 +31,18 @@ def test_method_create(self, client: OpenAI) -> None: ) assert_matches_type(FileObject, file, path=["response"]) + @parametrize + def test_method_create_with_all_params(self, client: OpenAI) -> None: + file = client.files.create( + file=b"raw file contents", + purpose="assistants", + expires_after={ + "anchor": "created_at", + "seconds": 3600, + }, + ) + assert_matches_type(FileObject, file, path=["response"]) + @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.files.with_raw_response.create( @@ -272,6 +284,18 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: ) assert_matches_type(FileObject, file, path=["response"]) + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: + file = await async_client.files.create( + file=b"raw file contents", + purpose="assistants", + expires_after={ + "anchor": "created_at", + "seconds": 3600, + }, + ) + assert_matches_type(FileObject, file, path=["response"]) + @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.files.with_raw_response.create( diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py index 310800b87e..868ab3a4ca 100644 --- a/tests/api_resources/test_responses.py +++ b/tests/api_resources/test_responses.py @@ -10,9 +10,7 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import 
assert_matches_type from openai._utils import assert_signatures_in_sync -from openai.types.responses import ( - Response, -) +from openai.types.responses import Response base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/api_resources/test_uploads.py b/tests/api_resources/test_uploads.py index 72a2f6c83d..0e438a3c61 100644 --- a/tests/api_resources/test_uploads.py +++ b/tests/api_resources/test_uploads.py @@ -27,6 +27,20 @@ def test_method_create(self, client: OpenAI) -> None: ) assert_matches_type(Upload, upload, path=["response"]) + @parametrize + def test_method_create_with_all_params(self, client: OpenAI) -> None: + upload = client.uploads.create( + bytes=0, + filename="filename", + mime_type="mime_type", + purpose="assistants", + expires_after={ + "anchor": "created_at", + "seconds": 3600, + }, + ) + assert_matches_type(Upload, upload, path=["response"]) + @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.uploads.with_raw_response.create( @@ -162,6 +176,20 @@ async def test_method_create(self, async_client: AsyncOpenAI) -> None: ) assert_matches_type(Upload, upload, path=["response"]) + @parametrize + async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: + upload = await async_client.uploads.create( + bytes=0, + filename="filename", + mime_type="mime_type", + purpose="assistants", + expires_after={ + "anchor": "created_at", + "seconds": 3600, + }, + ) + assert_matches_type(Upload, upload, path=["response"]) + @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.uploads.with_raw_response.create( From 96aa6050d2968d5a29f15e8841a925983f6c76f4 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 18 Aug 2025 05:06:39 +0000 Subject: [PATCH 2/2] release: 1.100.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 2dfeb2d9bb..e1f6d3e50c 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.99.9" + ".": "1.100.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 392fb8b667..0adb892623 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.100.0 (2025-08-18) + +Full Changelog: [v1.99.9...v1.100.0](https://github.com/openai/openai-python/compare/v1.99.9...v1.100.0) + +### Features + +* **api:** add new text parameters, expiration options ([e3dfa7c](https://github.com/openai/openai-python/commit/e3dfa7c417b8c750ff62d98650e75e72ad9b1477)) + ## 1.99.9 (2025-08-12) Full Changelog: [v1.99.8...v1.99.9](https://github.com/openai/openai-python/compare/v1.99.8...v1.99.9) diff --git a/pyproject.toml b/pyproject.toml index ced6079b6d..5fc0396a46 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.99.9" +version = "1.100.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 7d3b3da5d7..d666729b59 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
__title__ = "openai" -__version__ = "1.99.9" # x-release-please-version +__version__ = "1.100.0" # x-release-please-version
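
Usage note — the expiration options added in this patch let an uploaded file and the output/error files of a batch share one retention policy. A minimal sketch, assuming `OPENAI_API_KEY` is set in the environment; the filename `training.jsonl` is illustrative, and the dict shapes mirror the `ExpiresAfter`/`OutputExpiresAfter` typed dicts above:

    from openai import OpenAI

    client = OpenAI()

    # Expire the input file 24 hours after creation. `created_at` is the
    # only supported anchor; `seconds` must be between 3600 (1 hour)
    # and 2592000 (30 days).
    batch_input = client.files.create(
        file=open("training.jsonl", "rb"),  # illustrative filename
        purpose="batch",
        expires_after={"anchor": "created_at", "seconds": 86400},
    )

    # Apply the same policy to the files the batch generates.
    batch = client.batches.create(
        completion_window="24h",
        endpoint="/v1/chat/completions",
        input_file_id=batch_input.id,
        output_expires_after={"anchor": "created_at", "seconds": 86400},
    )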
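The new `text` parameter shares one shape across Chat Completions and the Responses API. Another minimal sketch (model name and prompt are illustrative; `verbosity` accepts `low`/`medium`/`high`, and `format` stays optional, mirroring the `Text` typed dicts above):

    from openai import OpenAI

    client = OpenAI()

    # Chat Completions: constrain how verbose the reply is.
    completion = client.chat.completions.create(
        model="gpt-5",  # illustrative model choice
        messages=[{"role": "user", "content": "Explain HTTP caching."}],
        text={"verbosity": "low"},
    )

    # Responses: verbosity plus an explicit output format.
    response = client.responses.create(
        model="gpt-5",
        input="Explain HTTP caching.",
        text={"verbosity": "low", "format": {"type": "text"}},
    )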