From c39d5fd3f5429c6d41f257669a1dd4c67a477455 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 19 Aug 2025 13:48:27 +0000 Subject: [PATCH 1/2] chore(api): accurately represent shape for verbosity on Chat Completions --- .stats.yml | 6 +- .../resources/chat/completions/completions.py | 30 ++------- src/openai/resources/responses/responses.py | 65 +++++++++++++++---- .../types/chat/completion_create_params.py | 15 +---- .../types/graders/text_similarity_grader.py | 16 ++++- .../graders/text_similarity_grader_param.py | 16 ++++- src/openai/types/responses/response.py | 39 +++-------- .../types/responses/response_create_params.py | 38 +++-------- tests/api_resources/chat/test_completions.py | 4 -- tests/api_resources/test_responses.py | 4 +- tests/lib/chat/test_completions.py | 2 +- 11 files changed, 110 insertions(+), 125 deletions(-) diff --git a/.stats.yml b/.stats.yml index 66c46e7730..81c991168c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-24be531010b354303d741fc9247c1f84f75978f9f7de68aca92cb4f240a04722.yml -openapi_spec_hash: 3e46f439f6a863beadc71577eb4efa15 -config_hash: ed87b9139ac595a04a2162d754df2fed +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-7ef7a457c3bf05364e66e48c9ca34f31bfef1f6c9b7c15b1812346105e0abb16.yml +openapi_spec_hash: a2b1f5d8fbb62175c93b0ebea9f10063 +config_hash: 76afa3236f36854a8705f1281b1990b8 diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py index bc5fe0fc05..7e209ff0ee 100644 --- a/src/openai/resources/chat/completions/completions.py +++ b/src/openai/resources/chat/completions/completions.py @@ -103,7 +103,6 @@ def parse( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -204,7 +203,6 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": _type_to_response_format(response_format), - "text": text, "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, @@ -267,7 +265,6 @@ def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -459,7 +456,7 @@ def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - Supports text and image inputs. Note: image inputs over 10MB will be dropped. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. 
stream: If set to true, the model response data will be streamed to the client as it is generated using @@ -556,7 +553,6 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -757,7 +753,7 @@ def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - Supports text and image inputs. Note: image inputs over 10MB will be dropped. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. stream_options: Options for streaming response. Only set this when you set `stream: true`. @@ -845,7 +841,6 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1046,7 +1041,7 @@ def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - Supports text and image inputs. Note: image inputs over 10MB will be dropped. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. stream_options: Options for streaming response. Only set this when you set `stream: true`. 
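The hunks above drop the nested `text` parameter from each Chat Completions `create()` overload while leaving the streaming docstrings intact. As a point of reference for those docstrings, here is a minimal streaming sketch, assuming a configured `OpenAI` client; the model name is illustrative, and `include_usage` mirrors the value exercised in the test changes later in this patch:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Stream a chat completion; `stream_options` is only valid alongside
# `stream=True`, and `include_usage` appends a final usage-only chunk.
stream = client.chat.completions.create(
    model="gpt-4o",  # illustrative model name
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,
    stream_options={"include_usage": True},
)
for chunk in stream:
    if chunk.choices:
        print(chunk.choices[0].delta.content or "", end="")
    if chunk.usage is not None:  # only the final chunk carries usage
        print(f"\n(total tokens: {chunk.usage.total_tokens})")
```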
@@ -1134,7 +1129,6 @@ def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1181,7 +1175,6 @@ def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, - "text": text, "tool_choice": tool_choice, "tools": tools, "top_logprobs": top_logprobs, @@ -1404,7 +1397,6 @@ def stream( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -1475,7 +1467,6 @@ def stream( presence_penalty=presence_penalty, prompt_cache_key=prompt_cache_key, reasoning_effort=reasoning_effort, - text=text, safety_identifier=safety_identifier, seed=seed, service_tier=service_tier, @@ -1548,7 +1539,6 @@ async def parse( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -1649,7 +1639,6 @@ def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseForma "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": _type_to_response_format(response_format), - "text": text, "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, @@ -1712,7 +1701,6 @@ async def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1904,7 +1892,7 @@ async def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - Supports text and image inputs. Note: image inputs over 10MB will be dropped. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. 
stream: If set to true, the model response data will be streamed to the client as it is generated using @@ -2001,7 +1989,6 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2202,7 +2189,7 @@ async def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - Supports text and image inputs. Note: image inputs over 10MB will be dropped. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. stream_options: Options for streaming response. Only set this when you set `stream: true`. @@ -2290,7 +2277,6 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2491,7 +2477,7 @@ async def create( our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - Supports text and image inputs. Note: image inputs over 10MB will be dropped. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. stream_options: Options for streaming response. Only set this when you set `stream: true`. 
@@ -2579,7 +2565,6 @@ async def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolUnionParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2626,7 +2611,6 @@ async def create( "stream": stream, "stream_options": stream_options, "temperature": temperature, - "text": text, "tool_choice": tool_choice, "tools": tools, "top_logprobs": top_logprobs, @@ -2849,7 +2833,6 @@ def stream( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, - text: completion_create_params.Text | NotGiven = NOT_GIVEN, safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, @@ -2921,7 +2904,6 @@ def stream( presence_penalty=presence_penalty, prompt_cache_key=prompt_cache_key, reasoning_effort=reasoning_effort, - text=text, safety_identifier=safety_identifier, seed=seed, service_tier=service_tier, diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index 97ad0faa94..375f8b7e71 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -43,6 +43,7 @@ from ...types.responses.response_input_param import ResponseInputParam from ...types.responses.response_prompt_param import ResponsePromptParam from ...types.responses.response_stream_event import ResponseStreamEvent +from ...types.responses.response_text_config_param import ResponseTextConfigParam __all__ = ["Responses", "AsyncResponses"] @@ -94,7 +95,7 @@ def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -238,6 +239,12 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. 
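In the responses.py hunks above, the `text` parameter on `responses.create()` changes annotation from the inline `response_create_params.Text` to the shared `ResponseTextConfigParam`, and the new docstring describes what it configures. A minimal sketch of the plain-text case, assuming the shared type keeps the `verbosity` field defined on the inline shape this patch removes; the model name is illustrative:

```python
from openai import OpenAI

client = OpenAI()

# Ask for a terse plain-text answer. "low", "medium", and "high" are the
# supported verbosity values per the docstrings elsewhere in this patch.
response = client.responses.create(
    model="gpt-5",
    input="Summarize the plot of Hamlet.",
    text={"verbosity": "low"},
)
print(response.output_text)
```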
@@ -315,7 +322,7 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -459,6 +466,12 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. @@ -536,7 +549,7 @@ def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -680,6 +693,12 @@ def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. 
@@ -756,7 +775,7 @@ def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -847,7 +866,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -879,7 +898,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -1008,7 +1027,7 @@ def parse( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1439,7 +1458,7 @@ async def create( stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1583,6 +1602,12 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. 
@@ -1660,7 +1685,7 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -1804,6 +1829,12 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. @@ -1881,7 +1912,7 @@ async def create( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2025,6 +2056,12 @@ async def create( focused and deterministic. We generally recommend altering this or `top_p` but not both. + text: Configuration options for a text response from the model. Can be plain text or + structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + tool_choice: How the model should select which tool (or tools) to use when generating a response. See the `tools` parameter to see how to specify which tools the model can call. 
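The async overloads above repeat the same `text` docstring. Its structured-output side goes through the `format` key; the docstring removed later in this patch notes that configuring `{ "type": "json_schema" }` enables Structured Outputs. A hedged sketch of that case, with the schema name and fields purely illustrative:

```python
from openai import OpenAI

client = OpenAI()

# Constrain the response to a JSON object matching a supplied schema.
response = client.responses.create(
    model="gpt-4o",
    input="Extract the city and country from: 'I live in Paris, France.'",
    text={
        "format": {
            "type": "json_schema",
            "name": "location",  # illustrative schema name
            "schema": {
                "type": "object",
                "properties": {
                    "city": {"type": "string"},
                    "country": {"type": "string"},
                },
                "required": ["city", "country"],
                "additionalProperties": False,
            },
            "strict": True,
        }
    },
)
print(response.output_text)  # a JSON string matching the schema
```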
@@ -2101,7 +2138,7 @@ async def create( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, @@ -2192,7 +2229,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -2224,7 +2261,7 @@ def stream( store: Optional[bool] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN, @@ -2357,7 +2394,7 @@ async def parse( stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, - text: response_create_params.Text | NotGiven = NOT_GIVEN, + text: ResponseTextConfigParam | NotGiven = NOT_GIVEN, tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN, tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 3ebab45b56..da37ee4c13 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -25,7 +25,6 @@ "FunctionCall", "Function", "ResponseFormat", - "Text", "WebSearchOptions", "WebSearchOptionsUserLocation", "WebSearchOptionsUserLocationApproximate", @@ -257,7 +256,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): our [model distillation](https://platform.openai.com/docs/guides/distillation) or [evals](https://platform.openai.com/docs/guides/evals) products. - Supports text and image inputs. Note: image inputs over 10MB will be dropped. + Supports text and image inputs. Note: image inputs over 8MB will be dropped. """ stream_options: Optional[ChatCompletionStreamOptionsParam] @@ -271,8 +270,6 @@ class CompletionCreateParamsBase(TypedDict, total=False): this or `top_p` but not both. """ - text: Text - tool_choice: ChatCompletionToolChoiceOptionParam """ Controls which (if any) tool is called by the model. 
`none` means the model will @@ -367,16 +364,6 @@ class Function(TypedDict, total=False): ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject] -class Text(TypedDict, total=False): - verbosity: Optional[Literal["low", "medium", "high"]] - """Constrains the verbosity of the model's response. - - Lower values will result in more concise responses, while higher values will - result in more verbose responses. Currently supported values are `low`, - `medium`, and `high`. - """ - - class WebSearchOptionsUserLocationApproximate(TypedDict, total=False): city: str """Free text input for the city of the user, e.g. `San Francisco`.""" diff --git a/src/openai/types/graders/text_similarity_grader.py b/src/openai/types/graders/text_similarity_grader.py index 738d317766..9082ac8969 100644 --- a/src/openai/types/graders/text_similarity_grader.py +++ b/src/openai/types/graders/text_similarity_grader.py @@ -9,12 +9,22 @@ class TextSimilarityGrader(BaseModel): evaluation_metric: Literal[ - "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l" + "cosine", + "fuzzy_match", + "bleu", + "gleu", + "meteor", + "rouge_1", + "rouge_2", + "rouge_3", + "rouge_4", + "rouge_5", + "rouge_l", ] """The evaluation metric to use. - One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, - `rouge_4`, `rouge_5`, or `rouge_l`. + One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, + `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. """ input: str diff --git a/src/openai/types/graders/text_similarity_grader_param.py b/src/openai/types/graders/text_similarity_grader_param.py index db14553217..1646afc84b 100644 --- a/src/openai/types/graders/text_similarity_grader_param.py +++ b/src/openai/types/graders/text_similarity_grader_param.py @@ -10,13 +10,23 @@ class TextSimilarityGraderParam(TypedDict, total=False): evaluation_metric: Required[ Literal[ - "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l" + "cosine", + "fuzzy_match", + "bleu", + "gleu", + "meteor", + "rouge_1", + "rouge_2", + "rouge_3", + "rouge_4", + "rouge_5", + "rouge_l", ] ] """The evaluation metric to use. - One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, - `rouge_4`, `rouge_5`, or `rouge_l`. + One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, + `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`. """ input: Required[str] diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index 49e38a46fe..49f60bbc5c 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -18,11 +18,11 @@ from .tool_choice_allowed import ToolChoiceAllowed from .tool_choice_options import ToolChoiceOptions from .response_output_item import ResponseOutputItem +from .response_text_config import ResponseTextConfig from .tool_choice_function import ToolChoiceFunction from ..shared.responses_model import ResponsesModel -from .response_format_text_config import ResponseFormatTextConfig -__all__ = ["Response", "IncompleteDetails", "ToolChoice", "Text"] +__all__ = ["Response", "IncompleteDetails", "ToolChoice"] class IncompleteDetails(BaseModel): @@ -35,32 +35,6 @@ class IncompleteDetails(BaseModel): ] -class Text(BaseModel): - format: Optional[ResponseFormatTextConfig] = None - """An object specifying the format that the model must output. 
- - Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - ensures the model will match your supplied JSON schema. Learn more in the - [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - - The default format is `{ "type": "text" }` with no additional options. - - **Not recommended for gpt-4o and newer models:** - - Setting to `{ "type": "json_object" }` enables the older JSON mode, which - ensures the message the model generates is valid JSON. Using `json_schema` is - preferred for models that support it. - """ - - verbosity: Optional[Literal["low", "medium", "high"]] = None - """Constrains the verbosity of the model's response. - - Lower values will result in more concise responses, while higher values will - result in more verbose responses. Currently supported values are `low`, - `medium`, and `high`. - """ - - class Response(BaseModel): id: str """Unique identifier for this Response.""" @@ -244,7 +218,14 @@ class Response(BaseModel): `incomplete`. """ - text: Optional[Text] = None + text: Optional[ResponseTextConfig] = None + """Configuration options for a text response from the model. + + Can be plain text or structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ top_logprobs: Optional[int] = None """ diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index 89afccf06b..0cd761fcf0 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -16,14 +16,13 @@ from ..shared_params.reasoning import Reasoning from .tool_choice_custom_param import ToolChoiceCustomParam from .tool_choice_allowed_param import ToolChoiceAllowedParam +from .response_text_config_param import ResponseTextConfigParam from .tool_choice_function_param import ToolChoiceFunctionParam from ..shared_params.responses_model import ResponsesModel -from .response_format_text_config_param import ResponseFormatTextConfigParam __all__ = [ "ResponseCreateParamsBase", "StreamOptions", - "Text", "ToolChoice", "ResponseCreateParamsNonStreaming", "ResponseCreateParamsStreaming", @@ -183,7 +182,14 @@ class ResponseCreateParamsBase(TypedDict, total=False): this or `top_p` but not both. """ - text: Text + text: ResponseTextConfigParam + """Configuration options for a text response from the model. + + Can be plain text or structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ tool_choice: ToolChoice """ @@ -260,32 +266,6 @@ class StreamOptions(TypedDict, total=False): """ -class Text(TypedDict, total=False): - format: ResponseFormatTextConfigParam - """An object specifying the format that the model must output. - - Configuring `{ "type": "json_schema" }` enables Structured Outputs, which - ensures the model will match your supplied JSON schema. Learn more in the - [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). - - The default format is `{ "type": "text" }` with no additional options. - - **Not recommended for gpt-4o and newer models:** - - Setting to `{ "type": "json_object" }` enables the older JSON mode, which - ensures the message the model generates is valid JSON. 
Using `json_schema` is - preferred for models that support it. - """ - - verbosity: Optional[Literal["low", "medium", "high"]] - """Constrains the verbosity of the model's response. - - Lower values will result in more concise responses, while higher values will - result in more verbose responses. Currently supported values are `low`, - `medium`, and `high`. - """ - - ToolChoice: TypeAlias = Union[ ToolChoiceOptions, ToolChoiceAllowedParam, diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 885c3bd9a6..358ea18cbb 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -86,7 +86,6 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "include_usage": True, }, temperature=1, - text={"verbosity": "low"}, tool_choice="none", tools=[ { @@ -219,7 +218,6 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "include_usage": True, }, temperature=1, - text={"verbosity": "low"}, tool_choice="none", tools=[ { @@ -529,7 +527,6 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "include_usage": True, }, temperature=1, - text={"verbosity": "low"}, tool_choice="none", tools=[ { @@ -662,7 +659,6 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "include_usage": True, }, temperature=1, - text={"verbosity": "low"}, tool_choice="none", tools=[ { diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py index 868ab3a4ca..310800b87e 100644 --- a/tests/api_resources/test_responses.py +++ b/tests/api_resources/test_responses.py @@ -10,7 +10,9 @@ from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai._utils import assert_signatures_in_sync -from openai.types.responses import Response +from openai.types.responses import ( + Response, +) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") diff --git a/tests/lib/chat/test_completions.py b/tests/lib/chat/test_completions.py index f04a0e3782..f69bc09ca3 100644 --- a/tests/lib/chat/test_completions.py +++ b/tests/lib/chat/test_completions.py @@ -541,7 +541,7 @@ class Location(BaseModel): content_snapshot=snapshot( '{"id": "chatcmpl-ABfvvX7eB1KsfeZj8VcF3z7G7SbaA", "object": "chat.completion", "created": 1727346163, "model": "gpt-4o-2024-08-06", "choices": [{"index": 0, "message": {"role": "assistant", "content": "{\\"", "refusal": null}, "logprobs": null, "finish_reason": "length"}], "usage": {"prompt_tokens": 79, "completion_tokens": 1, "total_tokens": 80, "completion_tokens_details": {"reasoning_tokens": 0}}, "system_fingerprint": "fp_7568d46099"}' ), - path="/chat/completions", + path="/chat/completions", mock_client=client, respx_mock=respx_mock, ) From 5bcb2a5355b5c6995a37535a9e31275f54118d64 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 19 Aug 2025 14:10:47 +0000 Subject: [PATCH 2/2] release: 1.100.2 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 8 ++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6fb2e7075d..8910831376 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.100.1" + ".": "1.100.2" } \ No newline at end of file diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 4f3362af2f..2254a59f75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 1.100.2 (2025-08-19) + +Full Changelog: [v1.100.1...v1.100.2](https://github.com/openai/openai-python/compare/v1.100.1...v1.100.2) + +### Chores + +* **api:** accurately represent shape for verbosity on Chat Completions ([c39d5fd](https://github.com/openai/openai-python/commit/c39d5fd3f5429c6d41f257669a1dd4c67a477455)) + ## 1.100.1 (2025-08-18) Full Changelog: [v1.100.0...v1.100.1](https://github.com/openai/openai-python/compare/v1.100.0...v1.100.1) diff --git a/pyproject.toml b/pyproject.toml index a9baee6a55..c8c3d2fd2b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.100.1" +version = "1.100.2" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 608d190655..29840a21b8 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.100.1" # x-release-please-version +__version__ = "1.100.2" # x-release-please-version
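One further change from the first commit worth illustrating: the text-similarity grader types gain a `cosine` evaluation metric alongside `fuzzy_match`, `bleu`, `gleu`, `meteor`, and the `rouge_*` family. A hedged sketch of constructing such a grader, assuming the usual `type` and `name` fields accompany the `evaluation_metric`, `input`, and `reference` fields shown in the grader hunks; the template placeholders are illustrative:

```python
from openai.types.graders import TextSimilarityGraderParam

# Score model output against a reference answer using the newly added
# `cosine` similarity metric; template placeholders are illustrative.
grader: TextSimilarityGraderParam = {
    "type": "text_similarity",
    "name": "answer_similarity",
    "evaluation_metric": "cosine",
    "input": "{{ sample.output_text }}",
    "reference": "{{ item.reference_answer }}",
}
```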