Skip to content

Commit 7b6f118

Browse files
feat(api): add /v1/responses and built-in tools (openai#2177)
[platform.openai.com/docs/changelog](https://platform.openai.com/docs/changelog)
1 parent 44b61bd commit 7b6f118

File tree

178 files changed

+8087
-674
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

178 files changed

+8087
-674
lines changed

.stats.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
1-
configured_endpoints: 74
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b524aed1c2c5c928aa4e2c546f5dbb364e7b4d5027daf05e42e210b05a97c3c6.yml
1+
configured_endpoints: 81
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-be834d63e326a82494e819085137f5eb15866f3fc787db1f3afe7168d419e18a.yml

api.md

Lines changed: 161 additions & 57 deletions
Large diffs are not rendered by default.

src/openai/_client.py

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,9 @@
3737
from .resources.chat import chat
3838
from .resources.audio import audio
3939
from .resources.uploads import uploads
40+
from .resources.responses import responses
4041
from .resources.fine_tuning import fine_tuning
42+
from .resources.vector_stores import vector_stores
4143

4244
__all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "OpenAI", "AsyncOpenAI", "Client", "AsyncClient"]
4345

@@ -52,9 +54,11 @@ class OpenAI(SyncAPIClient):
5254
moderations: moderations.Moderations
5355
models: models.Models
5456
fine_tuning: fine_tuning.FineTuning
57+
vector_stores: vector_stores.VectorStores
5558
beta: beta.Beta
5659
batches: batches.Batches
5760
uploads: uploads.Uploads
61+
responses: responses.Responses
5862
with_raw_response: OpenAIWithRawResponse
5963
with_streaming_response: OpenAIWithStreamedResponse
6064

@@ -149,9 +153,11 @@ def __init__(
149153
self.moderations = moderations.Moderations(self)
150154
self.models = models.Models(self)
151155
self.fine_tuning = fine_tuning.FineTuning(self)
156+
self.vector_stores = vector_stores.VectorStores(self)
152157
self.beta = beta.Beta(self)
153158
self.batches = batches.Batches(self)
154159
self.uploads = uploads.Uploads(self)
160+
self.responses = responses.Responses(self)
155161
self.with_raw_response = OpenAIWithRawResponse(self)
156162
self.with_streaming_response = OpenAIWithStreamedResponse(self)
157163

@@ -279,9 +285,11 @@ class AsyncOpenAI(AsyncAPIClient):
279285
moderations: moderations.AsyncModerations
280286
models: models.AsyncModels
281287
fine_tuning: fine_tuning.AsyncFineTuning
288+
vector_stores: vector_stores.AsyncVectorStores
282289
beta: beta.AsyncBeta
283290
batches: batches.AsyncBatches
284291
uploads: uploads.AsyncUploads
292+
responses: responses.AsyncResponses
285293
with_raw_response: AsyncOpenAIWithRawResponse
286294
with_streaming_response: AsyncOpenAIWithStreamedResponse
287295

@@ -376,9 +384,11 @@ def __init__(
376384
self.moderations = moderations.AsyncModerations(self)
377385
self.models = models.AsyncModels(self)
378386
self.fine_tuning = fine_tuning.AsyncFineTuning(self)
387+
self.vector_stores = vector_stores.AsyncVectorStores(self)
379388
self.beta = beta.AsyncBeta(self)
380389
self.batches = batches.AsyncBatches(self)
381390
self.uploads = uploads.AsyncUploads(self)
391+
self.responses = responses.AsyncResponses(self)
382392
self.with_raw_response = AsyncOpenAIWithRawResponse(self)
383393
self.with_streaming_response = AsyncOpenAIWithStreamedResponse(self)
384394

@@ -507,9 +517,11 @@ def __init__(self, client: OpenAI) -> None:
507517
self.moderations = moderations.ModerationsWithRawResponse(client.moderations)
508518
self.models = models.ModelsWithRawResponse(client.models)
509519
self.fine_tuning = fine_tuning.FineTuningWithRawResponse(client.fine_tuning)
520+
self.vector_stores = vector_stores.VectorStoresWithRawResponse(client.vector_stores)
510521
self.beta = beta.BetaWithRawResponse(client.beta)
511522
self.batches = batches.BatchesWithRawResponse(client.batches)
512523
self.uploads = uploads.UploadsWithRawResponse(client.uploads)
524+
self.responses = responses.ResponsesWithRawResponse(client.responses)
513525

514526

515527
class AsyncOpenAIWithRawResponse:
@@ -523,9 +535,11 @@ def __init__(self, client: AsyncOpenAI) -> None:
523535
self.moderations = moderations.AsyncModerationsWithRawResponse(client.moderations)
524536
self.models = models.AsyncModelsWithRawResponse(client.models)
525537
self.fine_tuning = fine_tuning.AsyncFineTuningWithRawResponse(client.fine_tuning)
538+
self.vector_stores = vector_stores.AsyncVectorStoresWithRawResponse(client.vector_stores)
526539
self.beta = beta.AsyncBetaWithRawResponse(client.beta)
527540
self.batches = batches.AsyncBatchesWithRawResponse(client.batches)
528541
self.uploads = uploads.AsyncUploadsWithRawResponse(client.uploads)
542+
self.responses = responses.AsyncResponsesWithRawResponse(client.responses)
529543

530544

531545
class OpenAIWithStreamedResponse:
@@ -539,9 +553,11 @@ def __init__(self, client: OpenAI) -> None:
539553
self.moderations = moderations.ModerationsWithStreamingResponse(client.moderations)
540554
self.models = models.ModelsWithStreamingResponse(client.models)
541555
self.fine_tuning = fine_tuning.FineTuningWithStreamingResponse(client.fine_tuning)
556+
self.vector_stores = vector_stores.VectorStoresWithStreamingResponse(client.vector_stores)
542557
self.beta = beta.BetaWithStreamingResponse(client.beta)
543558
self.batches = batches.BatchesWithStreamingResponse(client.batches)
544559
self.uploads = uploads.UploadsWithStreamingResponse(client.uploads)
560+
self.responses = responses.ResponsesWithStreamingResponse(client.responses)
545561

546562

547563
class AsyncOpenAIWithStreamedResponse:
@@ -555,9 +571,11 @@ def __init__(self, client: AsyncOpenAI) -> None:
555571
self.moderations = moderations.AsyncModerationsWithStreamingResponse(client.moderations)
556572
self.models = models.AsyncModelsWithStreamingResponse(client.models)
557573
self.fine_tuning = fine_tuning.AsyncFineTuningWithStreamingResponse(client.fine_tuning)
574+
self.vector_stores = vector_stores.AsyncVectorStoresWithStreamingResponse(client.vector_stores)
558575
self.beta = beta.AsyncBetaWithStreamingResponse(client.beta)
559576
self.batches = batches.AsyncBatchesWithStreamingResponse(client.batches)
560577
self.uploads = uploads.AsyncUploadsWithStreamingResponse(client.uploads)
578+
self.responses = responses.AsyncResponsesWithStreamingResponse(client.responses)
561579

562580

563581
Client = OpenAI

src/openai/resources/__init__.py

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,14 @@
6464
UploadsWithStreamingResponse,
6565
AsyncUploadsWithStreamingResponse,
6666
)
67+
from .responses import (
68+
Responses,
69+
AsyncResponses,
70+
ResponsesWithRawResponse,
71+
AsyncResponsesWithRawResponse,
72+
ResponsesWithStreamingResponse,
73+
AsyncResponsesWithStreamingResponse,
74+
)
6775
from .embeddings import (
6876
Embeddings,
6977
AsyncEmbeddings,
@@ -96,6 +104,14 @@
96104
ModerationsWithStreamingResponse,
97105
AsyncModerationsWithStreamingResponse,
98106
)
107+
from .vector_stores import (
108+
VectorStores,
109+
AsyncVectorStores,
110+
VectorStoresWithRawResponse,
111+
AsyncVectorStoresWithRawResponse,
112+
VectorStoresWithStreamingResponse,
113+
AsyncVectorStoresWithStreamingResponse,
114+
)
99115

100116
__all__ = [
101117
"Completions",
@@ -152,6 +168,12 @@
152168
"AsyncFineTuningWithRawResponse",
153169
"FineTuningWithStreamingResponse",
154170
"AsyncFineTuningWithStreamingResponse",
171+
"VectorStores",
172+
"AsyncVectorStores",
173+
"VectorStoresWithRawResponse",
174+
"AsyncVectorStoresWithRawResponse",
175+
"VectorStoresWithStreamingResponse",
176+
"AsyncVectorStoresWithStreamingResponse",
155177
"Beta",
156178
"AsyncBeta",
157179
"BetaWithRawResponse",
@@ -170,4 +192,10 @@
170192
"AsyncUploadsWithRawResponse",
171193
"UploadsWithStreamingResponse",
172194
"AsyncUploadsWithStreamingResponse",
195+
"Responses",
196+
"AsyncResponses",
197+
"ResponsesWithRawResponse",
198+
"AsyncResponsesWithRawResponse",
199+
"ResponsesWithStreamingResponse",
200+
"AsyncResponsesWithStreamingResponse",
173201
]

src/openai/resources/beta/__init__.py

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -24,22 +24,8 @@
2424
AssistantsWithStreamingResponse,
2525
AsyncAssistantsWithStreamingResponse,
2626
)
27-
from .vector_stores import (
28-
VectorStores,
29-
AsyncVectorStores,
30-
VectorStoresWithRawResponse,
31-
AsyncVectorStoresWithRawResponse,
32-
VectorStoresWithStreamingResponse,
33-
AsyncVectorStoresWithStreamingResponse,
34-
)
3527

3628
__all__ = [
37-
"VectorStores",
38-
"AsyncVectorStores",
39-
"VectorStoresWithRawResponse",
40-
"AsyncVectorStoresWithRawResponse",
41-
"VectorStoresWithStreamingResponse",
42-
"AsyncVectorStoresWithStreamingResponse",
4329
"Assistants",
4430
"AsyncAssistants",
4531
"AssistantsWithRawResponse",

src/openai/resources/beta/assistants.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@
2727
from ...types.shared.chat_model import ChatModel
2828
from ...types.beta.assistant_deleted import AssistantDeleted
2929
from ...types.shared_params.metadata import Metadata
30+
from ...types.shared.reasoning_effort import ReasoningEffort
3031
from ...types.beta.assistant_tool_param import AssistantToolParam
3132
from ...types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
3233

@@ -61,7 +62,7 @@ def create(
6162
instructions: Optional[str] | NotGiven = NOT_GIVEN,
6263
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
6364
name: Optional[str] | NotGiven = NOT_GIVEN,
64-
reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
65+
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
6566
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
6667
temperature: Optional[float] | NotGiven = NOT_GIVEN,
6768
tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -98,7 +99,7 @@ def create(
9899
99100
name: The name of the assistant. The maximum length is 256 characters.
100101
101-
reasoning_effort: **o1 and o3-mini models only**
102+
reasoning_effort: **o-series models only**
102103
103104
Constrains effort on reasoning for
104105
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -256,7 +257,7 @@ def update(
256257
]
257258
| NotGiven = NOT_GIVEN,
258259
name: Optional[str] | NotGiven = NOT_GIVEN,
259-
reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
260+
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
260261
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
261262
temperature: Optional[float] | NotGiven = NOT_GIVEN,
262263
tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -294,7 +295,7 @@ def update(
294295
295296
name: The name of the assistant. The maximum length is 256 characters.
296297
297-
reasoning_effort: **o1 and o3-mini models only**
298+
reasoning_effort: **o-series models only**
298299
299300
Constrains effort on reasoning for
300301
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -504,7 +505,7 @@ async def create(
504505
instructions: Optional[str] | NotGiven = NOT_GIVEN,
505506
metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
506507
name: Optional[str] | NotGiven = NOT_GIVEN,
507-
reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
508+
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
508509
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
509510
temperature: Optional[float] | NotGiven = NOT_GIVEN,
510511
tool_resources: Optional[assistant_create_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -541,7 +542,7 @@ async def create(
541542
542543
name: The name of the assistant. The maximum length is 256 characters.
543544
544-
reasoning_effort: **o1 and o3-mini models only**
545+
reasoning_effort: **o-series models only**
545546
546547
Constrains effort on reasoning for
547548
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
@@ -699,7 +700,7 @@ async def update(
699700
]
700701
| NotGiven = NOT_GIVEN,
701702
name: Optional[str] | NotGiven = NOT_GIVEN,
702-
reasoning_effort: Optional[Literal["low", "medium", "high"]] | NotGiven = NOT_GIVEN,
703+
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
703704
response_format: Optional[AssistantResponseFormatOptionParam] | NotGiven = NOT_GIVEN,
704705
temperature: Optional[float] | NotGiven = NOT_GIVEN,
705706
tool_resources: Optional[assistant_update_params.ToolResources] | NotGiven = NOT_GIVEN,
@@ -737,7 +738,7 @@ async def update(
737738
738739
name: The name of the assistant. The maximum length is 256 characters.
739740
740-
reasoning_effort: **o1 and o3-mini models only**
741+
reasoning_effort: **o-series models only**
741742
742743
Constrains effort on reasoning for
743744
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently

src/openai/resources/beta/beta.py

Lines changed: 0 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -28,14 +28,6 @@
2828
RealtimeWithStreamingResponse,
2929
AsyncRealtimeWithStreamingResponse,
3030
)
31-
from .vector_stores.vector_stores import (
32-
VectorStores,
33-
AsyncVectorStores,
34-
VectorStoresWithRawResponse,
35-
AsyncVectorStoresWithRawResponse,
36-
VectorStoresWithStreamingResponse,
37-
AsyncVectorStoresWithStreamingResponse,
38-
)
3931

4032
__all__ = ["Beta", "AsyncBeta"]
4133

@@ -45,10 +37,6 @@ class Beta(SyncAPIResource):
4537
def realtime(self) -> Realtime:
4638
return Realtime(self._client)
4739

48-
@cached_property
49-
def vector_stores(self) -> VectorStores:
50-
return VectorStores(self._client)
51-
5240
@cached_property
5341
def assistants(self) -> Assistants:
5442
return Assistants(self._client)
@@ -82,10 +70,6 @@ class AsyncBeta(AsyncAPIResource):
8270
def realtime(self) -> AsyncRealtime:
8371
return AsyncRealtime(self._client)
8472

85-
@cached_property
86-
def vector_stores(self) -> AsyncVectorStores:
87-
return AsyncVectorStores(self._client)
88-
8973
@cached_property
9074
def assistants(self) -> AsyncAssistants:
9175
return AsyncAssistants(self._client)
@@ -122,10 +106,6 @@ def __init__(self, beta: Beta) -> None:
122106
def realtime(self) -> RealtimeWithRawResponse:
123107
return RealtimeWithRawResponse(self._beta.realtime)
124108

125-
@cached_property
126-
def vector_stores(self) -> VectorStoresWithRawResponse:
127-
return VectorStoresWithRawResponse(self._beta.vector_stores)
128-
129109
@cached_property
130110
def assistants(self) -> AssistantsWithRawResponse:
131111
return AssistantsWithRawResponse(self._beta.assistants)
@@ -143,10 +123,6 @@ def __init__(self, beta: AsyncBeta) -> None:
143123
def realtime(self) -> AsyncRealtimeWithRawResponse:
144124
return AsyncRealtimeWithRawResponse(self._beta.realtime)
145125

146-
@cached_property
147-
def vector_stores(self) -> AsyncVectorStoresWithRawResponse:
148-
return AsyncVectorStoresWithRawResponse(self._beta.vector_stores)
149-
150126
@cached_property
151127
def assistants(self) -> AsyncAssistantsWithRawResponse:
152128
return AsyncAssistantsWithRawResponse(self._beta.assistants)
@@ -164,10 +140,6 @@ def __init__(self, beta: Beta) -> None:
164140
def realtime(self) -> RealtimeWithStreamingResponse:
165141
return RealtimeWithStreamingResponse(self._beta.realtime)
166142

167-
@cached_property
168-
def vector_stores(self) -> VectorStoresWithStreamingResponse:
169-
return VectorStoresWithStreamingResponse(self._beta.vector_stores)
170-
171143
@cached_property
172144
def assistants(self) -> AssistantsWithStreamingResponse:
173145
return AssistantsWithStreamingResponse(self._beta.assistants)
@@ -185,10 +157,6 @@ def __init__(self, beta: AsyncBeta) -> None:
185157
def realtime(self) -> AsyncRealtimeWithStreamingResponse:
186158
return AsyncRealtimeWithStreamingResponse(self._beta.realtime)
187159

188-
@cached_property
189-
def vector_stores(self) -> AsyncVectorStoresWithStreamingResponse:
190-
return AsyncVectorStoresWithStreamingResponse(self._beta.vector_stores)
191-
192160
@cached_property
193161
def assistants(self) -> AsyncAssistantsWithStreamingResponse:
194162
return AsyncAssistantsWithStreamingResponse(self._beta.assistants)

0 commit comments

Comments
 (0)