     parse_response,
     type_to_text_format_param as _type_to_text_format_param,
 )
-from ...types.shared.chat_model import ChatModel
 from ...types.responses.response import Response
 from ...types.responses.tool_param import ToolParam, ParseableToolParam
 from ...types.shared_params.metadata import Metadata
@@ -881,22 +880,29 @@ def stream(
         self,
         *,
         input: Union[str, ResponseInputParam],
-        model: Union[str, ChatModel],
+        model: ResponsesModel,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
         tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
+        prompt_cache_key: str | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        safety_identifier: str | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -913,22 +919,29 @@ def stream(
         *,
         response_id: str | NotGiven = NOT_GIVEN,
         input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
-        model: Union[str, ChatModel] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
         tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
+        prompt_cache_key: str | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        safety_identifier: str | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
         text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -943,18 +956,25 @@ def stream(
         new_response_args = {
             "input": input,
             "model": model,
+            "conversation": conversation,
             "include": include,
             "instructions": instructions,
             "max_output_tokens": max_output_tokens,
+            "max_tool_calls": max_tool_calls,
             "metadata": metadata,
             "parallel_tool_calls": parallel_tool_calls,
             "previous_response_id": previous_response_id,
+            "prompt": prompt,
+            "prompt_cache_key": prompt_cache_key,
             "reasoning": reasoning,
+            "safety_identifier": safety_identifier,
+            "service_tier": service_tier,
             "store": store,
             "stream_options": stream_options,
             "temperature": temperature,
             "text": text,
             "tool_choice": tool_choice,
+            "top_logprobs": top_logprobs,
             "top_p": top_p,
             "truncation": truncation,
             "user": user,
@@ -989,19 +1009,26 @@ def stream(
                 input=input,
                 model=model,
                 tools=tools,
+                conversation=conversation,
                 include=include,
                 instructions=instructions,
                 max_output_tokens=max_output_tokens,
+                max_tool_calls=max_tool_calls,
                 metadata=metadata,
                 parallel_tool_calls=parallel_tool_calls,
                 previous_response_id=previous_response_id,
+                prompt=prompt,
+                prompt_cache_key=prompt_cache_key,
                 store=store,
                 stream_options=stream_options,
                 stream=True,
                 temperature=temperature,
                 text=text,
                 tool_choice=tool_choice,
                 reasoning=reasoning,
+                safety_identifier=safety_identifier,
+                service_tier=service_tier,
+                top_logprobs=top_logprobs,
                 top_p=top_p,
                 truncation=truncation,
                 user=user,
@@ -1057,7 +1084,7 @@ def parse(
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
@@ -2275,22 +2302,29 @@ def stream(
         self,
         *,
         input: Union[str, ResponseInputParam],
-        model: Union[str, ChatModel],
+        model: ResponsesModel,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
         tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
+        prompt_cache_key: str | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        safety_identifier: str | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -2307,22 +2341,29 @@ def stream(
         *,
         response_id: str | NotGiven = NOT_GIVEN,
         input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
-        model: Union[str, ChatModel] | NotGiven = NOT_GIVEN,
+        model: ResponsesModel | NotGiven = NOT_GIVEN,
         background: Optional[bool] | NotGiven = NOT_GIVEN,
         text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
         tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
+        conversation: Optional[response_create_params.Conversation] | NotGiven = NOT_GIVEN,
         include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
         instructions: Optional[str] | NotGiven = NOT_GIVEN,
         max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
+        max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
         previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
+        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
+        prompt_cache_key: str | NotGiven = NOT_GIVEN,
         reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
+        safety_identifier: str | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
+        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -2337,18 +2378,25 @@ def stream(
         new_response_args = {
             "input": input,
             "model": model,
+            "conversation": conversation,
             "include": include,
             "instructions": instructions,
             "max_output_tokens": max_output_tokens,
+            "max_tool_calls": max_tool_calls,
             "metadata": metadata,
             "parallel_tool_calls": parallel_tool_calls,
             "previous_response_id": previous_response_id,
+            "prompt": prompt,
+            "prompt_cache_key": prompt_cache_key,
             "reasoning": reasoning,
+            "safety_identifier": safety_identifier,
+            "service_tier": service_tier,
             "store": store,
             "stream_options": stream_options,
             "temperature": temperature,
             "text": text,
             "tool_choice": tool_choice,
+            "top_logprobs": top_logprobs,
             "top_p": top_p,
             "truncation": truncation,
             "user": user,
@@ -2384,21 +2432,29 @@ def stream(
                 model=model,
                 stream=True,
                 tools=tools,
+                conversation=conversation,
                 include=include,
                 instructions=instructions,
                 max_output_tokens=max_output_tokens,
+                max_tool_calls=max_tool_calls,
                 metadata=metadata,
                 parallel_tool_calls=parallel_tool_calls,
                 previous_response_id=previous_response_id,
+                prompt=prompt,
+                prompt_cache_key=prompt_cache_key,
                 store=store,
                 stream_options=stream_options,
                 temperature=temperature,
                 text=text,
                 tool_choice=tool_choice,
                 reasoning=reasoning,
+                safety_identifier=safety_identifier,
+                service_tier=service_tier,
+                top_logprobs=top_logprobs,
                 top_p=top_p,
                 truncation=truncation,
                 user=user,
+                background=background,
                 extra_headers=extra_headers,
                 extra_query=extra_query,
                 extra_body=extra_body,
@@ -2455,7 +2511,7 @@ async def parse(
         stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
         stream_options: Optional[response_create_params.StreamOptions] | NotGiven = NOT_GIVEN,
         temperature: Optional[float] | NotGiven = NOT_GIVEN,
-        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
+        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
         tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
         tools: Iterable [ParseableToolParam] | NotGiven = NOT_GIVEN,
         top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
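Taken together, the diff threads the new Responses API parameters (conversation, max_tool_calls, prompt, prompt_cache_key, safety_identifier, service_tier, top_logprobs) through both the sync and async stream() helpers and forwards them to create(). A minimal usage sketch of the new surface follows; it assumes a current openai-python client, and the model id and identifier values are illustrative, not taken from the diff:

from openai import OpenAI

client = OpenAI()

# Exercise the parameters this diff adds to responses.stream().
with client.responses.stream(
    model="gpt-4.1",                      # assumption: any ResponsesModel-compatible id
    input="Summarize the latest release notes in two sentences.",
    prompt_cache_key="release-notes-v1",  # new: key for prompt-cache reuse
    safety_identifier="user-1234",        # new: stable end-user identifier
    service_tier="auto",                  # new: "auto" | "default" | "flex" | "scale" | "priority"
    top_logprobs=3,                       # new: number of per-token logprobs to return
    max_tool_calls=2,                     # new: cap on built-in tool calls per response
) as stream:
    for event in stream:
        if event.type == "response.output_text.delta":
            print(event.delta, end="")
    final_response = stream.get_final_response()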