@@ -86,7 +86,9 @@ def check_tool_calling(response, first_resp=True, prefix=""):
 @pytest.mark.asyncio(loop_scope="module")
 async def test_reasoning(client: openai.AsyncOpenAI, model: str):
     response = await client.responses.create(
-        model=model, input="Which one is larger as numeric, 9.9 or 9.11?")
+        model=model,
+        input="Which one is larger as numeric, 9.9 or 9.11?",
+        max_output_tokens=1024)
 
     check_reponse(response, "test_reasoning: ")
 
@@ -96,9 +98,10 @@ async def test_reasoning_effort(client: openai.AsyncOpenAI, model: str):
     for effort in ["low", "medium", "high"]:
         response = await client.responses.create(
             model=model,
-            instructions="Use less than 1024 tokens for reasoning",
+            instructions="Use less than 1024 tokens for the whole response",
             input="Which one is larger as numeric, 9.9 or 9.11?",
-            reasoning={"effort": effort})
+            reasoning={"effort": effort},
+            max_output_tokens=1024)
         check_reponse(response, f"test_reasoning_effort_{effort}: ")
 
 
@@ -121,20 +124,23 @@ async def test_chat(client: openai.AsyncOpenAI, model: str):
     }, {
         "role": "user",
         "content": "Tell me a joke."
-    }])
+    }],
+                                             max_output_tokens=1024)
     check_reponse(response, "test_chat: ")
 
 
 @pytest.mark.asyncio(loop_scope="module")
 async def test_multi_turn_chat(client: openai.AsyncOpenAI, model: str):
     response = await client.responses.create(model=model,
-                                             input="What is the answer of 1+1?")
+                                             input="What is the answer of 1+1?",
+                                             max_output_tokens=1024)
     check_reponse(response, "test_multi_turn_chat_1: ")
 
     response_2 = await client.responses.create(
         model=model,
         input="What is the answer of previous question?",
-        previous_response_id=response.id)
+        previous_response_id=response.id,
+        max_output_tokens=1024)
     check_reponse(response_2, "test_multi_turn_chat_2: ")
 
 
@@ -168,11 +174,10 @@ async def test_tool_calls(client: openai.AsyncOpenAI, model: str):
         }
     }
     messages = [{"role": "user", "content": "What is the weather like in SF?"}]
-    response = await client.responses.create(
-        model=model,
-        input=messages,
-        tools=[tool_get_current_weather],
-    )
+    response = await client.responses.create(model=model,
+                                             input=messages,
+                                             tools=[tool_get_current_weather],
+                                             max_output_tokens=1024)
     messages.extend(response.output)
     function_call = check_tool_calling(response, True, "test_tool_calls: ")
 
@@ -188,7 +193,8 @@ async def test_tool_calls(client: openai.AsyncOpenAI, model: str):
 
     response = await client.responses.create(model=model,
                                              input=messages,
-                                             tools=[tool_get_current_weather])
+                                             tools=[tool_get_current_weather],
+                                             max_output_tokens=1024)
 
     check_tool_calling(response, False, "test_tool_calls: ")
 
@@ -199,7 +205,7 @@ async def test_streaming(client: openai.AsyncOpenAI, model: str):
         model=model,
         input="Explain the theory of relativity in brief.",
         stream=True,
-    )
+        max_output_tokens=1024)
 
     reasoning_deltas, message_deltas = list(), list()
     async for event in stream:
@@ -240,12 +246,11 @@ async def test_streaming_tool_call(client: openai.AsyncOpenAI, model: str):
         }
     }
     messages = [{"role": "user", "content": "What is the weather like in SF?"}]
-    stream = await client.responses.create(
-        model=model,
-        input=messages,
-        tools=[tool_get_current_weather],
-        stream=True,
-    )
+    stream = await client.responses.create(model=model,
+                                           input=messages,
+                                           tools=[tool_get_current_weather],
+                                           stream=True,
+                                           max_output_tokens=1024)
 
     function_call = None
     reasoning_deltas = list()
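
For reference, a minimal standalone sketch of the call pattern this diff converges on: every responses.create call now passes max_output_tokens=1024 so a runaway reasoning trace cannot stall the suite. The base URL, API key, and model name below are placeholders (the tests receive client and model from pytest fixtures):

import asyncio

import openai

# Placeholder endpoint and model name; the real tests get these from fixtures.
BASE_URL = "http://localhost:8000/v1"
MODEL = "my-reasoning-model"


async def main() -> None:
    client = openai.AsyncOpenAI(base_url=BASE_URL, api_key="EMPTY")
    # Bound generation length, mirroring the change applied throughout the diff.
    response = await client.responses.create(
        model=MODEL,
        input="Which one is larger as numeric, 9.9 or 9.11?",
        max_output_tokens=1024)
    print(response.output_text)


if __name__ == "__main__":
    asyncio.run(main())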