@@ -91,6 +91,7 @@ async def test_ok(self, faker: faker.Faker, func_request: LLMFuncRequest) -> None:
                 + ChatCompletionsStreamingEvent(choices=[OneStreamingChoice(delta=one_message)]).model_dump_json()
                 for one_message in generated_messages
             )
+            + f"\n\ndata: {ChatCompletionsStreamingEvent(choices=[]).model_dump_json()}"
             + f"\n\ndata: [DONE]\n\ndata: {faker.pystr()}\n\n"
         )
         response: typing.Final = httpx.Response(
@@ -104,23 +105,6 @@ async def test_ok(self, faker: faker.Faker, func_request: LLMFuncRequest) -> None:
 
         assert result == expected_result
 
-    async def test_fails_without_alternatives(self) -> None:
-        response_content: typing.Final = (
-            f"data: {ChatCompletionsStreamingEvent.model_construct(choices=[]).model_dump_json()}\n\n"
-        )
-        response: typing.Final = httpx.Response(
-            200,
-            headers={"Content-Type": "text/event-stream"},
-            content=response_content,
-        )
-        client: typing.Final = any_llm_client.get_client(
-            OpenAIConfigFactory.build(),
-            transport=httpx.MockTransport(lambda _: response),
-        )
-
-        with pytest.raises(LLMResponseValidationError):
-            await consume_llm_message_chunks(client.stream_llm_message_chunks(**LLMFuncRequestFactory.build()))
-
 
 class TestOpenAILLMErrors:
     @pytest.mark.parametrize("stream", [True, False])
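
Read together, the two hunks suggest a change in the behaviour under test: a streaming event whose choices list is empty apparently no longer raises LLMResponseValidationError (hence the deleted test_fails_without_alternatives), and the happy-path fixture now interleaves such an event with the real chunks, the "data: [DONE]" terminator, and trailing junk. The sketch below is only a minimal illustration of that tolerant parsing style, not any_llm_client's actual implementation; all names in it (StreamingEvent, Choice, Delta, iter_message_chunks) are hypothetical.

    # Illustrative sketch only -- hypothetical names, not any_llm_client's code.
    import typing

    import pydantic


    class Delta(pydantic.BaseModel):
        content: str = ""


    class Choice(pydantic.BaseModel):
        delta: Delta


    class StreamingEvent(pydantic.BaseModel):
        choices: list[Choice]


    def iter_message_chunks(sse_data_payloads: typing.Iterable[str]) -> typing.Iterator[str]:
        """Yield delta contents from SSE `data:` payloads, skipping events with empty `choices`."""
        for payload in sse_data_payloads:
            if payload == "[DONE]":
                break  # OpenAI-style stream terminator; anything after it is ignored
            event = StreamingEvent.model_validate_json(payload)
            if not event.choices:
                continue  # tolerate empty-choices events instead of raising a validation error
            yield event.choices[0].delta.content

Fed payloads shaped like the fixture builds (real chunks, one empty-choices event, "[DONE]", then random junk), such a parser would yield only the real chunks, which is consistent with the fixture change above.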