From 722171e5be104f3f607c3762fd00b5632dde90d0 Mon Sep 17 00:00:00 2001
From: Richard Park
Date: Tue, 26 Sep 2023 11:24:10 -0700
Subject: [PATCH] Add skip logic if OpenAI is overloaded.

---
 sdk/ai/azopenai/client_chat_completions_test.go | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/sdk/ai/azopenai/client_chat_completions_test.go b/sdk/ai/azopenai/client_chat_completions_test.go
index 6ae87db95221..5f7fe66bf3f0 100644
--- a/sdk/ai/azopenai/client_chat_completions_test.go
+++ b/sdk/ai/azopenai/client_chat_completions_test.go
@@ -10,6 +10,7 @@ import (
 	"context"
 	"errors"
 	"io"
+	"net/http"
 	"os"
 	"testing"
 
@@ -89,6 +90,11 @@ func testGetChatCompletions(t *testing.T, client *azopenai.Client, tv testVars)
 	}
 
 	resp, err := client.GetChatCompletions(context.Background(), newTestChatCompletionOptions(tv), nil)
+
+	if respErr := (*azcore.ResponseError)(nil); errors.As(err, &respErr) && respErr.StatusCode == http.StatusTooManyRequests {
+		t.Skipf("OpenAI resource overloaded, skipping this test")
+	}
+
	require.NoError(t, err)
 
 	if tv.Endpoint.Azure {
@@ -111,6 +117,11 @@ func testGetChatCompletions(t *testing.T, client *azopenai.Client, tv testVars)
 
 func testGetChatCompletionsStream(t *testing.T, client *azopenai.Client, tv testVars) {
 	streamResp, err := client.GetChatCompletionsStream(context.Background(), newTestChatCompletionOptions(tv), nil)
+
+	if respErr := (*azcore.ResponseError)(nil); errors.As(err, &respErr) && respErr.StatusCode == http.StatusTooManyRequests {
+		t.Skipf("OpenAI resource overloaded, skipping this test")
+	}
+
 	require.NoError(t, err)
 
 	// the data comes back differently for streaming
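
Note (not part of the patch above): both tests now repeat the same throttling check. As a minimal sketch, assuming the test file's package name and the github.com/Azure/azure-sdk-for-go/sdk/azcore dependency already used by this module, the duplicated logic could be factored into a hypothetical helper such as skipNowIfThrottled:

    package azopenai

    import (
    	"errors"
    	"net/http"
    	"testing"

    	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
    )

    // skipNowIfThrottled is a hypothetical helper (not part of this patch).
    // It skips the current test when the service responded with HTTP 429,
    // mirroring the inline checks added above.
    func skipNowIfThrottled(t *testing.T, err error) {
    	var respErr *azcore.ResponseError

    	if errors.As(err, &respErr) && respErr.StatusCode == http.StatusTooManyRequests {
    		t.Skipf("OpenAI resource overloaded, skipping this test")
    	}
    }

Each test would then call skipNowIfThrottled(t, err) right after the client call and before require.NoError(t, err).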