feat(api): api update (#2396)
stainless-app[bot] committed Jan 29, 2025
1 parent dd47974 commit ea18920
Showing 4 changed files with 87 additions and 5 deletions.
.stats.yml (1 addition, 1 deletion)
@@ -1,2 +1,2 @@
configured_endpoints: 1508
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/cloudflare%2Fcloudflare-93781888ccca411928a3826373dfd7531806fd2eb4c35de249b142676f00b56f.yml
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/cloudflare%2Fcloudflare-ac165d4964ca2ce8972ff49caf15d7f0d11b3ee052b35a32ee3b3111fe77bef3.yml
src/cloudflare/resources/ai/ai.py (48 additions, 4 deletions)
@@ -656,10 +656,16 @@ def run(
*,
account_id: str,
image: Iterable[float],
frequency_penalty: float | NotGiven = NOT_GIVEN,
max_tokens: int | NotGiven = NOT_GIVEN,
presence_penalty: float | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
raw: bool | NotGiven = NOT_GIVEN,
repetition_penalty: float | NotGiven = NOT_GIVEN,
seed: float | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
top_k: float | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -682,16 +688,32 @@ def run(
image: An array of integers that represent the image data constrained to 8-bit unsigned
integer values
frequency_penalty: Decreases the likelihood of the model repeating the same lines verbatim.
max_tokens: The maximum number of tokens to generate in the response.
presence_penalty: Increases the likelihood of the model introducing new topics.
prompt: The input text prompt for the model to generate a response.
raw: If true, a chat template is not applied and you must adhere to the specific
model's expected formatting.
repetition_penalty: Penalty for repeated tokens; higher values discourage repetition.
seed: Random seed for reproducibility of the generation.
temperature: Controls the randomness of the output; higher values produce more random
results.
top_k: Limits the AI to choose from the top 'k' most probable words. Lower values make
responses more focused; higher values introduce more variety and potential
surprises.
top_p: Controls the creativity of the AI's responses by adjusting how many possible
words it considers. Lower values make outputs more predictable; higher values
allow for more varied and creative responses.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -726,7 +748,7 @@ def run(
mask: Iterable[float] | NotGiven = NOT_GIVEN,
negative_prompt: str | NotGiven = NOT_GIVEN,
num_steps: int | NotGiven = NOT_GIVEN,
seed: int | NotGiven = NOT_GIVEN,
seed: int | float | NotGiven = NOT_GIVEN,
strength: float | NotGiven = NOT_GIVEN,
width: int | NotGiven = NOT_GIVEN,
lang: str | NotGiven = NOT_GIVEN,
@@ -741,7 +763,7 @@ def run(
repetition_penalty: float | NotGiven = NOT_GIVEN,
stream: bool | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_k: int | float | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
messages: Iterable[ai_run_params.MessagesMessage] | NotGiven = NOT_GIVEN,
functions: Iterable[ai_run_params.MessagesFunction] | NotGiven = NOT_GIVEN,
@@ -1408,10 +1430,16 @@ async def run(
*,
account_id: str,
image: Iterable[float],
frequency_penalty: float | NotGiven = NOT_GIVEN,
max_tokens: int | NotGiven = NOT_GIVEN,
presence_penalty: float | NotGiven = NOT_GIVEN,
prompt: str | NotGiven = NOT_GIVEN,
raw: bool | NotGiven = NOT_GIVEN,
repetition_penalty: float | NotGiven = NOT_GIVEN,
seed: float | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
top_k: float | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -1434,16 +1462,32 @@ async def run(
image: An array of integers that represent the image data constrained to 8-bit unsigned
integer values
frequency_penalty: Decreases the likelihood of the model repeating the same lines verbatim.
max_tokens: The maximum number of tokens to generate in the response.
presence_penalty: Increases the likelihood of the model introducing new topics.
prompt: The input text prompt for the model to generate a response.
raw: If true, a chat template is not applied and you must adhere to the specific
model's expected formatting.
repetition_penalty: Penalty for repeated tokens; higher values discourage repetition.
seed: Random seed for reproducibility of the generation.
temperature: Controls the randomness of the output; higher values produce more random
results.
top_k: Limits the AI to choose from the top 'k' most probable words. Lower values make
responses more focused; higher values introduce more variety and potential
surprises.
top_p: Controls the creativity of the AI's responses by adjusting how many possible
words it considers. Lower values make outputs more predictable; higher values
allow for more varied and creative responses.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -1478,7 +1522,7 @@ async def run(
mask: Iterable[float] | NotGiven = NOT_GIVEN,
negative_prompt: str | NotGiven = NOT_GIVEN,
num_steps: int | NotGiven = NOT_GIVEN,
seed: int | NotGiven = NOT_GIVEN,
seed: int | float | NotGiven = NOT_GIVEN,
strength: float | NotGiven = NOT_GIVEN,
width: int | NotGiven = NOT_GIVEN,
lang: str | NotGiven = NOT_GIVEN,
@@ -1493,7 +1537,7 @@ async def run(
repetition_penalty: float | NotGiven = NOT_GIVEN,
stream: bool | NotGiven = NOT_GIVEN,
temperature: float | NotGiven = NOT_GIVEN,
top_k: int | NotGiven = NOT_GIVEN,
top_k: int | float | NotGiven = NOT_GIVEN,
top_p: float | NotGiven = NOT_GIVEN,
messages: Iterable[ai_run_params.MessagesMessage] | NotGiven = NOT_GIVEN,
functions: Iterable[ai_run_params.MessagesFunction] | NotGiven = NOT_GIVEN,
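For reference, a minimal sketch of how the expanded image-to-text overload could be invoked once these parameters are available. The model slug is a hypothetical placeholder, the account ID is the same dummy value used in the tests below, and the client is assumed to pick up `CLOUDFLARE_API_TOKEN` from the environment:

```python
from cloudflare import Cloudflare

client = Cloudflare()  # assumes CLOUDFLARE_API_TOKEN is set in the environment

response = client.ai.run(
    model_name="@cf/example/image-to-text",  # hypothetical model slug
    account_id="023e105f4ecef8ad9ca31a8372d0c353",  # dummy account ID from the tests
    image=[0, 255, 128],         # 8-bit unsigned integer image data
    prompt="Describe this image",
    max_tokens=256,              # cap on generated tokens
    temperature=0.6,             # lower values give more deterministic output
    top_k=40,                    # sample only from the 40 most probable tokens
    top_p=0.9,                   # nucleus-sampling cutoff
    repetition_penalty=1.1,      # discourage repeated tokens
    frequency_penalty=0.2,       # discourage verbatim repetition of lines
    presence_penalty=0.2,        # encourage new topics
    seed=42,                     # fixed seed for reproducibility
)
print(response)
```

The new keyword arguments mirror the sampling controls already exposed on the text-generation overload shown above, so existing callers are unaffected; only the image-to-text variant gains them here.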
src/cloudflare/types/ai/ai_run_params.py (26 additions, 0 deletions)
@@ -377,9 +377,15 @@ class ImageToText(TypedDict, total=False):
integer values
"""

frequency_penalty: float
"""Decreases the likelihood of the model repeating the same lines verbatim."""

max_tokens: int
"""The maximum number of tokens to generate in the response."""

presence_penalty: float
"""Increases the likelihood of the model introducing new topics."""

prompt: str
"""The input text prompt for the model to generate a response."""

@@ -389,12 +395,32 @@ class ImageToText(TypedDict, total=False):
model's expected formatting.
"""

repetition_penalty: float
"""Penalty for repeated tokens; higher values discourage repetition."""

seed: float
"""Random seed for reproducibility of the generation."""

temperature: float
"""
Controls the randomness of the output; higher values produce more random
results.
"""

top_k: float
"""Limits the AI to choose from the top 'k' most probable words.
Lower values make responses more focused; higher values introduce more variety
and potential surprises.
"""

top_p: float
"""
Controls the creativity of the AI's responses by adjusting how many possible
words it considers. Lower values make outputs more predictable; higher values
allow for more varied and creative responses.
"""


AIRunParams: TypeAlias = Union[
TextClassification,
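Because the `run` overloads are generated from these TypedDicts, the same fields can also be assembled as a dictionary and unpacked into the call. A brief sketch, assuming `ai_run_params` is importable from `cloudflare.types.ai` (matching the file path above) and that the values below are placeholders:

```python
from cloudflare.types.ai import ai_run_params

# ImageToText is declared with total=False, so each new sampling field is optional;
# the exact set of required keys (e.g. account_id) may differ in the generated class.
params: ai_run_params.ImageToText = {
    "image": [0, 255, 128],          # 8-bit unsigned integer image data
    "prompt": "Describe this image",
    "max_tokens": 128,
    "temperature": 0.6,
    "top_k": 40,
    "top_p": 0.9,
    "seed": 42,
}
```

The dict can then be passed along as `client.ai.run(model_name=..., account_id=..., **params)`; static type checkers may or may not accept the unpack against the overloaded signature, so treat this as illustrative rather than canonical.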
tests/api_resources/test_ai.py (12 additions, 0 deletions)
@@ -775,10 +775,16 @@ def test_method_run_with_all_params_overload_12(self, client: Cloudflare) -> None:
model_name="model_name",
account_id="023e105f4ecef8ad9ca31a8372d0c353",
image=[0],
frequency_penalty=0,
max_tokens=0,
presence_penalty=0,
prompt="prompt",
raw=True,
repetition_penalty=0,
seed=0,
temperature=0,
top_k=0,
top_p=0,
)
assert_matches_type(Optional[AIRunResponse], ai, path=["response"])

@@ -1588,10 +1594,16 @@ async def test_method_run_with_all_params_overload_12(self, async_client: AsyncCloudflare) -> None:
model_name="model_name",
account_id="023e105f4ecef8ad9ca31a8372d0c353",
image=[0],
frequency_penalty=0,
max_tokens=0,
presence_penalty=0,
prompt="prompt",
raw=True,
repetition_penalty=0,
seed=0,
temperature=0,
top_k=0,
top_p=0,
)
assert_matches_type(Optional[AIRunResponse], ai, path=["response"])

