feat: LLM - Improved the handling of temperature and top_p in streaming
The `temperature` and `top_p` parameters are typed as `float`, but some callers may pass integer values (for example, `temperature=0`). The streaming API is sensitive to the `int` vs. `float` parameter type and raises an error when given an integer. This CL mitigates that by converting integer values to floats before the request is built.
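
For context, each hunk below inlines the same small coercion. A minimal standalone sketch of the pattern follows (the helper name `_coerce_to_float` is illustrative only; the actual change repeats the check at each call site rather than factoring it out):

# Illustrative sketch of the coercion inlined at each call site below.
# The helper name _coerce_to_float is hypothetical, not part of this CL.
def _coerce_to_float(value):
    # The streaming backend is strict about parameter types, so an int
    # such as temperature=0 must be sent as the float 0.0.
    if isinstance(value, int):
        return float(value)
    return value

prediction_parameters = {}
temperature = 0  # an int a caller might pass
prediction_parameters["temperature"] = _coerce_to_float(temperature)
assert prediction_parameters["temperature"] == 0.0
assert isinstance(prediction_parameters["temperature"], float)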

PiperOrigin-RevId: 562738676
Ark-kun authored and copybara-github committed Sep 5, 2023
1 parent 50c1591 commit 6566529
Showing 1 changed file with 10 additions and 0 deletions.
vertexai/language_models/_language_models.py (10 additions, 0 deletions)
@@ -759,9 +759,13 @@ def predict_streaming(
             prediction_parameters["maxDecodeSteps"] = max_output_tokens
 
         if temperature is not None:
+            if isinstance(temperature, int):
+                temperature = float(temperature)
             prediction_parameters["temperature"] = temperature
 
         if top_p:
+            if isinstance(top_p, int):
+                top_p = float(top_p)
             prediction_parameters["topP"] = top_p
 
         if top_k:
@@ -1389,10 +1393,14 @@ def _prepare_request(
         if temperature is None:
             temperature = self._temperature
         if temperature is not None:
+            if isinstance(temperature, int):
+                temperature = float(temperature)
             prediction_parameters["temperature"] = temperature
 
         top_p = top_p or self._top_p
         if top_p:
+            if isinstance(top_p, int):
+                top_p = float(top_p)
             prediction_parameters["topP"] = top_p
 
         top_k = top_k or self._top_k
@@ -1749,6 +1757,8 @@ def _create_prediction_request(
         prediction_parameters = {}
 
         if temperature is not None:
+            if isinstance(temperature, int):
+                temperature = float(temperature)
             prediction_parameters["temperature"] = temperature
 
         if max_output_tokens:
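
With this change, integer-valued parameters work in the streaming path as well. A short usage sketch (the model name is an assumption for illustration; any model exposing `predict_streaming` behaves the same way):

from vertexai.language_models import TextGenerationModel

# Model name is an assumption for illustration.
model = TextGenerationModel.from_pretrained("text-bison@001")

# temperature=0 and top_p=1 are int literals; they are now coerced to
# 0.0 and 1.0 before the streaming request is built, instead of
# triggering a type error.
for response in model.predict_streaming(
    "Tell me about Vertex AI.", temperature=0, top_p=1
):
    print(response.text)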
