diff --git a/vertexai/language_models/_language_models.py b/vertexai/language_models/_language_models.py
index 9092ae193f..c4fdaceec6 100644
--- a/vertexai/language_models/_language_models.py
+++ b/vertexai/language_models/_language_models.py
@@ -759,9 +759,13 @@ def predict_streaming(
             prediction_parameters["maxDecodeSteps"] = max_output_tokens

         if temperature is not None:
+            if isinstance(temperature, int):
+                temperature = float(temperature)
             prediction_parameters["temperature"] = temperature

         if top_p:
+            if isinstance(top_p, int):
+                top_p = float(top_p)
             prediction_parameters["topP"] = top_p

         if top_k:
@@ -1389,10 +1393,14 @@ def _prepare_request(
         if temperature is None:
             temperature = self._temperature
         if temperature is not None:
+            if isinstance(temperature, int):
+                temperature = float(temperature)
             prediction_parameters["temperature"] = temperature

         top_p = top_p or self._top_p
         if top_p:
+            if isinstance(top_p, int):
+                top_p = float(top_p)
             prediction_parameters["topP"] = top_p

         top_k = top_k or self._top_k
@@ -1749,6 +1757,8 @@ def _create_prediction_request(
         prediction_parameters = {}

         if temperature is not None:
+            if isinstance(temperature, int):
+                temperature = float(temperature)
             prediction_parameters["temperature"] = temperature

         if max_output_tokens:
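For context (not part of the patch), a minimal standalone sketch of the coercion each hunk applies. `_build_sampling_parameters` is a hypothetical helper name; the real code performs the same int-to-float conversion inline in each method, presumably so that integer arguments such as `temperature=1` are sent to the prediction service as floats.

```python
from typing import Optional, Union


def _build_sampling_parameters(
    temperature: Optional[Union[int, float]] = None,
    top_p: Optional[Union[int, float]] = None,
) -> dict:
    """Builds prediction parameters, converting int values to float (hypothetical helper)."""
    prediction_parameters = {}
    if temperature is not None:
        if isinstance(temperature, int):
            temperature = float(temperature)
        prediction_parameters["temperature"] = temperature
    if top_p:
        if isinstance(top_p, int):
            top_p = float(top_p)
        prediction_parameters["topP"] = top_p
    return prediction_parameters


if __name__ == "__main__":
    # An int like temperature=1 becomes 1.0 before being placed in the request.
    print(_build_sampling_parameters(temperature=1, top_p=1))
    # -> {'temperature': 1.0, 'topP': 1.0}
```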