diff --git a/gpt_researcher/utils/llm.py b/gpt_researcher/utils/llm.py
index 9e5d43802..0d0f96dd4 100644
--- a/gpt_researcher/utils/llm.py
+++ b/gpt_researcher/utils/llm.py
@@ -34,8 +34,8 @@ async def create_chat_completion(
     Args:
         messages (list[dict[str, str]]): The messages to send to the chat completion
         model (str, optional): The model to use. Defaults to None.
-        temperature (float, optional): The temperature to use. Defaults to 0.9.
-        max_tokens (int, optional): The max tokens to use. Defaults to None.
+        temperature (float, optional): The temperature to use. Defaults to 0.4.
+        max_tokens (int, optional): The max tokens to use. Defaults to 4000.
         stream (bool, optional): Whether to stream the response. Defaults to False.
         llm_provider (str, optional): The LLM Provider to use.
-        webocket (WebSocket): The websocket used in the currect request,
+        websocket (WebSocket): The websocket used in the current request,