diff --git a/examples/chat/playground/app.py b/examples/chat/playground/app.py
index dbda0b62d..af205824b 100644
--- a/examples/chat/playground/app.py
+++ b/examples/chat/playground/app.py
@@ -15,8 +15,8 @@
 models = {
     "openai": ["gpt-4o", "gpt-3.5-turbo"],
     "claude": [
-        "claude-3-opus-20240229",
-        "claude-3-5-sonnet-20240620",
+        "claude-3-opus-latest",
+        "claude-3-5-sonnet-latest",
         "claude-3-haiku-20240307",
     ],
     "google": ["gemini-1.5-pro-latest"],
diff --git a/shiny/templates/chat/enterprise/aws-bedrock-anthropic/app.py b/shiny/templates/chat/enterprise/aws-bedrock-anthropic/app.py
index 397af3583..fa9b7859e 100644
--- a/shiny/templates/chat/enterprise/aws-bedrock-anthropic/app.py
+++ b/shiny/templates/chat/enterprise/aws-bedrock-anthropic/app.py
@@ -39,7 +39,7 @@ async def _():
     messages = chat.messages(format="anthropic")
     # Create a response message stream
     response = llm.messages.create(
-        model="anthropic.claude-3-sonnet-20240229-v1:0",
+        model="anthropic.claude-3-5-sonnet-20241022-v2:0",
         messages=messages,
         stream=True,
         max_tokens=1000,
diff --git a/shiny/templates/chat/hello-providers/anthropic/app.py b/shiny/templates/chat/hello-providers/anthropic/app.py
index 01df47db9..e3bae5966 100644
--- a/shiny/templates/chat/hello-providers/anthropic/app.py
+++ b/shiny/templates/chat/hello-providers/anthropic/app.py
@@ -35,7 +35,7 @@ async def _():
     messages = chat.messages(format="anthropic")
     # Create a response message stream
     response = await llm.messages.create(
-        model="claude-3-5-sonnet-20240620",
+        model="claude-3-5-sonnet-latest",
         messages=messages,
         stream=True,
         max_tokens=1000,
diff --git a/shiny/templates/chat/production/anthropic/app.py b/shiny/templates/chat/production/anthropic/app.py
index b326334a2..423d4337c 100644
--- a/shiny/templates/chat/production/anthropic/app.py
+++ b/shiny/templates/chat/production/anthropic/app.py
@@ -24,7 +24,7 @@
 MODEL_INFO = {
-    "name": "claude-3-5-sonnet-20240620",
+    "name": "claude-3-5-sonnet-20241022",
     # DISCLAIMER: Anthropic has not yet released a public tokenizer for Claude models,
     # so this uses the generic default provided by Chat() (for now). That is probably
     # ok though since the default tokenizer likely overestimates the token count.
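
For context, the streaming pattern shared by the templates touched above boils down to the sketch below, updated to the new "-latest" alias (which resolves server-side to the newest Claude 3.5 Sonnet snapshot, so templates no longer go stale when Anthropic publishes a new dated model). This is a minimal sketch, not a verbatim copy of any template: it assumes ANTHROPIC_API_KEY is set in the environment and uses Shiny Express scaffolding, which may differ slightly from the template files themselves.

from anthropic import AsyncAnthropic
from shiny.express import ui

llm = AsyncAnthropic()  # picks up ANTHROPIC_API_KEY from the environment

chat = ui.Chat(id="chat")
chat.ui()

@chat.on_user_submit
async def _():
    # Convert the chat history to Anthropic's message format
    messages = chat.messages(format="anthropic")
    # Create a response message stream using the "-latest" alias
    response = await llm.messages.create(
        model="claude-3-5-sonnet-latest",
        messages=messages,
        stream=True,
        max_tokens=1000,
    )
    # Stream tokens into the chat UI as they arrive
    await chat.append_message_stream(response)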