Adding New Models and Enhancing Provider Functionality (#2689)
* Adding New Models and Enhancing Provider Functionality

* fix(core): handle model errors and improve configuration

- Import ModelNotSupportedError for proper exception handling in model resolution
- Update login_url configuration to reference class URL attribute dynamically
- Remove redundant typing imports after internal module reorganization

* feat(g4f/Provider/PerplexityLabs.py): Add new Perplexity models and update provider listings

- Update PerplexityLabs provider with expanded Sonar model family including pro/reasoning variants
- Add new text model sonar-reasoning-pro to the supported model catalog
- Standardize model naming conventions across provider documentation

* feat(g4f/models.py): add Sonar Reasoning Pro model configuration

- Add new sonar-reasoning-pro model to the Perplexity AI text models section
- Include model in ModelUtils.convert mapping with PerplexityLabs provider
- Maintain consistent configuration pattern with existing Sonar variants
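
A hedged sketch of that configuration pattern (the g4f/models.py hunk is not rendered in this diff, so the exact entry is an assumption modeled on the existing Sonar variants):

```python
from g4f.models import Model, ModelUtils
from g4f.Provider import PerplexityLabs

# Assumed definition, mirroring the existing Sonar entries in g4f/models.py
sonar_reasoning_pro = Model(
    name='sonar-reasoning-pro',
    base_provider='Perplexity AI',
    best_provider=PerplexityLabs
)

# Assumed registration alongside the other Sonar entries
ModelUtils.convert['sonar-reasoning-pro'] = sonar_reasoning_pro
```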

* feat(docs/providers-and-models.md): update provider models and add new reasoning model

- Update PerplexityLabs text models to standardized sonar naming convention
- Add new sonar-reasoning-pro model to text models table
- Include latest Perplexity AI documentation references for new model

* docs(docs/providers-and-models.md): update AI providers documentation

- Remove deprecated chatgptt.me from no-auth providers list
- Delete redundant Auth column from HuggingSpace providers table
- Update PerplexityLabs model website URLs to sonar.perplexity.ai
- Adjust provider counts for GPT-4/GPT-4o models in text models section
- Fix inconsistent formatting in image models provider listings

* chore(g4f/models.py): remove deprecated ChatGptt provider integration

- Remove ChatGptt import from provider dependencies
- Exclude ChatGptt from default model's best_provider list
- Update gpt_4 model configuration to eliminate ChatGptt reference
- Modify gpt_4o vision model provider hierarchy
- Adjust gpt_4o_mini provider selection parameters

BREAKING CHANGE: Existing integrations using the ChatGptt provider will no longer function
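
For affected integrations, a minimal migration sketch is to pin a still-working provider explicitly (Blackbox here is just an example substitute, not a prescribed replacement):

```python
from g4f.client import Client
from g4f.Provider import Blackbox  # example; any working gpt-4o provider

client = Client(provider=Blackbox)
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```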

* Disable provider (moved g4f/Provider/ChatGptt.py to g4f/Provider/not_working/ChatGptt.py): Cloudflare problems

* fix(g4f/Provider/CablyAI.py): update API endpoints and model configurations

* docs(docs/providers-and-models.md): update model listings and provider capabilities

* feat(g4f/models.py): Add Hermes-3 model and enhance provider configs

* feat(g4f/Provider/CablyAI.py): Add free tier indicators to model aliases

* refactor(g4f/tools/run_tools.py): modularize thinking chunk handling
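
A minimal sketch of what a shared thinking-chunk handler can look like, mirroring the `<think>` parsing removed from the providers below (names and structure are illustrative assumptions, not the actual run_tools API):

```python
from typing import Iterator, Union

class Reasoning:
    """Hypothetical stand-in for g4f.providers.response.Reasoning."""
    def __init__(self, status: str):
        self.status = status

def split_thinking(text: str) -> Iterator[Union[str, Reasoning]]:
    """Yield Reasoning for a <think>...</think> span, plain text otherwise."""
    if "<think>" not in text:
        yield text
        return
    pre, rest = text.split("<think>", 1)
    think, post = rest.split("</think>", 1) if "</think>" in rest else (rest, "")
    if pre.strip():
        yield pre.strip()
    if think.strip():
        yield Reasoning(status=think.strip())
    if post.strip():
        yield post.strip()
```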

* fix(g4f/Provider/DeepInfraChat.py): resolve duplicate keys and enhance request headers

* feat(g4f/Provider/DeepInfraChat.py): Add multimodal image support and improve model handling

* chore(g4f/models.py): update default vision model providers

* feat(docs/providers-and-models.md): update provider capabilities and model specifications

* Update docs/client.md

* docs(docs/providers-and-models.md): Update DeepInfraChat models documentation

* feat(g4f/Provider/DeepInfraChat.py): add new vision models and expand model aliases

* feat(g4f/models.py): update model configurations and add new providers

* feat(g4f/models.py): Update model configurations and add new AI models

---------

Co-authored-by: kqlio67 <>
kqlio67 authored Feb 7, 2025
1 parent 5d35b74 commit 88e7ef9
Showing 15 changed files with 381 additions and 229 deletions.
2 changes: 1 addition & 1 deletion docs/client.md
@@ -181,8 +181,8 @@ for chunk in stream:
if chunk.choices[0].delta.content:
print(chunk.choices[0].delta.content or "", end="")
```
---

---
### Using a Vision Model
**Analyze an image and generate a description:**
```python
59 changes: 34 additions & 25 deletions docs/providers-and-models.md

Large diffs are not rendered by default.

73 changes: 17 additions & 56 deletions g4f/Provider/Blackbox.py
@@ -15,7 +15,7 @@
from ..image import to_data_uri
from ..cookies import get_cookies_dir
from .helper import format_prompt, format_image_prompt
from ..providers.response import JsonConversation, ImageResponse, Reasoning
from ..providers.response import JsonConversation, ImageResponse

class Conversation(JsonConversation):
validated_value: str = None
@@ -39,10 +39,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
default_vision_model = default_model
default_image_model = 'ImageGeneration'
image_models = [default_image_model]
vision_models = [default_vision_model, 'gpt-4o', 'o3-mini', 'gemini-pro', 'DeepSeek-V3', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
reasoning_models = ['DeepSeek-R1']
vision_models = [default_vision_model, 'gpt-4o', 'o3-mini', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']

userSelectedModel = ['gpt-4o', 'o3-mini', 'claude-sonnet-3.5', 'gemini-pro', 'blackboxai-pro']
userSelectedModel = ['gpt-4o', 'o3-mini', 'gemini-pro', 'claude-sonnet-3.5', 'DeepSeek-V3', 'DeepSeek-R1', 'blackboxai-pro', 'Meta-Llama-3.3-70B-Instruct-Turbo', 'Mistral-Small-24B-Instruct-2501', 'DeepSeek-LLM-Chat-(67B)', 'DBRX-Instruct', 'Qwen-QwQ-32B-Preview', 'Nous-Hermes-2-Mixtral-8x7B-DPO']

agentMode = {
'DeepSeek-V3': {'mode': True, 'id': "deepseek-chat", 'name': "DeepSeek-V3"},
@@ -56,6 +55,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
}

trendingAgentMode = {
"o3-mini": {'mode': True, 'id': 'o3-mini'},
"gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
"llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
@@ -94,9 +94,11 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'builder Agent': {'mode': True, 'id': "builder Agent"},
}

models = list(dict.fromkeys([default_model, *userSelectedModel, *reasoning_models, *image_models, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))
models = list(dict.fromkeys([default_model, *userSelectedModel, *image_models, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))

model_aliases = {
"gpt-4": "gpt-4o",
"claude-3.5-sonnet": "claude-sonnet-3.5",
"gemini-1.5-flash": "gemini-1.5-flash",
"gemini-1.5-pro": "gemini-pro",
"deepseek-v3": "DeepSeek-V3",
@@ -177,7 +179,6 @@ async def create_async_generator(
messages: Messages,
prompt: str = None,
proxy: str = None,
web_search: bool = False,
images: ImagesType = None,
top_p: float = None,
temperature: float = None,
@@ -283,60 +284,20 @@ async def create_async_generator(
"vscodeClient": False,
"codeInterpreterMode": False,
"customProfile": {"name": "", "occupation": "", "traits": [], "additionalInfo": "", "enableNewChats": False},
"webSearchMode": web_search
"session": {"user":{"name":"John Doe","email":"[email protected]","image":"https://lh3.googleusercontent.com/a/ACg8ocK9X7mNpQ2vR4jH3tY8wL5nB1xM6fDS9JW2kLpTn4Vy3hR2xN4m=s96-c"},"expires":datetime.now(timezone.utc).isoformat(timespec='milliseconds').replace('+00:00', 'Z'), "status": "PREMIUM"},
"webSearchMode": False
}

async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
await raise_for_status(response)
response_text = await response.text()
parts = response_text.split('$~~~$')
text_to_yield = parts[2] if len(parts) >= 3 else response_text

if not text_to_yield or text_to_yield.isspace():
return

if model in cls.reasoning_models and "\n\n\n" in text_to_yield:
think_split = text_to_yield.split("\n\n\n", 1)
if len(think_split) > 1:
think_content, answer = think_split[0].strip(), think_split[1].strip()
yield Reasoning(status=think_content)
yield answer
else:
yield text_to_yield
elif "<think>" in text_to_yield:
pre_think, rest = text_to_yield.split('<think>', 1)
think_content, post_think = rest.split('</think>', 1)

pre_think = pre_think.strip()
think_content = think_content.strip()
post_think = post_think.strip()

if pre_think:
yield pre_think
if think_content:
yield Reasoning(status=think_content)
if post_think:
yield post_think

elif "Generated by BLACKBOX.AI" in text_to_yield:
conversation.validated_value = await cls.fetch_validated(force_refresh=True)
if conversation.validated_value:
data["validated"] = conversation.validated_value
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as new_response:
await raise_for_status(new_response)
new_response_text = await new_response.text()
new_parts = new_response_text.split('$~~~$')
new_text = new_parts[2] if len(new_parts) >= 3 else new_response_text

if new_text and not new_text.isspace():
yield new_text
else:
if text_to_yield and not text_to_yield.isspace():
yield text_to_yield
else:
if text_to_yield and not text_to_yield.isspace():
yield text_to_yield
full_response = []
async for chunk in response.content.iter_any():
if chunk:
chunk_text = chunk.decode()
full_response.append(chunk_text)
yield chunk_text

if return_conversation:
conversation.message_history.append({"role": "assistant", "content": text_to_yield})
full_response_text = ''.join(full_response)
conversation.message_history.append({"role": "assistant", "content": full_response_text})
yield conversation
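
With the streaming rewrite above, consuming the provider reduces to iterating the async generator. A minimal usage sketch (yielded items can include non-text response objects such as the conversation, hence the isinstance check):

```python
import asyncio
from g4f.Provider import Blackbox

async def main():
    messages = [{"role": "user", "content": "Hello"}]
    async for chunk in Blackbox.create_async_generator(model="gpt-4o", messages=messages):
        if isinstance(chunk, str):  # skip conversation and other response objects
            print(chunk, end="", flush=True)

asyncio.run(main())
```
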
30 changes: 2 additions & 28 deletions g4f/Provider/BlackboxAPI.py
@@ -5,7 +5,6 @@
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..requests.raise_for_status import raise_for_status
from ..providers.response import Reasoning
from .helper import format_prompt

class BlackboxAPI(AsyncGeneratorProvider, ProviderModelMixin):
@@ -20,15 +19,15 @@ class BlackboxAPI(AsyncGeneratorProvider, ProviderModelMixin):
supports_message_history = True

default_model = 'deepseek-ai/DeepSeek-V3'
reasoning_models = ['deepseek-ai/DeepSeek-R1']
models = [
default_model,
'deepseek-ai/DeepSeek-R1',
'mistralai/Mistral-Small-24B-Instruct-2501',
'deepseek-ai/deepseek-llm-67b-chat',
'databricks/dbrx-instruct',
'Qwen/QwQ-32B-Preview',
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO'
] + reasoning_models
]

model_aliases = {
"deepseek-v3": "deepseek-ai/DeepSeek-V3",
@@ -65,39 +64,14 @@ async def create_async_generator(
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
await raise_for_status(response)

is_reasoning = False
current_reasoning = ""

async for chunk in response.content:
if not chunk:
continue

text = chunk.decode(errors='ignore')

if model in cls.reasoning_models:
if "<think>" in text:
text = text.replace("<think>", "")
is_reasoning = True
current_reasoning = text
continue

if "</think>" in text:
text = text.replace("</think>", "")
is_reasoning = False
current_reasoning += text
yield Reasoning(status=current_reasoning.strip())
current_reasoning = ""
continue

if is_reasoning:
current_reasoning += text
continue

try:
if text:
yield text
except Exception as e:
return

if is_reasoning and current_reasoning:
yield Reasoning(status=current_reasoning.strip())
70 changes: 55 additions & 15 deletions g4f/Provider/CablyAI.py
@@ -4,33 +4,45 @@
from .template import OpenaiTemplate

class CablyAI(OpenaiTemplate):
label = "CablyAI"
url = "https://cablyai.com"
login_url = url
url = "https://cablyai.com/chat"
login_url = "https://cablyai.com"
api_base = "https://cablyai.com/v1"
api_key = "sk-your-openai-api-key"

working = True
needs_auth = False
supports_stream = True
supports_system_message = True
supports_message_history = True

default_model = 'gpt-4o-mini'
reasoning_models = ['deepseek-r1-uncensored']
fallback_models = [
default_model,
'searchgpt',
'llama-3.1-8b-instruct',
'deepseek-r1-uncensored',
'deepseek-r1',
'deepseek-reasoner',
'deepseek-v3',
'tinyswallow1.5b',
'andy-3.5',
'hermes-3-llama-3.2-3b',
'llama-3.1-8b-instruct',
'o3-mini',
'o3-mini-low',
] + reasoning_models

'sonar-reasoning',
'tinyswallow1.5b',
]

model_aliases = {
"gpt-4o-mini": "searchgpt",
"llama-3.1-8b": "llama-3.1-8b-instruct",
"deepseek-r1": "deepseek-r1-uncensored",
"gpt-4o-mini": "searchgpt (free)",
"deepseek-r1": "deepseek-r1-uncensored (free)",
"deepseek-r1": "deepseek-reasoner (free)",
"hermes-3": "hermes-3-llama-3.2-3b (free)",
"llama-3.1-8b": "llama-3.1-8b-instruct (free)",
"o3-mini-low": "o3-mini-low (free)",
"o3-mini": "o3-mini-low (free)",
"o3-mini": "o3-mini (free)",
}

@classmethod
def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
models = super().get_models(api_key, api_base)
@@ -42,6 +54,34 @@ def get_model(cls, model: str, **kwargs) -> str:
model = super().get_model(model, **kwargs)
return model.split(" (free)")[0]
except ModelNotSupportedError:
if f"{model} (free)" in cls.models:
if f"f{model} (free)" in cls.models:
return model
raise

@classmethod
def create_async_generator(
cls,
model: str,
messages: Messages,
api_key: str = None,
stream: bool = True,
**kwargs
) -> AsyncResult:
api_key = api_key or cls.api_key
headers = {
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.9",
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
"Origin": cls.url,
"Referer": f"{cls.url}/chat",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
}
return super().create_async_generator(
model=model,
messages=messages,
api_key=api_key,
stream=stream,
headers=headers,
**kwargs
)
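
The "(free)" alias handling above can be exercised directly. A small sketch of the intended resolution, assuming aliases are resolved locally without a network round-trip:

```python
from g4f.Provider import CablyAI

# get_model maps a public alias to its "(free)" entry via model_aliases,
# then strips the marker again before the request is sent.
print(CablyAI.get_model("hermes-3"))       # expected: hermes-3-llama-3.2-3b
print(CablyAI.get_model("llama-3.1-8b"))   # expected: llama-3.1-8b-instruct
```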
