Skip to content

Commit

Permalink
Two providers 'AIChatFree, AutonomousAI' are disabled
Browse files Browse the repository at this point in the history
  • Loading branch information
kqlio67 committed Jan 31, 2025
1 parent c2b7272 commit 210ac8a
Show file tree
Hide file tree
Showing 6 changed files with 21 additions and 39 deletions.
10 changes: 4 additions & 6 deletions docs/providers-and-models.md
Original file line number Diff line number Diff line change
Expand Up @@ -143,21 +143,19 @@ This document provides an overview of various AI providers and models, including
|llama-3.2-1b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-1B)|
|llama-3.2-3b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-3B)|
|llama-3.2-11b|Meta Llama|3+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/)|
|llama-3.2-70b|Meta Llama|1+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/)|
|llama-3.2-90b|Meta Llama|2+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-90B-Vision)|
|llama-3.3-70b|Meta Llama|7+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-3/)|
|llama-3.2-90b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-90B-Vision)|
|llama-3.3-70b|Meta Llama|6+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-3/)|
|mixtral-7b|Mistral|1+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-of-experts/)|
|mixtral-8x7b|Mistral|2+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-of-experts/)|
|mistral-nemo|Mistral|3+ Providers|[huggingface.co](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407)|
|hermes-2-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
|hermes-3|NousResearch|1+ Providers|[nousresearch.com](https://nousresearch.com/hermes3/)|
|phi-3.5-mini|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/Phi-3.5-mini-instruct)|
|wizardlm-2-7b|Microsoft|1+ Providers|[wizardlm.github.io](https://wizardlm.github.io/WizardLM2/)|
|wizardlm-2-8x22b|Microsoft|2+ Providers|[wizardlm.github.io](https://wizardlm.github.io/WizardLM2/)|
|gemini|Google DeepMind|1+ Providers|[deepmind.google](http://deepmind.google/technologies/gemini/)|
|gemini-exp|Google DeepMind|1+ Providers|[blog.google](https://blog.google/feed/gemini-exp-1206/)|
|gemini-1.5-flash|Google DeepMind|5+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
|gemini-1.5-pro|Google DeepMind|7+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)|
|gemini-1.5-pro|Google DeepMind|6+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)|
|gemini-2.0-flash|Google DeepMind|2+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
|gemini-2.0-flash-thinking|Google DeepMind|1+ Providers|[ai.google.dev](https://ai.google.dev/gemini-api/docs/thinking-mode)|
|claude-3-haiku|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)|
Expand All @@ -174,7 +172,7 @@ This document provides an overview of various AI providers and models, including
|qwen-2-72b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2-72B)|
|qwen-2-vl-7b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2-VL-7B)|
|qwen-2.5-72b|Qwen|3+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct)|
|qwen-2.5-coder-32b|Qwen|5+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-Coder-32B)|
|qwen-2.5-coder-32b|Qwen|4+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-Coder-32B)|
|qwen-2.5-1m-demo|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-1M-Demo)|
|qwq-32b|Qwen|4+ Providers|[huggingface.co](https://huggingface.co/Qwen/QwQ-32B-Preview)|
|qvq-72b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/QVQ-72B-Preview)|
Expand Down
2 changes: 0 additions & 2 deletions g4f/Provider/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,6 @@
from .mini_max import HailuoAI, MiniMax
from .template import OpenaiTemplate, BackendApi

from .AIChatFree import AIChatFree
from .AutonomousAI import AutonomousAI
from .Blackbox import Blackbox
from .CablyAI import CablyAI
from .ChatGLM import ChatGLM
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,17 +5,17 @@

from aiohttp import BaseConnector, ClientSession

from ..errors import RateLimitError
from ..requests import raise_for_status
from ..requests.aiohttp import get_connector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...errors import RateLimitError
from ...requests import raise_for_status
from ...requests.aiohttp import get_connector
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin


class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://aichatfree.info"

working = True
working = False
supports_stream = True
supports_message_history = True

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,10 @@
import base64
import json

from ..typing import AsyncResult, Messages
from ..requests.raise_for_status import raise_for_status
from ..providers.response import FinishReason
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...typing import AsyncResult, Messages
from ...requests.raise_for_status import raise_for_status
from ...providers.response import FinishReason
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin

class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.autonomous.ai/anon/"
Expand All @@ -19,7 +19,7 @@ class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
"summary": "https://chatgpt.autonomous.ai/api/v1/ai/summary"
}

working = True
working = False
supports_stream = True
supports_system_message = True
supports_message_history = True
Expand Down
2 changes: 2 additions & 0 deletions g4f/Provider/not_working/__init__.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
from .AI365VIP import AI365VIP
from .AIChatFree import AIChatFree
from .AiChatOnline import AiChatOnline
from .AiChats import AiChats
from .Airforce import Airforce
from .AutonomousAI import AutonomousAI
from .AIUncensored import AIUncensored
from .AmigoChat import AmigoChat
from .Aura import Aura
Expand Down
24 changes: 4 additions & 20 deletions g4f/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,6 @@
from .Provider import IterListProvider, ProviderType
from .Provider import (
### no auth required ###
AIChatFree,
AutonomousAI,
Blackbox,
CablyAI,
ChatGLM,
Expand Down Expand Up @@ -231,23 +229,17 @@ class VisionModel(Model):
best_provider = IterListProvider([Jmuz, HuggingChat, HuggingFace])
)

llama_3_2_70b = Model(
name = "llama-3.2-70b",
base_provider = "Meta Llama",
best_provider = AutonomousAI
)

llama_3_2_90b = Model(
name = "llama-3.2-90b",
base_provider = "Meta Llama",
best_provider = IterListProvider([Jmuz, AutonomousAI])
best_provider = Jmuz
)

# llama 3.3
llama_3_3_70b = Model(
name = "llama-3.3-70b",
base_provider = "Meta Llama",
best_provider = IterListProvider([Blackbox, DeepInfraChat, PollinationsAI, AutonomousAI, Jmuz, HuggingChat, HuggingFace])
best_provider = IterListProvider([Blackbox, DeepInfraChat, PollinationsAI, Jmuz, HuggingChat, HuggingFace])
)

### Mistral ###
Expand Down Expand Up @@ -276,12 +268,6 @@ class VisionModel(Model):
best_provider = Blackbox
)

hermes_3 = Model(
name = "hermes-3",
base_provider = "NousResearch",
best_provider = AutonomousAI
)


### Microsoft ###
# phi
Expand Down Expand Up @@ -329,7 +315,7 @@ class VisionModel(Model):
gemini_1_5_pro = Model(
name = 'gemini-1.5-pro',
base_provider = 'Google DeepMind',
best_provider = IterListProvider([Blackbox, Jmuz, GPROChat, AIChatFree, Gemini, GeminiPro, Liaobots])
best_provider = IterListProvider([Blackbox, Jmuz, GPROChat, Gemini, GeminiPro, Liaobots])
)

# gemini-2.0
Expand Down Expand Up @@ -436,7 +422,7 @@ class VisionModel(Model):
qwen_2_5_coder_32b = Model(
name = 'qwen-2.5-coder-32b',
base_provider = 'Qwen',
best_provider = IterListProvider([DeepInfraChat, PollinationsAI, AutonomousAI, Jmuz, HuggingChat])
best_provider = IterListProvider([DeepInfraChat, PollinationsAI, Jmuz, HuggingChat])
)
qwen_2_5_1m = Model(
name = 'qwen-2.5-1m-demo',
Expand Down Expand Up @@ -665,7 +651,6 @@ class ModelUtils:
llama_3_2_1b.name: llama_3_2_1b,
llama_3_2_3b.name: llama_3_2_3b,
llama_3_2_11b.name: llama_3_2_11b,
llama_3_2_70b.name: llama_3_2_70b,
llama_3_2_90b.name: llama_3_2_90b,

# llama-3.3
Expand All @@ -678,7 +663,6 @@ class ModelUtils:

### NousResearch ###
hermes_2_dpo.name: hermes_2_dpo,
hermes_3.name: hermes_3,

### Microsoft ###
# phi
Expand Down

0 comments on commit 210ac8a

Please sign in to comment.