From 3b091647fafb7bbc6e5ea12b1dd3d25281b3657d Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Fri, 31 Jan 2025 23:49:08 +0000
Subject: Added new models and providers, improved interface components,
improved providers (#2646)
* Fix conflicts in g4f/gui/client/static/css/style.css, g4f/gui/client/static/js/chat.v1.js, g4f/models.py, and g4f/Provider/.
* Update g4f/Provider/Blackbox.py
* Update g4f/Provider/PollinationsAI.py
* Update docs/providers-and-models.md
* Disabled provider 'AIUncensored'
* Disabled providers 'AIChatFree' and 'AutonomousAI'
---------
Co-authored-by: kqlio67 <>
---
g4f/Provider/AIChatFree.py | 73 -------------------
g4f/Provider/AIUncensored.py | 116 ------------------------------
g4f/Provider/AutonomousAI.py | 88 -----------------------
g4f/Provider/Blackbox.py | 112 ++++++++++++++---------------
g4f/Provider/DeepInfraChat.py | 2 +-
g4f/Provider/Glider.py | 120 +++++++++++++++++++++++++++++++
g4f/Provider/PerplexityLabs.py | 4 --
g4f/Provider/PollinationsAI.py | 57 ++++++++-------
g4f/Provider/__init__.py | 4 +-
g4f/Provider/not_working/AIChatFree.py | 73 +++++++++++++++++++
g4f/Provider/not_working/AIUncensored.py | 116 ++++++++++++++++++++++++++++++
g4f/Provider/not_working/AutonomousAI.py | 88 +++++++++++++++++++++++
g4f/Provider/not_working/__init__.py | 3 +
g4f/gui/client/static/css/style.css | 43 +++++++++++
g4f/gui/client/static/js/chat.v1.js | 56 +++++++++++----
g4f/models.py | 75 ++++++++++---------
16 files changed, 608 insertions(+), 422 deletions(-)
delete mode 100644 g4f/Provider/AIChatFree.py
delete mode 100644 g4f/Provider/AIUncensored.py
delete mode 100644 g4f/Provider/AutonomousAI.py
create mode 100644 g4f/Provider/Glider.py
create mode 100644 g4f/Provider/not_working/AIChatFree.py
create mode 100644 g4f/Provider/not_working/AIUncensored.py
create mode 100644 g4f/Provider/not_working/AutonomousAI.py
diff --git a/g4f/Provider/AIChatFree.py b/g4f/Provider/AIChatFree.py
deleted file mode 100644
index 0e6f394a..00000000
--- a/g4f/Provider/AIChatFree.py
+++ /dev/null
@@ -1,73 +0,0 @@
-from __future__ import annotations
-
-import time
-from hashlib import sha256
-
-from aiohttp import BaseConnector, ClientSession
-
-from ..errors import RateLimitError
-from ..requests import raise_for_status
-from ..requests.aiohttp import get_connector
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-
-
-class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://aichatfree.info"
-
- working = True
- supports_stream = True
- supports_message_history = True
-
- default_model = 'gemini-1.5-pro'
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- connector: BaseConnector = None,
- **kwargs,
- ) -> AsyncResult:
- headers = {
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
- "Accept": "*/*",
- "Accept-Language": "en-US,en;q=0.5",
- "Accept-Encoding": "gzip, deflate, br",
- "Content-Type": "text/plain;charset=UTF-8",
- "Referer": f"{cls.url}/",
- "Origin": cls.url,
- }
- async with ClientSession(
- connector=get_connector(connector, proxy), headers=headers
- ) as session:
- timestamp = int(time.time() * 1e3)
- data = {
- "messages": [
- {
- "role": "model" if message["role"] == "assistant" else "user",
- "parts": [{"text": message["content"]}],
- }
- for message in messages
- ],
- "time": timestamp,
- "pass": None,
- "sign": generate_signature(timestamp, messages[-1]["content"]),
- }
- async with session.post(
- f"{cls.url}/api/generate", json=data, proxy=proxy
- ) as response:
- if response.status == 500:
- if "Quota exceeded" in await response.text():
- raise RateLimitError(
- f"Response {response.status}: Rate limit reached"
- )
- await raise_for_status(response)
- async for chunk in response.content.iter_any():
- yield chunk.decode(errors="ignore")
-
-
-def generate_signature(time: int, text: str, secret: str = ""):
- message = f"{time}:{text}:{secret}"
- return sha256(message.encode()).hexdigest()
diff --git a/g4f/Provider/AIUncensored.py b/g4f/Provider/AIUncensored.py
deleted file mode 100644
index 8ff38c22..00000000
--- a/g4f/Provider/AIUncensored.py
+++ /dev/null
@@ -1,116 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import time
-import hmac
-import hashlib
-import json
-import random
-
-from ..typing import AsyncResult, Messages
-from ..requests.raise_for_status import raise_for_status
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_prompt
-from ..providers.response import FinishReason
-
-class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://www.aiuncensored.info/ai_uncensored"
- api_key = "62852b00cb9e44bca86f0ec7e7455dc6"
-
- working = True
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- default_model = "hermes3-70b"
- models = [default_model]
-
- model_aliases = {"hermes-3": "hermes3-70b"}
-
- @staticmethod
- def calculate_signature(timestamp: str, json_dict: dict) -> str:
- message = f"{timestamp}{json.dumps(json_dict)}"
- secret_key = b'your-super-secret-key-replace-in-production'
- signature = hmac.new(
- secret_key,
- message.encode('utf-8'),
- hashlib.sha256
- ).hexdigest()
- return signature
-
- @staticmethod
- def get_server_url() -> str:
- servers = [
- "https://llm-server-nov24-ibak.onrender.com",
- "https://llm-server-nov24-qv2w.onrender.com",
- "https://llm-server-nov24.onrender.com"
- ]
- return random.choice(servers)
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- stream: bool = False,
- proxy: str = None,
- api_key: str = None,
- **kwargs
- ) -> AsyncResult:
- model = cls.get_model(model)
-
- timestamp = str(int(time.time()))
-
- json_dict = {
- "messages": [{"role": "user", "content": format_prompt(messages)}],
- "model": model,
- "stream": stream
- }
-
- signature = cls.calculate_signature(timestamp, json_dict)
-
- headers = {
- 'accept': '*/*',
- 'accept-language': 'en-US,en;q=0.9',
- 'content-type': 'application/json',
- 'origin': 'https://www.aiuncensored.info',
- 'referer': 'https://www.aiuncensored.info/',
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
- 'x-api-key': cls.api_key,
- 'x-timestamp': timestamp,
- 'x-signature': signature
- }
-
- url = f"{cls.get_server_url()}/api/chat"
-
- async with ClientSession(headers=headers) as session:
- async with session.post(url, json=json_dict, proxy=proxy) as response:
- await raise_for_status(response)
-
- if stream:
- full_response = ""
- async for line in response.content:
- if line:
- try:
- line_text = line.decode('utf-8')
- if line_text.startswith('data: '):
- data = line_text[6:]
- if data == '[DONE]':
- yield FinishReason("stop")
- break
- try:
- json_data = json.loads(data)
- if 'data' in json_data:
- yield json_data['data']
- full_response += json_data['data']
- except json.JSONDecodeError:
- continue
- except UnicodeDecodeError:
- continue
- if full_response:
- yield FinishReason("length")
- else:
- response_json = await response.json()
- if 'content' in response_json:
- yield response_json['content']
- yield FinishReason("length")
diff --git a/g4f/Provider/AutonomousAI.py b/g4f/Provider/AutonomousAI.py
deleted file mode 100644
index 88e1eeea..00000000
--- a/g4f/Provider/AutonomousAI.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from __future__ import annotations
-
-from aiohttp import ClientSession
-import base64
-import json
-
-from ..typing import AsyncResult, Messages
-from ..requests.raise_for_status import raise_for_status
-from ..providers.response import FinishReason
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-
-class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
- url = "https://www.autonomous.ai/anon/"
- api_endpoints = {
- "llama": "https://chatgpt.autonomous.ai/api/v1/ai/chat",
- "qwen_coder": "https://chatgpt.autonomous.ai/api/v1/ai/chat",
- "hermes": "https://chatgpt.autonomous.ai/api/v1/ai/chat-hermes",
- "vision": "https://chatgpt.autonomous.ai/api/v1/ai/chat-vision",
- "summary": "https://chatgpt.autonomous.ai/api/v1/ai/summary"
- }
-
- working = True
- supports_stream = True
- supports_system_message = True
- supports_message_history = True
-
- default_model = "llama"
- models = [default_model, "qwen_coder", "hermes", "vision", "summary"]
-
- model_aliases = {
- "llama-3.3-70b": default_model,
- "qwen-2.5-coder-32b": "qwen_coder",
- "hermes-3": "hermes",
- "llama-3.2-90b": "vision",
- "llama-3.2-70b": "summary",
- }
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: Messages,
- proxy: str = None,
- stream: bool = False,
- **kwargs
- ) -> AsyncResult:
- api_endpoint = cls.api_endpoints[model]
- headers = {
- 'accept': '*/*',
- 'accept-language': 'en-US,en;q=0.9',
- 'content-type': 'application/json',
- 'country-code': 'US',
- 'origin': 'https://www.autonomous.ai',
- 'referer': 'https://www.autonomous.ai/',
- 'time-zone': 'America/New_York',
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
- }
-
- async with ClientSession(headers=headers) as session:
- message_json = json.dumps(messages)
- encoded_message = base64.b64encode(message_json.encode()).decode(errors="ignore")
-
- data = {
- "messages": encoded_message,
- "threadId": model,
- "stream": stream,
- "aiAgent": model
- }
-
- async with session.post(api_endpoint, json=data, proxy=proxy) as response:
- await raise_for_status(response)
- async for chunk in response.content:
- if chunk:
- chunk_str = chunk.decode()
- if chunk_str == "data: [DONE]":
- continue
-
- try:
- # Remove "data: " prefix and parse JSON
- chunk_data = json.loads(chunk_str.replace("data: ", ""))
- if "choices" in chunk_data and chunk_data["choices"]:
- delta = chunk_data["choices"][0].get("delta", {})
- if "content" in delta and delta["content"]:
- yield delta["content"]
- if "finish_reason" in chunk_data and chunk_data["finish_reason"]:
- yield FinishReason(chunk_data["finish_reason"])
- except json.JSONDecodeError:
- continue
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 09d8c196..42b7cf22 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -1,12 +1,13 @@
from __future__ import annotations
from aiohttp import ClientSession
-
import re
import json
import random
import string
from pathlib import Path
+from typing import Optional
+from datetime import datetime, timezone
from ..typing import AsyncResult, Messages, ImagesType
from ..requests.raise_for_status import raise_for_status
@@ -14,7 +15,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..image import ImageResponse, to_data_uri
from ..cookies import get_cookies_dir
from .helper import format_prompt, format_image_prompt
-from ..providers.response import FinishReason, JsonConversation, Reasoning
+from ..providers.response import JsonConversation, Reasoning
class Conversation(JsonConversation):
validated_value: str = None
@@ -38,13 +39,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
default_vision_model = default_model
default_image_model = 'ImageGeneration'
image_models = [default_image_model, "ImageGeneration2"]
- vision_models = [default_vision_model, 'gpt-4o', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
+ vision_models = [default_vision_model, 'gpt-4o', 'gemini-pro', 'deepseek-v3', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
+ reasoning_models = ['deepseek-r1']
userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'deepseek-r1', 'deepseek-v3', 'blackboxai-pro']
agentMode = {
'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
- #
'Meta-Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
'Mistral-(7B)-Instruct-v0.2': {'mode': True, 'id': "mistralai/Mistral-7B-Instruct-v0.2", 'name': "Mistral-(7B)-Instruct-v0.2"},
'DeepSeek-LLM-Chat-(67B)': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"},
@@ -58,7 +59,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405"},
- #
'Python Agent': {'mode': True, 'id': "Python Agent"},
'Java Agent': {'mode': True, 'id': "Java Agent"},
'JavaScript Agent': {'mode': True, 'id': "JavaScript Agent"},
@@ -72,11 +72,8 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'React Agent': {'mode': True, 'id': "React Agent"},
'Xcode Agent': {'mode': True, 'id': "Xcode Agent"},
'AngularJS Agent': {'mode': True, 'id': "AngularJS Agent"},
- #
'blackboxai-pro': {'mode': True, 'id': "BLACKBOXAI-PRO"},
- #
'repomap': {'mode': True, 'id': "repomap"},
- #
'Heroku Agent': {'mode': True, 'id': "Heroku Agent"},
'Godot Agent': {'mode': True, 'id': "Godot Agent"},
'Go Agent': {'mode': True, 'id': "Go Agent"},
@@ -96,10 +93,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
'builder Agent': {'mode': True, 'id': "builder Agent"},
}
- models = list(dict.fromkeys([default_model, *userSelectedModel, *image_models, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))
+ models = list(dict.fromkeys([default_model, *userSelectedModel, *reasoning_models, *image_models, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))
model_aliases = {
- ### chat ###
"gpt-4": "gpt-4o",
"gemini-1.5-flash": "gemini-1.5-flash",
"gemini-1.5-pro": "gemini-pro",
@@ -110,22 +106,11 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"dbrx-instruct": "DBRX-Instruct",
"qwq-32b": "Qwen-QwQ-32B-Preview",
"hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
- "deepseek-chat": "deepseek-v3",
-
- ### image ###
"flux": "ImageGeneration",
- "flux": "ImageGeneration2",
}
@classmethod
- async def fetch_validated(
- cls,
- url: str = "https://www.blackbox.ai",
- force_refresh: bool = False
- ) -> Optional[str]:
- """
- Asynchronously retrieves the validated_value from the specified URL.
- """
+ async def fetch_validated(cls, url: str = "https://www.blackbox.ai", force_refresh: bool = False) -> Optional[str]:
cache_file = Path(get_cookies_dir()) / 'blackbox.json'
if not force_refresh and cache_file.exists():
@@ -141,14 +126,12 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
uuid_pattern = r'["\']([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})["\']'
def is_valid_context(text: str) -> bool:
- """Checks if the context is valid."""
return any(char + '=' in text for char in 'abcdefghijklmnopqrstuvwxyz')
async with ClientSession() as session:
try:
async with session.get(url) as response:
if response.status != 200:
- print("Failed to load the page.")
return None
page_content = await response.text()
@@ -167,7 +150,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
if is_valid_context(context):
validated_value = match.group(1)
- # Save to cache
cache_file.parent.mkdir(exist_ok=True)
try:
with open(cache_file, 'w') as f:
@@ -183,10 +165,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
return None
@classmethod
- def generate_chat_id(cls) -> str:
- """Generate a random chat ID"""
+ def generate_id(cls, length: int = 7) -> str:
chars = string.ascii_letters + string.digits
- return ''.join(random.choice(chars) for _ in range(7))
+ return ''.join(random.choice(chars) for _ in range(length))
@classmethod
async def create_async_generator(
@@ -216,6 +197,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
async with ClientSession(headers=headers) as session:
if model == "ImageGeneration2":
+ prompt = format_image_prompt(messages, prompt)
data = {
"query": format_image_prompt(messages, prompt),
"agentMode": True
@@ -235,18 +217,28 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
image_url_match = re.search(r'!\[.*?\]\((.*?)\)', response_json["markdown"])
if image_url_match:
image_url = image_url_match.group(1)
- yield ImageResponse(images=[image_url], alt=prompt)
+ yield ImageResponse(images=[image_url], alt=format_image_prompt(messages, prompt))
return
if conversation is None or not hasattr(conversation, "chat_id"):
conversation = Conversation(model)
conversation.validated_value = await cls.fetch_validated()
- conversation.chat_id = cls.generate_chat_id()
+ conversation.chat_id = cls.generate_id()
conversation.message_history = []
-
- current_messages = [{"id": conversation.chat_id, "content": format_prompt(messages), "role": "user"}]
- conversation.message_history.extend(messages)
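+ # Reuse the conversation chat_id for the first user message and give every other
+ # message a fresh random id; a trailing assistant message gets a createdAt timestamp.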
+ current_messages = []
+ for i, msg in enumerate(messages):
+ msg_id = conversation.chat_id if i == 0 and msg["role"] == "user" else cls.generate_id()
+ current_msg = {
+ "id": msg_id,
+ "content": msg["content"],
+ "role": msg["role"]
+ }
+ if msg["role"] == "assistant" and i == len(messages)-1:
+ current_time = datetime.now(timezone.utc).isoformat(timespec='milliseconds').replace('+00:00', 'Z')
+ current_msg["createdAt"] = current_time
+ current_messages.append(current_msg)
+
if images is not None:
current_messages[-1]['data'] = {
"imagesData": [
@@ -279,6 +271,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"clickedAnswer3": False,
"clickedForceWebSearch": False,
"visitFromDelta": False,
+ "isMemoryEnabled": False,
"mobileClient": False,
"userSelectedModel": model if model in cls.userSelectedModel else None,
"validated": conversation.validated_value,
@@ -288,6 +281,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
"domains": None,
"vscodeClient": False,
"codeInterpreterMode": False,
+ "customProfile": {"name": "", "occupation": "", "traits": [], "additionalInfo": "", "enableNewChats": False},
"webSearchMode": web_search
}
@@ -300,21 +294,36 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
if not text_to_yield or text_to_yield.isspace():
return
- full_response = ""
-
if model in cls.image_models:
image_url_match = re.search(r'!\[.*?\]\((.*?)\)', text_to_yield)
if image_url_match:
image_url = image_url_match.group(1)
- yield ImageResponse(image_url, format_image_prompt(messages, prompt))
+ prompt = format_image_prompt(messages, prompt)
+ yield ImageResponse(images=[image_url], alt=prompt)
else:
- if "<think>" in text_to_yield and "</think>" in text_to_yield:
- parts = text_to_yield.split('<think>', 1)
- yield parts[0]
- reasoning_parts = parts[1].split('</think>', 1)
- yield Reasoning(f"<think>{reasoning_parts[0]}</think>")
- yield reasoning_parts[1]
- full_response = text_to_yield
+ if model in cls.reasoning_models and "\n\n\n" in text_to_yield:
+ think_split = text_to_yield.split("\n\n\n", 1)
+ if len(think_split) > 1:
+ think_content, answer = think_split[0].strip(), think_split[1].strip()
+ yield Reasoning(status=think_content)
+ yield answer
+ else:
+ yield text_to_yield
+ elif "<think>" in text_to_yield:
+ pre_think, rest = text_to_yield.split('<think>', 1)
+ think_content, post_think = rest.split('</think>', 1)
+
+ pre_think = pre_think.strip()
+ think_content = think_content.strip()
+ post_think = post_think.strip()
+
+ if pre_think:
+ yield pre_think
+ if think_content:
+ yield Reasoning(status=think_content)
+ if post_think:
+ yield post_think
+
elif "Generated by BLACKBOX.AI" in text_to_yield:
conversation.validated_value = await cls.fetch_validated(force_refresh=True)
if conversation.validated_value:
@@ -327,24 +336,13 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
if new_text and not new_text.isspace():
yield new_text
- full_response = new_text
else:
if text_to_yield and not text_to_yield.isspace():
yield text_to_yield
- full_response = text_to_yield
else:
if text_to_yield and not text_to_yield.isspace():
yield text_to_yield
- full_response = text_to_yield
- if full_response:
- if max_tokens and len(full_response) >= max_tokens:
- reason = "length"
- else:
- reason = "stop"
-
- if return_conversation:
- conversation.message_history.append({"role": "assistant", "content": full_response})
- yield conversation
-
- yield FinishReason(reason)
+ if return_conversation:
+ conversation.message_history.append({"role": "assistant", "content": text_to_yield})
+ yield conversation
diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py
index 9b968e9f..8c3b8af5 100644
--- a/g4f/Provider/DeepInfraChat.py
+++ b/g4f/Provider/DeepInfraChat.py
@@ -27,7 +27,7 @@ class DeepInfraChat(OpenaiTemplate):
"llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
"llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
- "deepseek-chat": "deepseek-ai/DeepSeek-V3",
+ "deepseek-v3": "deepseek-ai/DeepSeek-V3",
"qwq-32b": "Qwen/QwQ-32B-Preview",
"wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
"wizardlm-2-7b": "microsoft/WizardLM-2-7B",
diff --git a/g4f/Provider/Glider.py b/g4f/Provider/Glider.py
new file mode 100644
index 00000000..dc85b3ba
--- /dev/null
+++ b/g4f/Provider/Glider.py
@@ -0,0 +1,120 @@
+from __future__ import annotations
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..requests.raise_for_status import raise_for_status
+from ..providers.response import FinishReason, Reasoning
+from .helper import format_prompt
+
+class Glider(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Glider"
+ url = "https://glider.so"
+ api_endpoint = "https://glider.so/api/chat"
+
+ working = True
+ needs_auth = False
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'chat-llama-3-1-70b'
+ reasoning_models = ['deepseek-ai/DeepSeek-R1']
+ models = [
+ 'chat-llama-3-1-70b',
+ 'chat-llama-3-1-8b',
+ 'chat-llama-3-2-3b',
+ ] + reasoning_models
+
+ model_aliases = {
+ "llama-3.1-70b": "chat-llama-3-1-70b",
+ "llama-3.1-8b": "chat-llama-3-1-8b",
+ "llama-3.2-3b": "chat-llama-3-2-3b",
+ "deepseek-r1": "deepseek-ai/DeepSeek-R1",
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36"
+ }
+
+ async with ClientSession(headers=headers) as session:
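+ # The whole conversation is flattened by format_prompt into a single user message.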
+ data = {
+ "messages": [{
+ "role": "user",
+ "content": format_prompt(messages),
+ "id": "",
+ "chatId": "",
+ "createdOn": "",
+ "model": None
+ }],
+ "model": model
+ }
+
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ await raise_for_status(response)
+
+ is_reasoning = False
+ current_reasoning = ""
+
+ async for chunk in response.content:
+ if not chunk:
+ continue
+
+ text = chunk.decode(errors="ignore")
+
+ if not text.startswith("data: "):
+ continue
+
+ if "[DONE]" in text:
+ if is_reasoning and current_reasoning:
+ yield Reasoning(status=current_reasoning.strip())
+ yield FinishReason("stop")
+ return
+
+ try:
+ json_data = json.loads(text[6:])
+ content = json_data["choices"][0].get("delta", {}).get("content", "")
+
+ if model in cls.reasoning_models:
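+ # Buffer the text between <think> and </think> and emit it as one Reasoning status.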
+ if "<think>" in content:
+ content = content.replace("<think>", "")
+ is_reasoning = True
+ current_reasoning = content
+ continue
+
+ if "</think>" in content:
+ content = content.replace("</think>", "")
+ is_reasoning = False
+ current_reasoning += content
+ yield Reasoning(status=current_reasoning.strip())
+ current_reasoning = ""
+ continue
+
+ if is_reasoning:
+ current_reasoning += content
+ continue
+
+ if content:
+ yield content
+
+ except json.JSONDecodeError:
+ continue
+ except Exception:
+ yield FinishReason("error")
+ return
diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py
index d8477064..1d06784d 100644
--- a/g4f/Provider/PerplexityLabs.py
+++ b/g4f/Provider/PerplexityLabs.py
@@ -21,10 +21,6 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
"sonar",
"sonar-reasoning",
]
- model_aliases = {
- "sonar-online": default_model,
- "sonar-chat": default_model,
- }
@classmethod
async def create_async_generator(
diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index 12268288..af85eef7 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -13,7 +13,7 @@ from ..typing import AsyncResult, Messages, ImagesType
from ..image import to_data_uri
from ..requests.raise_for_status import raise_for_status
from ..requests.aiohttp import get_connector
-from ..providers.response import ImageResponse, FinishReason, Usage
+from ..providers.response import ImageResponse, FinishReason, Usage, Reasoning
DEFAULT_HEADERS = {
'Accept': '*/*',
@@ -40,7 +40,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
default_vision_model = "gpt-4o"
extra_image_models = ["midjourney", "dall-e-3", "flux-pro"]
vision_models = [default_vision_model, "gpt-4o-mini"]
- extra_text_models = ["claude", "claude-email", "deepseek-reasoner", "p1"] + vision_models
+ reasoning_models = ['deepseek-reasoner', 'deepseek-r1']
+ extra_text_models = ["claude", "claude-email", "p1"] + vision_models + reasoning_models
model_aliases = {
### Text Models ###
"gpt-4o-mini": "openai",
@@ -50,11 +51,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
"qwen-2.5-coder-32b": "qwen-coder",
"llama-3.3-70b": "llama",
"mistral-nemo": "mistral",
- #"mistral-nemo": "unity", # bug with image url response
- #"gpt-4o-mini": "midijourney", # bug with the answer
"gpt-4o-mini": "rtist",
"gpt-4o": "searchgpt",
- #"mistral-nemo": "evil",
"gpt-4o-mini": "p1",
"deepseek-chat": "deepseek",
"deepseek-chat": "claude-hybridspace",
@@ -72,24 +70,25 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
@classmethod
def get_models(cls, **kwargs):
- # Fetch image models if not cached
if not cls.image_models:
url = "https://image.pollinations.ai/models"
response = requests.get(url)
raise_for_status(response)
cls.image_models = response.json()
- cls.image_models.extend(cls.extra_image_models)
-
- # Fetch text models if not cached
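+ # Merge fetched and extra image models, dropping duplicates while preserving order.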
+ cls.image_models = list(dict.fromkeys([*cls.image_models, *cls.extra_image_models]))
+
if not cls.text_models:
url = "https://text.pollinations.ai/models"
response = requests.get(url)
raise_for_status(response)
- cls.text_models = [model.get("name") for model in response.json()]
- cls.text_models.extend(cls.extra_text_models)
-
- # Return combined models
- return cls.text_models + cls.image_models
+ original_text_models = [model.get("name") for model in response.json()]
+ combined_text = cls.extra_text_models + [
+ model for model in original_text_models
+ if model not in cls.extra_text_models
+ ]
+ cls.text_models = list(dict.fromkeys(combined_text))
+
+ return list(dict.fromkeys([*cls.text_models, *cls.image_models]))
@classmethod
async def create_async_generator(
@@ -97,7 +96,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
model: str,
messages: Messages,
proxy: str = None,
- # Image specific parameters
prompt: str = None,
width: int = 1024,
height: int = 1024,
@@ -106,7 +104,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
private: bool = False,
enhance: bool = False,
safe: bool = False,
- # Text specific parameters
images: ImagesType = None,
temperature: float = None,
presence_penalty: float = None,
@@ -122,8 +119,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
if not cache and seed is None:
seed = random.randint(0, 100000)
- # Check if models
- # Image generation
if model in cls.image_models:
yield await cls._generate_image(
model=model,
@@ -138,7 +133,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
safe=safe
)
else:
- # Text generation
async for result in cls._generate_text(
model=model,
messages=messages,
@@ -227,7 +221,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
"top_p": top_p,
"frequency_penalty": frequency_penalty,
"jsonMode": jsonMode,
- "stream": False, # To get more informations like Usage and FinishReason
+ "stream": False,
"seed": seed,
"cache": cache
}
@@ -235,24 +229,29 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
await raise_for_status(response)
async for line in response.content:
decoded_chunk = line.decode(errors="replace")
- # If [DONE].
if "data: [DONE]" in decoded_chunk:
break
- # Processing JSON format
try:
- # Remove the prefix “data: “ and parse JSON
json_str = decoded_chunk.replace("data:", "").strip()
data = json.loads(json_str)
choice = data["choices"][0]
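+ # Non-streaming responses put the payload in "message"; streamed chunks use "delta".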
+ message = choice.get("message") or choice.get("delta", {})
+
+ # Handle reasoning content
+ if model in cls.reasoning_models:
+ if "reasoning_content" in message:
+ yield Reasoning(status=message["reasoning_content"].strip())
+
if "usage" in data:
yield Usage(**data["usage"])
- if "message" in choice and "content" in choice["message"] and choice["message"]["content"]:
- yield choice["message"]["content"].replace("\\(", "(").replace("\\)", ")")
- elif "delta" in choice and "content" in choice["delta"] and choice["delta"]["content"]:
- yield choice["delta"]["content"].replace("\\(", "(").replace("\\)", ")")
- if "finish_reason" in choice and choice["finish_reason"] is not None:
+ content = message.get("content", "")
+ if content:
+ yield content.replace("\\(", "(").replace("\\)", ")")
+ if "finish_reason" in choice and choice["finish_reason"]:
yield FinishReason(choice["finish_reason"])
break
except json.JSONDecodeError:
yield decoded_chunk.strip()
- continue
+ except Exception as e:
+ yield FinishReason("error")
+ break
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 55ee07e0..c7ad606f 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -14,9 +14,6 @@ from .hf_space import HuggingSpace
from .mini_max import HailuoAI, MiniMax
from .template import OpenaiTemplate, BackendApi
-from .AIChatFree import AIChatFree
-from .AIUncensored import AIUncensored
-from .AutonomousAI import AutonomousAI
from .Blackbox import Blackbox
from .CablyAI import CablyAI
from .ChatGLM import ChatGLM
@@ -31,6 +28,7 @@ from .DeepInfraChat import DeepInfraChat
from .Free2GPT import Free2GPT
from .FreeGpt import FreeGpt
from .GizAI import GizAI
+from .Glider import Glider
from .GPROChat import GPROChat
from .ImageLabs import ImageLabs
from .Jmuz import Jmuz
diff --git a/g4f/Provider/not_working/AIChatFree.py b/g4f/Provider/not_working/AIChatFree.py
new file mode 100644
index 00000000..b1d6b40a
--- /dev/null
+++ b/g4f/Provider/not_working/AIChatFree.py
@@ -0,0 +1,73 @@
+from __future__ import annotations
+
+import time
+from hashlib import sha256
+
+from aiohttp import BaseConnector, ClientSession
+
+from ...errors import RateLimitError
+from ...requests import raise_for_status
+from ...requests.aiohttp import get_connector
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://aichatfree.info"
+
+ working = False
+ supports_stream = True
+ supports_message_history = True
+
+ default_model = 'gemini-1.5-pro'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ connector: BaseConnector = None,
+ **kwargs,
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.5",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Content-Type": "text/plain;charset=UTF-8",
+ "Referer": f"{cls.url}/",
+ "Origin": cls.url,
+ }
+ async with ClientSession(
+ connector=get_connector(connector, proxy), headers=headers
+ ) as session:
+ timestamp = int(time.time() * 1e3)
+ data = {
+ "messages": [
+ {
+ "role": "model" if message["role"] == "assistant" else "user",
+ "parts": [{"text": message["content"]}],
+ }
+ for message in messages
+ ],
+ "time": timestamp,
+ "pass": None,
+ "sign": generate_signature(timestamp, messages[-1]["content"]),
+ }
+ async with session.post(
+ f"{cls.url}/api/generate", json=data, proxy=proxy
+ ) as response:
+ if response.status == 500:
+ if "Quota exceeded" in await response.text():
+ raise RateLimitError(
+ f"Response {response.status}: Rate limit reached"
+ )
+ await raise_for_status(response)
+ async for chunk in response.content.iter_any():
+ yield chunk.decode(errors="ignore")
+
+
+def generate_signature(time: int, text: str, secret: str = ""):
+ message = f"{time}:{text}:{secret}"
+ return sha256(message.encode()).hexdigest()
diff --git a/g4f/Provider/not_working/AIUncensored.py b/g4f/Provider/not_working/AIUncensored.py
new file mode 100644
index 00000000..686d5059
--- /dev/null
+++ b/g4f/Provider/not_working/AIUncensored.py
@@ -0,0 +1,116 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import time
+import hmac
+import hashlib
+import json
+import random
+
+from ...typing import AsyncResult, Messages
+from ...requests.raise_for_status import raise_for_status
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+from ...providers.response import FinishReason
+
+class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.aiuncensored.info/ai_uncensored"
+ api_key = "62852b00cb9e44bca86f0ec7e7455dc6"
+
+ working = False
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = "hermes3-70b"
+ models = [default_model]
+
+ model_aliases = {"hermes-3": "hermes3-70b"}
+
+ @staticmethod
+ def calculate_signature(timestamp: str, json_dict: dict) -> str:
+ message = f"{timestamp}{json.dumps(json_dict)}"
+ secret_key = b'your-super-secret-key-replace-in-production'
+ signature = hmac.new(
+ secret_key,
+ message.encode('utf-8'),
+ hashlib.sha256
+ ).hexdigest()
+ return signature
+
+ @staticmethod
+ def get_server_url() -> str:
+ servers = [
+ "https://llm-server-nov24-ibak.onrender.com",
+ "https://llm-server-nov24-qv2w.onrender.com",
+ "https://llm-server-nov24.onrender.com"
+ ]
+ return random.choice(servers)
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ proxy: str = None,
+ api_key: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ timestamp = str(int(time.time()))
+
+ json_dict = {
+ "messages": [{"role": "user", "content": format_prompt(messages)}],
+ "model": model,
+ "stream": stream
+ }
+
+ signature = cls.calculate_signature(timestamp, json_dict)
+
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'content-type': 'application/json',
+ 'origin': 'https://www.aiuncensored.info',
+ 'referer': 'https://www.aiuncensored.info/',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
+ 'x-api-key': cls.api_key,
+ 'x-timestamp': timestamp,
+ 'x-signature': signature
+ }
+
+ url = f"{cls.get_server_url()}/api/chat"
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(url, json=json_dict, proxy=proxy) as response:
+ await raise_for_status(response)
+
+ if stream:
+ full_response = ""
+ async for line in response.content:
+ if line:
+ try:
+ line_text = line.decode('utf-8')
+ if line_text.startswith('data: '):
+ data = line_text[6:]
+ if data == '[DONE]':
+ yield FinishReason("stop")
+ break
+ try:
+ json_data = json.loads(data)
+ if 'data' in json_data:
+ yield json_data['data']
+ full_response += json_data['data']
+ except json.JSONDecodeError:
+ continue
+ except UnicodeDecodeError:
+ continue
+ if full_response:
+ yield FinishReason("length")
+ else:
+ response_json = await response.json()
+ if 'content' in response_json:
+ yield response_json['content']
+ yield FinishReason("length")
diff --git a/g4f/Provider/not_working/AutonomousAI.py b/g4f/Provider/not_working/AutonomousAI.py
new file mode 100644
index 00000000..340dfae5
--- /dev/null
+++ b/g4f/Provider/not_working/AutonomousAI.py
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import base64
+import json
+
+from ...typing import AsyncResult, Messages
+from ...requests.raise_for_status import raise_for_status
+from ...providers.response import FinishReason
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.autonomous.ai/anon/"
+ api_endpoints = {
+ "llama": "https://chatgpt.autonomous.ai/api/v1/ai/chat",
+ "qwen_coder": "https://chatgpt.autonomous.ai/api/v1/ai/chat",
+ "hermes": "https://chatgpt.autonomous.ai/api/v1/ai/chat-hermes",
+ "vision": "https://chatgpt.autonomous.ai/api/v1/ai/chat-vision",
+ "summary": "https://chatgpt.autonomous.ai/api/v1/ai/summary"
+ }
+
+ working = False
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = "llama"
+ models = [default_model, "qwen_coder", "hermes", "vision", "summary"]
+
+ model_aliases = {
+ "llama-3.3-70b": default_model,
+ "qwen-2.5-coder-32b": "qwen_coder",
+ "hermes-3": "hermes",
+ "llama-3.2-90b": "vision",
+ "llama-3.2-70b": "summary",
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ stream: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ api_endpoint = cls.api_endpoints[model]
+ headers = {
+ 'accept': '*/*',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'content-type': 'application/json',
+ 'country-code': 'US',
+ 'origin': 'https://www.autonomous.ai',
+ 'referer': 'https://www.autonomous.ai/',
+ 'time-zone': 'America/New_York',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ message_json = json.dumps(messages)
+ encoded_message = base64.b64encode(message_json.encode()).decode(errors="ignore")
+
+ data = {
+ "messages": encoded_message,
+ "threadId": model,
+ "stream": stream,
+ "aiAgent": model
+ }
+
+ async with session.post(api_endpoint, json=data, proxy=proxy) as response:
+ await raise_for_status(response)
+ async for chunk in response.content:
+ if chunk:
+ chunk_str = chunk.decode()
+ if chunk_str == "data: [DONE]":
+ continue
+
+ try:
+ # Remove "data: " prefix and parse JSON
+ chunk_data = json.loads(chunk_str.replace("data: ", ""))
+ if "choices" in chunk_data and chunk_data["choices"]:
+ delta = chunk_data["choices"][0].get("delta", {})
+ if "content" in delta and delta["content"]:
+ yield delta["content"]
+ if "finish_reason" in chunk_data and chunk_data["finish_reason"]:
+ yield FinishReason(chunk_data["finish_reason"])
+ except json.JSONDecodeError:
+ continue
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
index 3e0e04e2..6dfe69f7 100644
--- a/g4f/Provider/not_working/__init__.py
+++ b/g4f/Provider/not_working/__init__.py
@@ -1,7 +1,10 @@
from .AI365VIP import AI365VIP
+from .AIChatFree import AIChatFree
from .AiChatOnline import AiChatOnline
from .AiChats import AiChats
from .Airforce import Airforce
+from .AutonomousAI import AutonomousAI
+from .AIUncensored import AIUncensored
from .AmigoChat import AmigoChat
from .Aura import Aura
from .Chatgpt4o import Chatgpt4o
diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css
index 45323999..b4832aad 100644
--- a/g4f/gui/client/static/css/style.css
+++ b/g4f/gui/client/static/css/style.css
@@ -1413,6 +1413,49 @@ form .field.saved .fa-xmark {
line-height: 30px;
}
+.collapsible {
+ border: 1px solid var(--blur-border);
+ border-radius: var(--border-radius-1);
+ overflow: hidden;
+ margin-bottom: 10px;
+}
+
+.collapsible-header {
+ padding: 10px;
+ cursor: pointer;
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ background-color: var(--blur-bg);
+}
+
+.collapsible-header:hover {
+ background-color: var(--button-hover);
+}
+
+.collapsible-content {
+ padding: 10px;
+}
+
+.collapsible-content.hidden {
+ display: none;
+}
+
+.provider-item {
+ padding: 5px 0;
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+}
+
+.fa-chevron-down {
+ transition: transform 0.3s ease;
+}
+
+.collapsible-header.active .fa-chevron-down {
+ transform: rotate(180deg);
+}
+
::-webkit-scrollbar-track {
background: var(--scrollbar);
}
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 15faf1c7..c8642c27 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -1961,43 +1961,75 @@ async function on_api() {
}
}
});
+
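+ // Wrap the per-provider enable checkboxes in a collapsible settings section.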
+ let providersContainer = document.createElement("div");
+ providersContainer.classList.add("field", "collapsible");
+ providersContainer.innerHTML = `
+
+
+ `;
+ settings.querySelector(".paper").appendChild(providersContainer);
+
providers.forEach((provider) => {
if (!provider.parent) {
- option = document.createElement("div");
- option.classList.add("field");
+ let option = document.createElement("div");
+ option.classList.add("provider-item");
option.innerHTML = `
Enable ${provider.label}
`;
option.querySelector("input").addEventListener("change", (event) => load_provider_option(event.target, provider.name));
- settings.querySelector(".paper").appendChild(option);
+ providersContainer.querySelector(".collapsible-content").appendChild(option);
provider_options[provider.name] = option;
}
});
+
+ providersContainer.querySelector(".collapsible-header").addEventListener('click', (e) => {
+ providersContainer.querySelector(".collapsible-content").classList.toggle('hidden');
+ providersContainer.querySelector(".collapsible-header").classList.toggle('active');
+ });
}
+
if (appStorage.getItem("provider")) {
await load_provider_models(appStorage.getItem("provider"))
} else {
providerSelect.selectedIndex = 0;
}
+
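+ // Group the provider API-key fields into a second collapsible section.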
+ let providersListContainer = document.createElement("div");
+ providersListContainer.classList.add("field", "collapsible");
+ providersListContainer.innerHTML = `
+
+
+ `;
+ settings.querySelector(".paper").appendChild(providersListContainer);
+
for (let [name, [label, login_url, childs]] of Object.entries(login_urls)) {
if (!login_url && !is_demo) {
continue;
}
- option = document.createElement("div");
- option.classList.add("field", "box");
- if (!is_demo) {
- option.classList.add("hidden");
- }
- childs = childs.map((child)=>`${child}-api_key`).join(" ");
- option.innerHTML = `
+ let providerBox = document.createElement("div");
+ providerBox.classList.add("field", "box");
+ childs = childs.map((child) => `${child}-api_key`).join(" ");
+ providerBox.innerHTML = `
` + (login_url ? `Get API key` : "");
- settings.querySelector(".paper").appendChild(option);
+ providersListContainer.querySelector(".collapsible-content").appendChild(providerBox);
}
+ providersListContainer.querySelector(".collapsible-header").addEventListener('click', (e) => {
+ providersListContainer.querySelector(".collapsible-content").classList.toggle('hidden');
+ providersListContainer.querySelector(".collapsible-header").classList.toggle('active');
+ });
+
register_settings_storage();
await load_settings_storage();
Object.entries(provider_options).forEach(
@@ -2596,4 +2628,4 @@ document.getElementById("showLog").addEventListener("click", ()=> {
log_storage.classList.remove("hidden");
settings.classList.add("hidden");
log_storage.scrollTop = log_storage.scrollHeight;
-});
\ No newline at end of file
+});
diff --git a/g4f/models.py b/g4f/models.py
index 183be9a1..c127c302 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -5,9 +5,6 @@ from dataclasses import dataclass
from .Provider import IterListProvider, ProviderType
from .Provider import (
### no auth required ###
- AIChatFree,
- AIUncensored,
- AutonomousAI,
Blackbox,
CablyAI,
ChatGLM,
@@ -19,6 +16,7 @@ from .Provider import (
DDG,
DeepInfraChat,
HuggingSpace,
+ Glider,
GPROChat,
ImageLabs,
Jmuz,
@@ -89,7 +87,6 @@ default = Model(
CablyAI,
OIVSCode,
DarkAI,
- AIUncensored,
OpenaiChat,
Cloudflare,
])
@@ -110,16 +107,16 @@ default_vision = Model(
], shuffle=False)
)
-############
-### Text ###
-############
+###################
+### Text/Vision ###
+###################
### OpenAI ###
# gpt-3.5
gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'OpenAI',
- best_provider = IterListProvider([DarkAI])
+ best_provider = DarkAI
)
# gpt-4
@@ -198,13 +195,13 @@ llama_3_70b = Model(
llama_3_1_8b = Model(
name = "llama-3.1-8b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Blackbox, DeepInfraChat, Jmuz, PollinationsAI, Cloudflare])
+ best_provider = IterListProvider([Blackbox, DeepInfraChat, Glider, Jmuz, PollinationsAI, Cloudflare])
)
llama_3_1_70b = Model(
name = "llama-3.1-70b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([DDG, Jmuz, Blackbox, TeachAnything, DarkAI])
+ best_provider = IterListProvider([DDG, Blackbox, Glider, Jmuz, TeachAnything, DarkAI])
)
llama_3_1_405b = Model(
@@ -220,29 +217,29 @@ llama_3_2_1b = Model(
best_provider = Cloudflare
)
-llama_3_2_11b = VisionModel(
- name = "llama-3.2-11b",
+llama_3_2_3b = Model(
+ name = "llama-3.2-3b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Jmuz, HuggingChat, HuggingFace])
+ best_provider = Glider
)
-llama_3_2_70b = Model(
- name = "llama-3.2-70b",
+llama_3_2_11b = VisionModel(
+ name = "llama-3.2-11b",
base_provider = "Meta Llama",
- best_provider = AutonomousAI
+ best_provider = IterListProvider([Jmuz, HuggingChat, HuggingFace])
)
llama_3_2_90b = Model(
name = "llama-3.2-90b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Jmuz, AutonomousAI])
+ best_provider = Jmuz
)
# llama 3.3
llama_3_3_70b = Model(
name = "llama-3.3-70b",
base_provider = "Meta Llama",
- best_provider = IterListProvider([Blackbox, DeepInfraChat, PollinationsAI, AutonomousAI, Jmuz, HuggingChat, HuggingFace])
+ best_provider = IterListProvider([Blackbox, DeepInfraChat, PollinationsAI, Jmuz, HuggingChat, HuggingFace])
)
### Mistral ###
@@ -271,12 +268,6 @@ hermes_2_dpo = Model(
best_provider = Blackbox
)
-hermes_3 = Model(
- name = "hermes-3",
- base_provider = "NousResearch",
- best_provider = IterListProvider([AutonomousAI, AIUncensored])
-)
-
### Microsoft ###
# phi
@@ -324,7 +315,7 @@ gemini_1_5_flash = Model(
gemini_1_5_pro = Model(
name = 'gemini-1.5-pro',
base_provider = 'Google DeepMind',
- best_provider = IterListProvider([Blackbox, Jmuz, GPROChat, AIChatFree, Gemini, GeminiPro, Liaobots])
+ best_provider = IterListProvider([Blackbox, Jmuz, GPROChat, Gemini, GeminiPro, Liaobots])
)
# gemini-2.0
@@ -340,12 +331,6 @@ gemini_2_0_flash_thinking = Model(
best_provider = Liaobots
)
-gemma_2_27b = Model(
- name = 'gemma-2-27b',
- base_provider = 'Google DeepMind',
- best_provider = HuggingFace
-)
-
### Anthropic ###
# claude 3
claude_3_haiku = Model(
@@ -437,7 +422,7 @@ qwen_2_5_72b = Model(
qwen_2_5_coder_32b = Model(
name = 'qwen-2.5-coder-32b',
base_provider = 'Qwen',
- best_provider = IterListProvider([DeepInfraChat, PollinationsAI, AutonomousAI, Jmuz, HuggingChat])
+ best_provider = IterListProvider([DeepInfraChat, PollinationsAI, Jmuz, HuggingChat])
)
qwen_2_5_1m = Model(
name = 'qwen-2.5-1m-demo',
@@ -468,12 +453,19 @@ pi = Model(
deepseek_chat = Model(
name = 'deepseek-chat',
base_provider = 'DeepSeek',
- best_provider = IterListProvider([Blackbox, DeepInfraChat, Jmuz, PollinationsAI])
+ best_provider = IterListProvider([Blackbox, Jmuz, PollinationsAI])
+)
+
+deepseek_v3 = Model(
+ name = 'deepseek-v3',
+ base_provider = 'DeepSeek',
+ best_provider = IterListProvider([Blackbox, DeepInfraChat])
)
+
deepseek_r1 = Model(
name = 'deepseek-r1',
base_provider = 'DeepSeek',
- best_provider = IterListProvider([Blackbox, Jmuz, PollinationsAI, HuggingChat, HuggingFace])
+ best_provider = IterListProvider([Blackbox, Glider, PollinationsAI, Jmuz, HuggingChat, HuggingFace])
)
### x.ai ###
@@ -496,6 +488,12 @@ sonar_pro = Model(
best_provider = PerplexityLabs
)
+sonar_reasoning = Model(
+ name = 'sonar-reasoning',
+ base_provider = 'Perplexity AI',
+ best_provider = PerplexityLabs
+)
+
### Nvidia ###
nemotron_70b = Model(
name = 'nemotron-70b',
@@ -651,8 +649,8 @@ class ModelUtils:
# llama-3.2
llama_3_2_1b.name: llama_3_2_1b,
+ llama_3_2_3b.name: llama_3_2_3b,
llama_3_2_11b.name: llama_3_2_11b,
- llama_3_2_70b.name: llama_3_2_70b,
llama_3_2_90b.name: llama_3_2_90b,
# llama-3.3
@@ -665,7 +663,6 @@ class ModelUtils:
### NousResearch ###
hermes_2_dpo.name: hermes_2_dpo,
- hermes_3.name: hermes_3,
### Microsoft ###
# phi
@@ -683,8 +680,6 @@ class ModelUtils:
gemini_1_5_flash.name: gemini_1_5_flash,
gemini_2_0_flash.name: gemini_2_0_flash,
gemini_2_0_flash_thinking.name: gemini_2_0_flash_thinking,
- ### Gemma
- gemma_2_27b.name: gemma_2_27b,
### Anthropic ###
# claude 3
@@ -731,9 +726,11 @@ class ModelUtils:
### Perplexity AI ###
sonar.name: sonar,
sonar_pro.name: sonar_pro,
+ sonar_reasoning.name: sonar_reasoning,
### DeepSeek ###
deepseek_chat.name: deepseek_chat,
+ deepseek_v3.name: deepseek_v3,
deepseek_r1.name: deepseek_r1,
nemotron_70b.name: nemotron_70b, ### Nvidia ###
@@ -765,6 +762,7 @@ class ModelUtils:
midjourney.name: midjourney,
}
+
demo_models = {
gpt_4o.name: [gpt_4o, [PollinationsAI, Blackbox]],
"default": [llama_3_2_11b, [HuggingFaceAPI]],
@@ -775,7 +773,6 @@ demo_models = {
command_r.name: [command_r, [HuggingSpace]],
command_r_plus.name: [command_r_plus, [HuggingSpace]],
command_r7b.name: [command_r7b, [HuggingSpace]],
- gemma_2_27b.name: [gemma_2_27b, [HuggingFaceAPI]],
qwen_2_72b.name: [qwen_2_72b, [HuggingSpace]],
qwen_2_5_coder_32b.name: [qwen_2_5_coder_32b, [HuggingFace]],
qwq_32b.name: [qwq_32b, [HuggingFace]],
--