From f105451947cbea2641444777a9ae8b68722ad4eb Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Wed, 15 Jan 2025 21:35:05 +0000
Subject: Provider Updates and Fixes (#2570)

* Update providers, restore old providers, remove non-working providers
* Restore the original providers
* Restore the original provider g4f/Provider/needs_auth/GeminiPro.py
* Delete non-working providers, fix broken providers
* Update docs/providers-and-models.md, g4f/models.py and g4f/Provider/hf_space/CohereForAI.py
* Restore g4f/Provider/Airforce.py; update alias in g4f/Provider/hf_space/CohereForAI.py
* Disable provider 'g4f/Provider/ReplicateHome.py' and move it to 'g4f/Provider/not_working'
* Disable the Pizzagpt provider due to a problem with its response
* Fix web_search = True not working
* Update docs/client.md
* Fix web_search = True not working in the asynchronous and synchronous versions

---------

Co-authored-by: kqlio67 <>
---
 g4f/Provider/AIChatFree.py                    |  73 ++++
 g4f/Provider/AIUncensored.py                  | 116 +++++++
 g4f/Provider/Airforce.py                      |   2 +-
 g4f/Provider/AmigoChat.py                     | 250 --------------
 g4f/Provider/Blackbox.py                      | 372 +++++++++++----------
 g4f/Provider/ClaudeSon.py                     |  43 ---
 g4f/Provider/DDG.py                           |  11 +-
 g4f/Provider/DeepInfraChat.py                 |  88 -----
 g4f/Provider/GPROChat.py                      |  69 ++++
 g4f/Provider/Pizzagpt.py                      |  12 +-
 g4f/Provider/ReplicateHome.py                 | 122 -------
 g4f/Provider/Yqcloud.py                       |  89 +++++
 g4f/Provider/__init__.py                      |   8 +-
 g4f/Provider/hf_space/CohereForAI.py          |   9 +-
 g4f/Provider/needs_auth/DeepInfra.py          |  56 +++-
 g4f/Provider/needs_auth/DeepInfraImage.py     |  81 -----
 g4f/Provider/needs_auth/GigaChat.py           | 136 ++++++++
 g4f/Provider/needs_auth/HuggingChat.py        |   2 -
 g4f/Provider/needs_auth/__init__.py           |   4 +-
 g4f/Provider/needs_auth/gigachat/GigaChat.py  |  92 -----
 g4f/Provider/needs_auth/gigachat/__init__.py  |   2 -
 .../gigachat/russian_trusted_root_ca_pem.crt  |  33 --
 g4f/Provider/not_working/AIChatFree.py        |  76 -----
 g4f/Provider/not_working/AIUncensored.py      | 132 --------
 g4f/Provider/not_working/Ai4Chat.py           |  89 -----
 g4f/Provider/not_working/AmigoChat.py         | 251 ++++++++++++++
 g4f/Provider/not_working/DeepInfraChat.py     |  88 +++++
 g4f/Provider/not_working/GPROChat.py          |  67 ----
 g4f/Provider/not_working/ReplicateHome.py     | 123 +++++++
 g4f/Provider/not_working/__init__.py          |   6 +-
 g4f/models.py                                 |  76 ++---
 g4f/tools/run_tools.py                        |  30 +-
 32 files changed, 1259 insertions(+), 1349 deletions(-)
 create mode 100644 g4f/Provider/AIChatFree.py
 create mode 100644 g4f/Provider/AIUncensored.py
 delete mode 100644 g4f/Provider/AmigoChat.py
 delete mode 100644 g4f/Provider/ClaudeSon.py
 delete mode 100644 g4f/Provider/DeepInfraChat.py
 create mode 100644 g4f/Provider/GPROChat.py
 delete mode 100644 g4f/Provider/ReplicateHome.py
 create mode 100644 g4f/Provider/Yqcloud.py
 delete mode 100644 g4f/Provider/needs_auth/DeepInfraImage.py
 create mode 100644 g4f/Provider/needs_auth/GigaChat.py
 delete mode 100644 g4f/Provider/needs_auth/gigachat/GigaChat.py
 delete mode 100644 g4f/Provider/needs_auth/gigachat/__init__.py
 delete mode 100644 g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt
 delete mode 100644 g4f/Provider/not_working/AIChatFree.py
 delete mode 100644 g4f/Provider/not_working/AIUncensored.py
 delete mode 100644 g4f/Provider/not_working/Ai4Chat.py
 create mode 100644 g4f/Provider/not_working/AmigoChat.py
 create mode 100644 g4f/Provider/not_working/DeepInfraChat.py
 delete mode 100644 g4f/Provider/not_working/GPROChat.py
 create mode 100644 g4f/Provider/not_working/ReplicateHome.py
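Both web_search bullets refer to the pass-through fix in g4f/tools/run_tools.py. As a minimal usage sketch (the model name is illustrative and not part of this patch), the flag travels through the documented client API like this:

from g4f.client import Client

# After this fix, web_search=True is forwarded to the provider by
# run_tools in both the synchronous Client and the AsyncClient paths.
client = Client()
response = client.chat.completions.create(
    model="gpt-4o-mini",  # illustrative model choice
    messages=[{"role": "user", "content": "Summarize today's top news."}],
    web_search=True,
)
print(response.choices[0].message.content)

diff --git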
a/g4f/Provider/AIChatFree.py b/g4f/Provider/AIChatFree.py new file mode 100644 index 00000000..0e6f394a --- /dev/null +++ b/g4f/Provider/AIChatFree.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +import time +from hashlib import sha256 + +from aiohttp import BaseConnector, ClientSession + +from ..errors import RateLimitError +from ..requests import raise_for_status +from ..requests.aiohttp import get_connector +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin + + +class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://aichatfree.info" + + working = True + supports_stream = True + supports_message_history = True + + default_model = 'gemini-1.5-pro' + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + connector: BaseConnector = None, + **kwargs, + ) -> AsyncResult: + headers = { + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0", + "Accept": "*/*", + "Accept-Language": "en-US,en;q=0.5", + "Accept-Encoding": "gzip, deflate, br", + "Content-Type": "text/plain;charset=UTF-8", + "Referer": f"{cls.url}/", + "Origin": cls.url, + } + async with ClientSession( + connector=get_connector(connector, proxy), headers=headers + ) as session: + timestamp = int(time.time() * 1e3) + data = { + "messages": [ + { + "role": "model" if message["role"] == "assistant" else "user", + "parts": [{"text": message["content"]}], + } + for message in messages + ], + "time": timestamp, + "pass": None, + "sign": generate_signature(timestamp, messages[-1]["content"]), + } + async with session.post( + f"{cls.url}/api/generate", json=data, proxy=proxy + ) as response: + if response.status == 500: + if "Quota exceeded" in await response.text(): + raise RateLimitError( + f"Response {response.status}: Rate limit reached" + ) + await raise_for_status(response) + async for chunk in response.content.iter_any(): + yield chunk.decode(errors="ignore") + + +def generate_signature(time: int, text: str, secret: str = ""): + message = f"{time}:{text}:{secret}" + return sha256(message.encode()).hexdigest() diff --git a/g4f/Provider/AIUncensored.py b/g4f/Provider/AIUncensored.py new file mode 100644 index 00000000..8ff38c22 --- /dev/null +++ b/g4f/Provider/AIUncensored.py @@ -0,0 +1,116 @@ +from __future__ import annotations + +from aiohttp import ClientSession +import time +import hmac +import hashlib +import json +import random + +from ..typing import AsyncResult, Messages +from ..requests.raise_for_status import raise_for_status +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt +from ..providers.response import FinishReason + +class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://www.aiuncensored.info/ai_uncensored" + api_key = "62852b00cb9e44bca86f0ec7e7455dc6" + + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = "hermes3-70b" + models = [default_model] + + model_aliases = {"hermes-3": "hermes3-70b"} + + @staticmethod + def calculate_signature(timestamp: str, json_dict: dict) -> str: + message = f"{timestamp}{json.dumps(json_dict)}" + secret_key = b'your-super-secret-key-replace-in-production' + signature = hmac.new( + secret_key, + message.encode('utf-8'), + hashlib.sha256 + ).hexdigest() + return signature + + @staticmethod + def get_server_url() -> str: + servers = 
[ + "https://llm-server-nov24-ibak.onrender.com", + "https://llm-server-nov24-qv2w.onrender.com", + "https://llm-server-nov24.onrender.com" + ] + return random.choice(servers) + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + stream: bool = False, + proxy: str = None, + api_key: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + timestamp = str(int(time.time())) + + json_dict = { + "messages": [{"role": "user", "content": format_prompt(messages)}], + "model": model, + "stream": stream + } + + signature = cls.calculate_signature(timestamp, json_dict) + + headers = { + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'content-type': 'application/json', + 'origin': 'https://www.aiuncensored.info', + 'referer': 'https://www.aiuncensored.info/', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36', + 'x-api-key': cls.api_key, + 'x-timestamp': timestamp, + 'x-signature': signature + } + + url = f"{cls.get_server_url()}/api/chat" + + async with ClientSession(headers=headers) as session: + async with session.post(url, json=json_dict, proxy=proxy) as response: + await raise_for_status(response) + + if stream: + full_response = "" + async for line in response.content: + if line: + try: + line_text = line.decode('utf-8') + if line_text.startswith(''): + data = line_text[6:] + if data == '[DONE]': + yield FinishReason("stop") + break + try: + json_data = json.loads(data) + if 'data' in json_data: + yield json_data['data'] + full_response += json_data['data'] + except json.JSONDecodeError: + continue + except UnicodeDecodeError: + continue + if full_response: + yield FinishReason("length") + else: + response_json = await response.json() + if 'content' in response_json: + yield response_json['content'] + yield FinishReason("length") diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py index 88b6f439..23430b25 100644 --- a/g4f/Provider/Airforce.py +++ b/g4f/Provider/Airforce.py @@ -31,7 +31,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin): api_endpoint_completions = "https://api.airforce/chat/completions" api_endpoint_imagine2 = "https://api.airforce/imagine2" - working = False + working = True supports_stream = True supports_system_message = True supports_message_history = True diff --git a/g4f/Provider/AmigoChat.py b/g4f/Provider/AmigoChat.py deleted file mode 100644 index b5be5724..00000000 --- a/g4f/Provider/AmigoChat.py +++ /dev/null @@ -1,250 +0,0 @@ -from __future__ import annotations - -import json -import uuid - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..image import ImageResponse -from ..requests import StreamSession, raise_for_status -from ..errors import ResponseStatusError - -MODELS = { - 'chat': { - 'gpt-4o-2024-11-20': {'persona_id': "gpt"}, - 'gpt-4o': {'persona_id': "summarizer"}, - 'gpt-4o-mini': {'persona_id': "amigo"}, - - 'o1-preview-': {'persona_id': "openai-o-one"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan - 'o1-preview-2024-09-12-': {'persona_id': "orion"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan - 'o1-mini-': {'persona_id': "openai-o-one-mini"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan - - 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo': {'persona_id': 
"llama-three-point-one"}, - 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo': {'persona_id': "llama-3-2"}, - 'codellama/CodeLlama-34b-Instruct-hf': {'persona_id': "codellama-CodeLlama-34b-Instruct-hf"}, - - 'gemini-1.5-pro': {'persona_id': "gemini-1-5-pro"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan - 'gemini-1.5-flash': {'persona_id': "gemini-1.5-flash"}, - - 'claude-3-5-sonnet-20240620': {'persona_id': "claude"}, - 'claude-3-5-sonnet-20241022': {'persona_id': "clude-claude-3-5-sonnet-20241022"}, - 'claude-3-5-haiku-latest': {'persona_id': "3-5-haiku"}, - - 'Qwen/Qwen2.5-72B-Instruct-Turbo': {'persona_id': "qwen-2-5"}, - - 'google/gemma-2b-it': {'persona_id': "google-gemma-2b-it"}, - 'google/gemma-7b': {'persona_id': "google-gemma-7b"}, # Error handling AIML chat completion stream - - 'Gryphe/MythoMax-L2-13b': {'persona_id': "Gryphe-MythoMax-L2-13b"}, - - 'mistralai/Mistral-7B-Instruct-v0.3': {'persona_id': "mistralai-Mistral-7B-Instruct-v0.1"}, - 'mistralai/mistral-tiny': {'persona_id': "mistralai-mistral-tiny"}, - 'mistralai/mistral-nemo': {'persona_id': "mistralai-mistral-nemo"}, - - 'deepseek-ai/deepseek-llm-67b-chat': {'persona_id': "deepseek-ai-deepseek-llm-67b-chat"}, - - 'databricks/dbrx-instruct': {'persona_id': "databricks-dbrx-instruct"}, - - 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO': {'persona_id': "NousResearch-Nous-Hermes-2-Mixtral-8x7B-DPO"}, - - 'x-ai/grok-beta': {'persona_id': "x-ai-grok-beta"}, - - 'anthracite-org/magnum-v4-72b': {'persona_id': "anthracite-org-magnum-v4-72b"}, - - 'cohere/command-r-plus': {'persona_id': "cohere-command-r-plus"}, - - 'ai21/jamba-1-5-mini': {'persona_id': "ai21-jamba-1-5-mini"}, - - 'zero-one-ai/Yi-34B': {'persona_id': "zero-one-ai-Yi-34B"} # Error handling AIML chat completion stream - }, - - 'image': { - 'flux-pro/v1.1': {'persona_id': "flux-1-1-pro"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan - 'flux-realism': {'persona_id': "flux-realism"}, - 'flux-pro': {'persona_id': "flux-pro"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan - 'flux-pro/v1.1-ultra': {'persona_id': "flux-pro-v1.1-ultra"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan - 'flux-pro/v1.1-ultra-raw': {'persona_id': "flux-pro-v1.1-ultra-raw"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan - 'flux/dev': {'persona_id': "flux-dev"}, - - 'dall-e-3': {'persona_id': "dalle-three"}, - - 'recraft-v3': {'persona_id': "recraft"} - } -} - -class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://amigochat.io/chat/" - chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions" - image_api_endpoint = "https://api.amigochat.io/v1/images/generations" - working = True - supports_stream = True - supports_system_message = True - supports_message_history = True - - default_model = 'gpt-4o-mini' - - chat_models = list(MODELS['chat'].keys()) - image_models = list(MODELS['image'].keys()) - models = chat_models + image_models - - model_aliases = { - ### chat ### - "gpt-4o": "gpt-4o-2024-11-20", - "gpt-4o-mini": "gpt-4o-mini", - - "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", - "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo", - "codellama-34b": "codellama/CodeLlama-34b-Instruct-hf", - - "gemini-flash": "gemini-1.5-flash", - - "claude-3.5-sonnet": 
"claude-3-5-sonnet-20240620", - "claude-3.5-sonnet": "claude-3-5-sonnet-20241022", - "claude-3.5-haiku": "claude-3-5-haiku-latest", - - "qwen-2.5-72b": "Qwen/Qwen2.5-72B-Instruct-Turbo", - "gemma-2b": "google/gemma-2b-it", - - "mythomax-13b": "Gryphe/MythoMax-L2-13b", - - "mixtral-7b": "mistralai/Mistral-7B-Instruct-v0.3", - "mistral-nemo": "mistralai/mistral-nemo", - - "deepseek-chat": "deepseek-ai/deepseek-llm-67b-chat", - - "dbrx-instruct": "databricks/dbrx-instruct", - - "mixtral-8x7b-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", - - "grok-beta": "x-ai/grok-beta", - - "magnum-72b": "anthracite-org/magnum-v4-72b", - - "command-r-plus": "cohere/command-r-plus", - - "jamba-mini": "ai21/jamba-1-5-mini", - - - ### image ### - "flux-dev": "flux/dev", - } - - @classmethod - def get_personaId(cls, model: str) -> str: - if model in cls.chat_models: - return MODELS['chat'][model]['persona_id'] - elif model in cls.image_models: - return MODELS['image'][model]['persona_id'] - else: - raise ValueError(f"Unknown model: {model}") - - @staticmethod - def generate_chat_id() -> str: - """Generate a chat ID in format: 8-4-4-4-12 hexadecimal digits""" - return str(uuid.uuid4()) - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - stream: bool = False, - timeout: int = 300, - frequency_penalty: float = 0, - max_tokens: int = 4000, - presence_penalty: float = 0, - temperature: float = 0.5, - top_p: float = 0.95, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - device_uuid = str(uuid.uuid4()) - max_retries = 3 - retry_count = 0 - - while retry_count < max_retries: - try: - headers = { - "accept": "*/*", - "accept-language": "en-US,en;q=0.9", - "authorization": "Bearer", - "cache-control": "no-cache", - "content-type": "application/json", - "origin": cls.url, - "pragma": "no-cache", - "priority": "u=1, i", - "referer": f"{cls.url}/", - "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", - "x-device-language": "en-US", - "x-device-platform": "web", - "x-device-uuid": device_uuid, - "x-device-version": "1.0.45" - } - - async with StreamSession(headers=headers, proxy=proxy) as session: - if model not in cls.image_models: - data = { - "chatId": cls.generate_chat_id(), - "frequency_penalty": frequency_penalty, - "max_tokens": max_tokens, - "messages": messages, - "model": model, - "personaId": cls.get_personaId(model), - "presence_penalty": presence_penalty, - "stream": stream, - "temperature": temperature, - "top_p": top_p - } - async with session.post(cls.chat_api_endpoint, json=data, timeout=timeout) as response: - await raise_for_status(response) - async for line in response.iter_lines(): - line = line.decode('utf-8').strip() - if line.startswith('data: '): - if line == 'data: [DONE]': - break - try: - chunk = json.loads(line[6:]) # Remove 'data: ' prefix - if 'choices' in chunk and len(chunk['choices']) > 0: - choice = chunk['choices'][0] - if 'delta' in choice: - content = choice['delta'].get('content') - elif 'text' in choice: - content = choice['text'] - else: - content = None - if content: - yield content - except json.JSONDecodeError: - pass - else: - # Image generation - prompt = messages[-1]['content'] - data = { - "prompt": prompt, - "model": model, - "personaId": cls.get_personaId(model) - } - async with 
session.post(cls.image_api_endpoint, json=data) as response: - await raise_for_status(response) - response_data = await response.json() - if "data" in response_data: - image_urls = [] - for item in response_data["data"]: - if "url" in item: - image_url = item["url"] - image_urls.append(image_url) - if image_urls: - yield ImageResponse(image_urls, prompt) - else: - yield None - break - except (ResponseStatusError, Exception) as e: - retry_count += 1 - if retry_count >= max_retries: - raise e - device_uuid = str(uuid.uuid4()) diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py index d7b4c406..71d97ee0 100644 --- a/g4f/Provider/Blackbox.py +++ b/g4f/Provider/Blackbox.py @@ -1,78 +1,30 @@ from __future__ import annotations -from aiohttp import ClientSession -import json -import uuid -import re -import aiohttp +from aiohttp import ClientSession, TCPConnector, ClientTimeout + from pathlib import Path -from functools import wraps -from typing import Optional, Callable, Any +import re +import json +import random +import string + + from ..typing import AsyncResult, Messages, ImagesType +from ..requests.raise_for_status import raise_for_status from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt from ..image import ImageResponse, to_data_uri from ..cookies import get_cookies_dir -from .. import debug - +from .helper import format_prompt +from ..providers.response import FinishReason, JsonConversation -def cached_value(filename: str, cache_key: str = 'validated_value'): - """Universal cache decorator for both memory and file caching""" - def decorator(fetch_func: Callable) -> Callable: - memory_cache: Optional[str] = None - - @wraps(fetch_func) - async def wrapper(cls, *args, force_refresh=False, **kwargs) -> Any: - nonlocal memory_cache - - # If force refresh, clear caches - if force_refresh: - memory_cache = None - try: - cache_file = Path(get_cookies_dir()) / filename - if cache_file.exists(): - cache_file.unlink() - except Exception as e: - debug.log(f"Error clearing cache file: {e}") - - # Check memory cache first - if memory_cache is not None: - return memory_cache - - # Check file cache - cache_file = Path(get_cookies_dir()) / filename - try: - if cache_file.exists(): - with open(cache_file, 'r') as f: - data = json.load(f) - if data.get(cache_key): - memory_cache = data[cache_key] - return memory_cache - except Exception as e: - debug.log(f"Error reading cache file: {e}") - - # Fetch new value - try: - value = await fetch_func(cls, *args, **kwargs) - memory_cache = value - - # Save to file - cache_file.parent.mkdir(exist_ok=True) - try: - with open(cache_file, 'w') as f: - json.dump({cache_key: value}, f) - except Exception as e: - debug.log(f"Error writing to cache file: {e}") - - return value - except Exception as e: - debug.log(f"Error fetching value: {e}") - raise - - return wrapper - return decorator +class Conversation(JsonConversation): + validated_value: str = None + chat_id: str = None + message_history: Messages = [] + def __init__(self, model: str): + self.model = model class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): label = "Blackbox AI" @@ -80,11 +32,12 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): api_endpoint = "https://www.blackbox.ai/api/chat" working = True - supports_stream = True - supports_system_message = True + needs_auth = True + supports_stream = False + supports_system_message = False supports_message_history = True - default_model = 'blackboxai' + default_model = 
"blackboxai" default_vision_model = default_model default_image_model = 'ImageGeneration' image_models = [default_image_model] @@ -166,144 +119,201 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin): } @classmethod - @cached_value(filename='blackbox.json') - async def get_validated(cls) -> str: - """Fetch validated value from website""" - async with aiohttp.ClientSession() as session: - async with session.get(cls.url) as response: - if response.status != 200: - raise RuntimeError("Failed to get validated value") - - page_content = await response.text() - js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content) - - if not js_files: - js_files = re.findall(r'static/js/[a-zA-Z0-9-]+\.js', page_content) + async def fetch_validated( + cls, + url: str = "https://www.blackbox.ai", + force_refresh: bool = False + ) -> Optional[str]: + """ + Asynchronously retrieves the validated_value from the specified URL. + """ + cache_file = Path(get_cookies_dir()) / 'blackbox.json' + + if not force_refresh and cache_file.exists(): + try: + with open(cache_file, 'r') as f: + data = json.load(f) + if data.get('validated_value'): + return data['validated_value'] + except Exception as e: + print(f"Error reading cache: {e}") + + js_file_pattern = r'static/chunks/\d{4}-[a-fA-F0-9]+\.js' + uuid_pattern = r'["\']([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})["\']' - uuid_format = r'["\']([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})["\']' + def is_valid_context(text: str) -> bool: + """Checks if the context is valid.""" + return any(char + '=' in text for char in 'abcdefghijklmnopqrstuvwxyz') - def is_valid_context(text_around): - return any(char + '=' in text_around for char in 'abcdefghijklmnopqrstuvwxyz') + async with ClientSession() as session: + try: + async with session.get(url) as response: + if response.status != 200: + print("Failed to load the page.") + return None + + page_content = await response.text() + js_files = re.findall(js_file_pattern, page_content) for js_file in js_files: - js_url = f"{cls.url}/_next/{js_file}" - try: - async with session.get(js_url) as js_response: - if js_response.status == 200: - js_content = await js_response.text() - for match in re.finditer(uuid_format, js_content): - start = max(0, match.start() - 10) - end = min(len(js_content), match.end() + 10) - context = js_content[start:end] + js_url = f"{url}/_next/{js_file}" + async with session.get(js_url) as js_response: + if js_response.status == 200: + js_content = await js_response.text() + for match in re.finditer(uuid_pattern, js_content): + start = max(0, match.start() - 10) + end = min(len(js_content), match.end() + 10) + context = js_content[start:end] + + if is_valid_context(context): + validated_value = match.group(1) - if is_valid_context(context): - return match.group(1) - except Exception: - continue - - raise RuntimeError("Failed to get validated value") + # Save to cache + cache_file.parent.mkdir(exist_ok=True) + try: + with open(cache_file, 'w') as f: + json.dump({'validated_value': validated_value}, f) + except Exception as e: + print(f"Error writing cache: {e}") + + return validated_value + + except Exception as e: + print(f"Error retrieving validated_value: {e}") + + return None + + @classmethod + def generate_chat_id(cls) -> str: + """Generate a random chat ID""" + chars = string.ascii_letters + string.digits + return ''.join(random.choice(chars) for _ in range(7)) @classmethod async def create_async_generator( cls, 
model: str, messages: Messages, - proxy: str = None, prompt: str = None, + proxy: str = None, web_search: bool = False, images: ImagesType = None, top_p: float = None, temperature: float = None, max_tokens: int = None, + conversation: Conversation = None, + return_conversation: bool = False, **kwargs ) -> AsyncResult: + model = cls.get_model(model) headers = { - "accept": "*/*", - "accept-language": "en-US,en;q=0.9", - "content-type": "application/json", - "origin": "https://www.blackbox.ai", - "referer": "https://www.blackbox.ai/", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36" + 'accept': '*/*', + 'accept-language': 'en-US,en;q=0.9', + 'content-type': 'application/json', + 'origin': 'https://www.blackbox.ai', + 'referer': 'https://www.blackbox.ai/', + 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36' } - model = cls.get_model(model) - - conversation_id = str(uuid.uuid4())[:7] - validated_value = await cls.get_validated() + connector = TCPConnector(limit=10, ttl_dns_cache=300) + timeout = ClientTimeout(total=30) - formatted_message = format_prompt(messages) + async with ClientSession(headers=headers, connector=connector, timeout=timeout) as session: + if conversation is None: + conversation = Conversation(model) + conversation.validated_value = await cls.fetch_validated() + conversation.chat_id = cls.generate_chat_id() + conversation.message_history = [] - first_message = next((msg for msg in messages if msg['role'] == 'user'), None) - current_messages = [{"id": conversation_id, "content": formatted_message, "role": "user"}] + current_messages = [{"id": conversation.chat_id, "content": format_prompt(messages), "role": "user"}] + conversation.message_history.extend(messages) - if images is not None: - current_messages[-1]['data'] = { - "imagesData": [ - { - "filePath": f"/{image_name}", - "contents": to_data_uri(image) - } - for image, image_name in images - ], - "fileText": "", - "title": "" - } - - while True: - async with ClientSession(headers=headers) as session: - data = { - "messages": current_messages, - "id": conversation_id, - "previewToken": None, - "userId": None, - "codeModelMode": True, - "agentMode": cls.agentMode.get(model, {}) if model in cls.agentMode else {}, - "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {}, - "isMicMode": False, - "userSystemPrompt": None, - "maxTokens": max_tokens, - "playgroundTopP": top_p, - "playgroundTemperature": temperature, - "isChromeExt": False, - "githubToken": "", - "clickedAnswer2": False, - "clickedAnswer3": False, - "clickedForceWebSearch": False, - "visitFromDelta": False, - "mobileClient": False, - "userSelectedModel": model if model in cls.userSelectedModel else None, - "validated": validated_value, - "imageGenerationMode": False, - "webSearchModePrompt": False, - "deepSearchMode": False, - "domains": None, - "webSearchMode": web_search + if images is not None: + current_messages[-1]['data'] = { + "imagesData": [ + { + "filePath": f"/{image_name}", + "contents": to_data_uri(image) + } + for image, image_name in images + ], + "fileText": "", + "title": "" } + + data = { + "messages": current_messages, + "agentMode": cls.agentMode.get(model, {}) if model in cls.agentMode else {}, + "id": conversation.chat_id, + "previewToken": None, + "userId": None, + "codeModelMode": True, + "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in 
cls.trendingAgentMode else {}, + "isMicMode": False, + "userSystemPrompt": None, + "maxTokens": max_tokens, + "playgroundTopP": top_p, + "playgroundTemperature": temperature, + "isChromeExt": False, + "githubToken": "", + "clickedAnswer2": False, + "clickedAnswer3": False, + "clickedForceWebSearch": False, + "visitFromDelta": False, + "mobileClient": False, + "userSelectedModel": model if model in cls.userSelectedModel else None, + "validated": conversation.validated_value, + "imageGenerationMode": False, + "webSearchModePrompt": False, + "deepSearchMode": False, + "domains": None, + "vscodeClient": False, + "codeInterpreterMode": False, + "webSearchMode": web_search + } + + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + await raise_for_status(response) + response_text = await response.text() + parts = response_text.split('$~~~$') + text_to_yield = parts[2] if len(parts) >= 3 else response_text + + if not text_to_yield or text_to_yield.isspace(): + return + + full_response = "" - try: - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - first_chunk = True - content_received = False - async for chunk in response.content: - if chunk: - content_received = True - decoded = chunk.decode() - if first_chunk and "Generated by BLACKBOX.AI" in decoded: - validated_value = await cls.get_validated(force_refresh=True) - break - first_chunk = False - if model in cls.image_models and decoded.startswith("![]("): - image_url = decoded.strip("![]()") - prompt = messages[-1]["content"] - yield ImageResponse(images=image_url, alt=prompt) - else: - yield decoded + if model in cls.image_models: + image_url_match = re.search(r'!\[.*?\]\((.*?)\)', text_to_yield) + if image_url_match: + image_url = image_url_match.group(1) + prompt = messages[-1]["content"] + yield ImageResponse(images=[image_url], alt=prompt) + else: + if "Generated by BLACKBOX.AI" in text_to_yield: + conversation.validated_value = await cls.fetch_validated(force_refresh=True) + if conversation.validated_value: + data["validated"] = conversation.validated_value + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as new_response: + await raise_for_status(new_response) + new_response_text = await new_response.text() + new_parts = new_response_text.split('$~~~$') + new_text = new_parts[2] if len(new_parts) >= 3 else new_response_text + + if new_text and not new_text.isspace(): + yield new_text + full_response = new_text else: - if not content_received: - debug.log("Empty response received from Blackbox API, retrying...") - continue - return - except Exception as e: - debug.log(f"Error in request: {e}") - raise + if text_to_yield and not text_to_yield.isspace(): + yield text_to_yield + full_response = text_to_yield + else: + if text_to_yield and not text_to_yield.isspace(): + yield text_to_yield + full_response = text_to_yield + + if return_conversation: + conversation.message_history.append({"role": "assistant", "content": full_response}) + yield conversation + + yield FinishReason("stop") diff --git a/g4f/Provider/ClaudeSon.py b/g4f/Provider/ClaudeSon.py deleted file mode 100644 index 2dffd24b..00000000 --- a/g4f/Provider/ClaudeSon.py +++ /dev/null @@ -1,43 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession - -from ..typing import AsyncResult, Messages -from ..requests.raise_for_status import raise_for_status -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import 
format_prompt - -class ClaudeSon(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://claudeson.net" - api_endpoint = "https://claudeson.net/api/coze/chat" - working = True - - default_model = 'claude-3.5-sonnet' - models = [default_model] - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - headers = { - "accept": "*/*", - "accept-language": "en-US,en;q=0.9", - "content-type": "application/json", - "origin": "https://claudeson.net", - "referer": "https://claudeson.net/", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36" - } - async with ClientSession(headers=headers) as session: - data = { - "textStr": format_prompt(messages), - "type": "company" - } - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - await raise_for_status(response) - async for chunk in response.content: - if chunk: - yield chunk.decode(errors="ignore") \ No newline at end of file diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py index 81d7e3fc..fbe0ad4b 100644 --- a/g4f/Provider/DDG.py +++ b/g4f/Provider/DDG.py @@ -5,7 +5,7 @@ import json import asyncio import random -from ..typing import AsyncResult, Messages, Cookies +from ..typing import AsyncResult, Messages from ..requests.raise_for_status import raise_for_status from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from .helper import format_prompt @@ -14,7 +14,6 @@ from ..providers.response import FinishReason, JsonConversation class Conversation(JsonConversation): vqd: str = None message_history: Messages = [] - cookies: dict = {} def __init__(self, model: str): self.model = model @@ -82,15 +81,12 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin): messages: Messages, proxy: str = None, timeout: int = 30, - cookies: Cookies = None, conversation: Conversation = None, return_conversation: bool = False, **kwargs ) -> AsyncResult: model = cls.get_model(model) - if cookies is None and conversation is not None: - cookies = conversation.cookies - async with ClientSession(timeout=ClientTimeout(total=timeout), cookies=cookies) as session: + async with ClientSession(timeout=ClientTimeout(total=timeout)) as session: # Fetch VQD token if conversation is None: conversation = Conversation(model) @@ -129,7 +125,6 @@ class DDG(AsyncGeneratorProvider, ProviderModelMixin): if return_conversation: conversation.message_history.append({"role": "assistant", "content": full_message}) conversation.vqd = response.headers.get("x-vqd-4", conversation.vqd) - conversation.cookies = {n: c.value for n, c in session.cookie_jar.filter_cookies(cls.url).items()} yield conversation if reason is not None: - yield FinishReason(reason) \ No newline at end of file + yield FinishReason(reason) diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py deleted file mode 100644 index 5db74012..00000000 --- a/g4f/Provider/DeepInfraChat.py +++ /dev/null @@ -1,88 +0,0 @@ -from __future__ import annotations - -import json -from aiohttp import ClientSession - -from ..typing import AsyncResult, Messages -from ..requests.raise_for_status import raise_for_status -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin - -class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://deepinfra.com/chat" - api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions" - - working = True - supports_stream = True - 
supports_system_message = True - supports_message_history = True - - default_model = 'meta-llama/Llama-3.3-70B-Instruct-Turbo' - models = [ - 'meta-llama/Llama-3.3-70B-Instruct', - 'meta-llama/Meta-Llama-3.1-8B-Instruct', - default_model, - 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo', - 'Qwen/QwQ-32B-Preview', - 'microsoft/WizardLM-2-8x22B', - 'Qwen/Qwen2.5-72B-Instruct', - 'Qwen/Qwen2.5-Coder-32B-Instruct', - 'nvidia/Llama-3.1-Nemotron-70B-Instruct', - ] - model_aliases = { - "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct", - "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct", - "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo", - "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", - "qwq-32b": "Qwen/QwQ-32B-Preview", - "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B", - "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct", - "qwen-2.5-coder-32b": "Qwen/Qwen2.5-Coder-32B-Instruct", - "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct", - } - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - 'Accept-Language': 'en-US,en;q=0.9', - 'Content-Type': 'application/json', - 'Origin': 'https://deepinfra.com', - 'Referer': 'https://deepinfra.com/', - 'X-Deepinfra-Source': 'web-page', - 'accept': 'text/event-stream', - } - async with ClientSession(headers=headers) as session: - data = { - "model": model, - "messages": messages, - "stream": True - } - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - await raise_for_status(response) - async for chunk in response.content: - if chunk: - chunk_text = chunk.decode(errors="ignore") - try: - # Handle streaming response - if chunk_text.startswith("data: "): - if chunk_text.strip() == "data: [DONE]": - continue - chunk_data = json.loads(chunk_text[6:]) - content = chunk_data["choices"][0]["delta"].get("content") - if content: - yield content - # Handle non-streaming response - else: - chunk_data = json.loads(chunk_text) - content = chunk_data["choices"][0]["message"].get("content") - if content: - yield content - except (json.JSONDecodeError, KeyError): - continue diff --git a/g4f/Provider/GPROChat.py b/g4f/Provider/GPROChat.py new file mode 100644 index 00000000..71465ecf --- /dev/null +++ b/g4f/Provider/GPROChat.py @@ -0,0 +1,69 @@ +from __future__ import annotations + +import time +import hashlib +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt + +class GPROChat(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://gprochat.com" + api_endpoint = "https://gprochat.com/api/generate" + + working = True + supports_stream = True + supports_message_history = True + default_model = 'gemini-1.5-pro' + + @staticmethod + def generate_signature(timestamp: int, message: str) -> str: + secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8" + hash_input = f"{timestamp}:{message}:{secret_key}" + signature = hashlib.sha256(hash_input.encode('utf-8')).hexdigest() + return signature + + @classmethod + def get_model(cls, model: str) -> str: + if model in cls.models: + return model + elif model in cls.model_aliases: + return cls.model_aliases[model] + else: + return cls.default_model + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + 
model = cls.get_model(model) + timestamp = int(time.time() * 1000) + prompt = format_prompt(messages) + sign = cls.generate_signature(timestamp, prompt) + + headers = { + "accept": "*/*", + "origin": cls.url, + "referer": f"{cls.url}/", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", + "content-type": "text/plain;charset=UTF-8" + } + + data = { + "messages": [{"role": "user", "parts": [{"text": prompt}]}], + "time": timestamp, + "pass": None, + "sign": sign + } + + async with ClientSession(headers=headers) as session: + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + async for chunk in response.content.iter_any(): + if chunk: + yield chunk.decode() diff --git a/g4f/Provider/Pizzagpt.py b/g4f/Provider/Pizzagpt.py index 65fffd1e..249b95d8 100644 --- a/g4f/Provider/Pizzagpt.py +++ b/g4f/Provider/Pizzagpt.py @@ -5,13 +5,16 @@ from aiohttp import ClientSession from ..typing import AsyncResult, Messages from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from .helper import format_prompt - +from ..providers.response import FinishReason class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin): url = "https://www.pizzagpt.it" api_endpoint = "/api/chatx-completion" + working = False + default_model = 'gpt-4o-mini' + models = [default_model] @classmethod async def create_async_generator( @@ -27,12 +30,6 @@ class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin): "content-type": "application/json", "origin": cls.url, "referer": f"{cls.url}/en", - "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36", "x-secret": "Marinara" } @@ -49,3 +46,4 @@ class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin): if "Misuse detected. 
please get in touch" in content: raise ValueError(content) yield content + yield FinishReason("stop") diff --git a/g4f/Provider/ReplicateHome.py b/g4f/Provider/ReplicateHome.py deleted file mode 100644 index 0f5452cd..00000000 --- a/g4f/Provider/ReplicateHome.py +++ /dev/null @@ -1,122 +0,0 @@ -from __future__ import annotations - -import json -import asyncio -from aiohttp import ClientSession, ContentTypeError - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..requests.aiohttp import get_connector -from ..requests.raise_for_status import raise_for_status -from .helper import format_prompt -from ..image import ImageResponse - -class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://replicate.com" - api_endpoint = "https://homepage.replicate.com/api/prediction" - working = True - supports_stream = True - - default_model = 'google-deepmind/gemma-2b-it' - default_image_model = 'stability-ai/stable-diffusion-3' - - image_models = [ - 'stability-ai/stable-diffusion-3', - 'bytedance/sdxl-lightning-4step', - 'playgroundai/playground-v2.5-1024px-aesthetic', - ] - - text_models = [ - 'google-deepmind/gemma-2b-it', - ] - - models = text_models + image_models - - model_aliases = { - # image_models - "sd-3": "stability-ai/stable-diffusion-3", - "sdxl": "bytedance/sdxl-lightning-4step", - "playground-v2.5": "playgroundai/playground-v2.5-1024px-aesthetic", - - # text_models - "gemma-2b": "google-deepmind/gemma-2b-it", - } - - model_versions = { - # image_models - 'stability-ai/stable-diffusion-3': "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f", - 'bytedance/sdxl-lightning-4step': "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f", - 'playgroundai/playground-v2.5-1024px-aesthetic': "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24", - - # text_models - "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626", - } - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - prompt: str = None, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - "accept": "*/*", - "accept-language": "en-US,en;q=0.9", - "content-type": "application/json", - "origin": "https://replicate.com", - "referer": "https://replicate.com/", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36" - } - - async with ClientSession(headers=headers, connector=get_connector(proxy=proxy)) as session: - if prompt is None: - if model in cls.image_models: - prompt = messages[-1]['content'] - else: - prompt = format_prompt(messages) - - data = { - "model": model, - "version": cls.model_versions[model], - "input": {"prompt": prompt}, - } - - async with session.post(cls.api_endpoint, json=data) as response: - await raise_for_status(response) - result = await response.json() - prediction_id = result['id'] - - poll_url = f"https://homepage.replicate.com/api/poll?id={prediction_id}" - max_attempts = 30 - delay = 5 - for _ in range(max_attempts): - async with session.get(poll_url) as response: - await raise_for_status(response) - try: - result = await response.json() - except ContentTypeError: - text = await response.text() - try: - result = json.loads(text) - except json.JSONDecodeError: - raise ValueError(f"Unexpected response format: {text}") - - if result['status'] == 'succeeded': - if model in 
cls.image_models: - image_url = result['output'][0] - yield ImageResponse(image_url, prompt) - return - else: - for chunk in result['output']: - yield chunk - break - elif result['status'] == 'failed': - raise Exception(f"Prediction failed: {result.get('error')}") - await asyncio.sleep(delay) - - if result['status'] != 'succeeded': - raise Exception("Prediction timed out") diff --git a/g4f/Provider/Yqcloud.py b/g4f/Provider/Yqcloud.py new file mode 100644 index 00000000..06d1b3af --- /dev/null +++ b/g4f/Provider/Yqcloud.py @@ -0,0 +1,89 @@ +from __future__ import annotations +import time +from aiohttp import ClientSession + +from ..typing import AsyncResult, Messages +from ..requests.raise_for_status import raise_for_status +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from .helper import format_prompt +from ..providers.response import FinishReason, JsonConversation + +class Conversation(JsonConversation): + userId: str = None + message_history: Messages = [] + + def __init__(self, model: str): + self.model = model + self.userId = f"#/chat/{int(time.time() * 1000)}" + +class Yqcloud(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://chat9.yqcloud.top" + api_endpoint = "https://api.binjie.fun/api/generateStream" + + working = True + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = "gpt-4" + models = [default_model] + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + stream: bool = True, + proxy: str = None, + conversation: Conversation = None, + return_conversation: bool = False, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + headers = { + "accept": "application/json, text/plain, */*", + "accept-language": "en-US,en;q=0.9", + "content-type": "application/json", + "origin": f"{cls.url}", + "referer": f"{cls.url}/", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36" + } + + if conversation is None: + conversation = Conversation(model) + conversation.message_history = messages + else: + conversation.message_history.append(messages[-1]) + + # Extract system message if present + system_message = "" + current_messages = conversation.message_history + if current_messages and current_messages[0]["role"] == "system": + system_message = current_messages[0]["content"] + current_messages = current_messages[1:] + + async with ClientSession(headers=headers) as session: + prompt = format_prompt(current_messages) + data = { + "prompt": prompt, + "userId": conversation.userId, + "network": True, + "system": system_message, + "withoutContext": False, + "stream": stream + } + + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + await raise_for_status(response) + full_message = "" + async for chunk in response.content: + if chunk: + message = chunk.decode() + yield message + full_message += message + + if return_conversation: + conversation.message_history.append({"role": "assistant", "content": full_message}) + yield conversation + + yield FinishReason("stop") diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index b095fa73..6910fbc1 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -12,8 +12,9 @@ from .not_working import * from .local import * from .hf_space import HuggingSpace +from .AIChatFree import AIChatFree from .Airforce import Airforce -from .AmigoChat import AmigoChat +from .AIUncensored import AIUncensored 
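The Yqcloud provider added above follows the JsonConversation pattern used throughout this patch: with return_conversation=True the generator yields text chunks, then the updated Conversation, then a FinishReason. A consumption sketch, assuming only the signatures shown in this diff:

import asyncio
from g4f.Provider import Yqcloud
from g4f.providers.response import FinishReason, JsonConversation

async def main():
    conversation = None
    async for chunk in Yqcloud.create_async_generator(
        "gpt-4",
        [{"role": "user", "content": "Hello"}],
        return_conversation=True,
    ):
        if isinstance(chunk, JsonConversation):
            conversation = chunk  # keeps userId and message_history
        elif isinstance(chunk, FinishReason):
            break
        else:
            print(chunk, end="")
    # pass conversation=conversation on the next call to continue the chat

asyncio.run(main())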
from .AutonomousAI import AutonomousAI from .Blackbox import Blackbox from .BlackboxCreateAgent import BlackboxCreateAgent @@ -22,15 +23,14 @@ from .ChatGLM import ChatGLM from .ChatGpt import ChatGpt from .ChatGptEs import ChatGptEs from .ChatGptt import ChatGptt -from .ClaudeSon import ClaudeSon from .Cloudflare import Cloudflare from .Copilot import Copilot from .DarkAI import DarkAI from .DDG import DDG -from .DeepInfraChat import DeepInfraChat from .Free2GPT import Free2GPT from .FreeGpt import FreeGpt from .GizAI import GizAI +from .GPROChat import GPROChat from .ImageLabs import ImageLabs from .Jmuz import Jmuz from .Liaobots import Liaobots @@ -40,10 +40,10 @@ from .Pi import Pi from .Pizzagpt import Pizzagpt from .PollinationsAI import PollinationsAI from .Prodia import Prodia -from .ReplicateHome import ReplicateHome from .RubiksAI import RubiksAI from .TeachAnything import TeachAnything from .You import You +from .Yqcloud import Yqcloud import sys diff --git a/g4f/Provider/hf_space/CohereForAI.py b/g4f/Provider/hf_space/CohereForAI.py index 4adeef60..540091f1 100644 --- a/g4f/Provider/hf_space/CohereForAI.py +++ b/g4f/Provider/hf_space/CohereForAI.py @@ -24,6 +24,13 @@ class CohereForAI(AsyncGeneratorProvider, ProviderModelMixin): "command-r", "command-r7b-12-2024", ] + + model_aliases = { + "command-r-plus": "command-r-plus-08-2024", + "command-r": "command-r-08-2024", + "command-r7b": "command-r7b-12-2024", + + } @classmethod async def create_async_generator( @@ -92,4 +99,4 @@ class CohereForAI(AsyncGeneratorProvider, ProviderModelMixin): elif data["type"] == "title": yield TitleGeneration(data["title"]) elif data["type"] == "finalAnswer": - break \ No newline at end of file + break diff --git a/g4f/Provider/needs_auth/DeepInfra.py b/g4f/Provider/needs_auth/DeepInfra.py index ea114a1b..86993314 100644 --- a/g4f/Provider/needs_auth/DeepInfra.py +++ b/g4f/Provider/needs_auth/DeepInfra.py @@ -3,6 +3,8 @@ from __future__ import annotations import requests from ...typing import AsyncResult, Messages from .OpenaiAPI import OpenaiAPI +from ...requests import StreamSession, raise_for_status +from ...image import ImageResponse class DeepInfra(OpenaiAPI): label = "DeepInfra" @@ -14,6 +16,8 @@ class DeepInfra(OpenaiAPI): supports_stream = True supports_message_history = True default_model = "meta-llama/Meta-Llama-3.1-70B-Instruct" + default_image_model = '' + image_models = [default_image_model] @classmethod def get_models(cls, **kwargs): @@ -21,6 +25,7 @@ class DeepInfra(OpenaiAPI): url = 'https://api.deepinfra.com/models/featured' models = requests.get(url).json() cls.models = [model['model_name'] for model in models if model["type"] == "text-generation"] + cls.image_models = [model['model_name'] for model in models if model["reported_type"] == "text-to-image"] return cls.models @classmethod @@ -28,26 +33,26 @@ class DeepInfra(OpenaiAPI): cls, model: str, messages: Messages, - stream: bool, + stream: bool = True, temperature: float = 0.7, max_tokens: int = 1028, + prompt: str = None, **kwargs ) -> AsyncResult: headers = { 'Accept-Encoding': 'gzip, deflate, br', 'Accept-Language': 'en-US', - 'Connection': 'keep-alive', 'Origin': 'https://deepinfra.com', 'Referer': 'https://deepinfra.com/', - 'Sec-Fetch-Dest': 'empty', - 'Sec-Fetch-Mode': 'cors', - 'Sec-Fetch-Site': 'same-site', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36', 'X-Deepinfra-Source': 'web-embed', - 'sec-ch-ua': '"Google Chrome";v="119", 
"Chromium";v="119", "Not?A_Brand";v="24"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"macOS"', } + + # Check if the model is an image model + if model in cls.image_models: + return cls.create_image_generator(messages[-1]["content"] if prompt is None else prompt, model, headers=headers, **kwargs) + + # Text generation return super().create_async_generator( model, messages, stream=stream, @@ -55,4 +60,37 @@ class DeepInfra(OpenaiAPI): max_tokens=max_tokens, headers=headers, **kwargs - ) \ No newline at end of file + ) + + @classmethod + async def create_image_generator( + cls, + prompt: str, + model: str, + api_key: str = None, + api_base: str = "https://api.deepinfra.com/v1/inference", + proxy: str = None, + timeout: int = 180, + headers: dict = None, + extra_data: dict = {}, + **kwargs + ) -> AsyncResult: + if api_key is not None and headers is not None: + headers["Authorization"] = f"Bearer {api_key}" + + async with StreamSession( + proxies={"all": proxy}, + headers=headers, + timeout=timeout + ) as session: + model = cls.get_model(model) + data = {"prompt": prompt, **extra_data} + data = {"input": data} if model == cls.default_image_model else data + async with session.post(f"{api_base.rstrip('/')}/{model}", json=data) as response: + await raise_for_status(response) + data = await response.json() + images = data.get("output", data.get("images", data.get("image_url"))) + if not images: + raise RuntimeError(f"Response: {data}") + images = images[0] if len(images) == 1 else images + yield ImageResponse(images, prompt) diff --git a/g4f/Provider/needs_auth/DeepInfraImage.py b/g4f/Provider/needs_auth/DeepInfraImage.py deleted file mode 100644 index 32ce683a..00000000 --- a/g4f/Provider/needs_auth/DeepInfraImage.py +++ /dev/null @@ -1,81 +0,0 @@ -from __future__ import annotations - -import requests - -from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ...typing import AsyncResult, Messages -from ...requests import StreamSession, raise_for_status -from ...image import ImageResponse - -class DeepInfraImage(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://deepinfra.com" - parent = "DeepInfra" - working = True - needs_auth = True - default_model = '' - image_models = [default_model] - - @classmethod - def get_models(cls): - if not cls.models: - url = 'https://api.deepinfra.com/models/featured' - models = requests.get(url).json() - cls.models = [model['model_name'] for model in models if model["reported_type"] == "text-to-image"] - cls.image_models = cls.models - return cls.models - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - prompt: str = None, - **kwargs - ) -> AsyncResult: - yield await cls.create_async(messages[-1]["content"] if prompt is None else prompt, model, **kwargs) - - @classmethod - async def create_async( - cls, - prompt: str, - model: str, - api_key: str = None, - api_base: str = "https://api.deepinfra.com/v1/inference", - proxy: str = None, - timeout: int = 180, - extra_data: dict = {}, - **kwargs - ) -> ImageResponse: - headers = { - 'Accept-Encoding': 'gzip, deflate, br', - 'Accept-Language': 'en-US', - 'Connection': 'keep-alive', - 'Origin': 'https://deepinfra.com', - 'Referer': 'https://deepinfra.com/', - 'Sec-Fetch-Dest': 'empty', - 'Sec-Fetch-Mode': 'cors', - 'Sec-Fetch-Site': 'same-site', - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36', - 'X-Deepinfra-Source': 'web-embed', - 'sec-ch-ua': 
'"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"macOS"', - } - if api_key is not None: - headers["Authorization"] = f"Bearer {api_key}" - async with StreamSession( - proxies={"all": proxy}, - headers=headers, - timeout=timeout - ) as session: - model = cls.get_model(model) - data = {"prompt": prompt, **extra_data} - data = {"input": data} if model == cls.default_model else data - async with session.post(f"{api_base.rstrip('/')}/{model}", json=data) as response: - await raise_for_status(response) - data = await response.json() - images = data.get("output", data.get("images", data.get("image_url"))) - if not images: - raise RuntimeError(f"Response: {data}") - images = images[0] if len(images) == 1 else images - return ImageResponse(images, prompt) diff --git a/g4f/Provider/needs_auth/GigaChat.py b/g4f/Provider/needs_auth/GigaChat.py new file mode 100644 index 00000000..59da21a2 --- /dev/null +++ b/g4f/Provider/needs_auth/GigaChat.py @@ -0,0 +1,136 @@ +from __future__ import annotations + +import os +import ssl +import time +import uuid +from pathlib import Path + +import json +from aiohttp import ClientSession, TCPConnector, BaseConnector +from ...requests import raise_for_status + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...errors import MissingAuthError +from ..helper import get_connector +from ...cookies import get_cookies_dir + +access_token = "" +token_expires_at = 0 + +RUSSIAN_CA_CERT = """-----BEGIN CERTIFICATE----- +MIIFwjCCA6qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwcDELMAkGA1UEBhMCUlUx +PzA9BgNVBAoMNlRoZSBNaW5pc3RyeSBvZiBEaWdpdGFsIERldmVsb3BtZW50IGFu +ZCBDb21tdW5pY2F0aW9uczEgMB4GA1UEAwwXUnVzc2lhbiBUcnVzdGVkIFJvb3Qg +Q0EwHhcNMjIwMzAxMjEwNDE1WhcNMzIwMjI3MjEwNDE1WjBwMQswCQYDVQQGEwJS +VTE/MD0GA1UECgw2VGhlIE1pbmlzdHJ5IG9mIERpZ2l0YWwgRGV2ZWxvcG1lbnQg +YW5kIENvbW11bmljYXRpb25zMSAwHgYDVQQDDBdSdXNzaWFuIFRydXN0ZWQgUm9v +dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMfFOZ8pUAL3+r2n +qqE0Zp52selXsKGFYoG0GM5bwz1bSFtCt+AZQMhkWQheI3poZAToYJu69pHLKS6Q +XBiwBC1cvzYmUYKMYZC7jE5YhEU2bSL0mX7NaMxMDmH2/NwuOVRj8OImVa5s1F4U +zn4Kv3PFlDBjjSjXKVY9kmjUBsXQrIHeaqmUIsPIlNWUnimXS0I0abExqkbdrXbX +YwCOXhOO2pDUx3ckmJlCMUGacUTnylyQW2VsJIyIGA8V0xzdaeUXg0VZ6ZmNUr5Y +Ber/EAOLPb8NYpsAhJe2mXjMB/J9HNsoFMBFJ0lLOT/+dQvjbdRZoOT8eqJpWnVD +U+QL/qEZnz57N88OWM3rabJkRNdU/Z7x5SFIM9FrqtN8xewsiBWBI0K6XFuOBOTD +4V08o4TzJ8+Ccq5XlCUW2L48pZNCYuBDfBh7FxkB7qDgGDiaftEkZZfApRg2E+M9 +G8wkNKTPLDc4wH0FDTijhgxR3Y4PiS1HL2Zhw7bD3CbslmEGgfnnZojNkJtcLeBH +BLa52/dSwNU4WWLubaYSiAmA9IUMX1/RpfpxOxd4Ykmhz97oFbUaDJFipIggx5sX +ePAlkTdWnv+RWBxlJwMQ25oEHmRguNYf4Zr/Rxr9cS93Y+mdXIZaBEE0KS2iLRqa +OiWBki9IMQU4phqPOBAaG7A+eP8PAgMBAAGjZjBkMB0GA1UdDgQWBBTh0YHlzlpf +BKrS6badZrHF+qwshzAfBgNVHSMEGDAWgBTh0YHlzlpfBKrS6badZrHF+qwshzAS +BgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsF +AAOCAgEAALIY1wkilt/urfEVM5vKzr6utOeDWCUczmWX/RX4ljpRdgF+5fAIS4vH +tmXkqpSCOVeWUrJV9QvZn6L227ZwuE15cWi8DCDal3Ue90WgAJJZMfTshN4OI8cq +W9E4EG9wglbEtMnObHlms8F3CHmrw3k6KmUkWGoa+/ENmcVl68u/cMRl1JbW2bM+ +/3A+SAg2c6iPDlehczKx2oa95QW0SkPPWGuNA/CE8CpyANIhu9XFrj3RQ3EqeRcS +AQQod1RNuHpfETLU/A2gMmvn/w/sx7TB3W5BPs6rprOA37tutPq9u6FTZOcG1Oqj +C/B7yTqgI7rbyvox7DEXoX7rIiEqyNNUguTk/u3SZ4VXE2kmxdmSh3TQvybfbnXV +4JbCZVaqiZraqc7oZMnRoWrXRG3ztbnbes/9qhRGI7PqXqeKJBztxRTEVj8ONs1d +WN5szTwaPIvhkhO3CO5ErU2rVdUr89wKpNXbBODFKRtgxUT70YpmJ46VVaqdAhOZ +D9EUUn4YaeLaS8AjSF/h7UkjOibNc4qVDiPP+rkehFWM66PVnP1Msh93tc+taIfC 
+EYVMxjh8zNbFuoc7fzvvrFILLe7ifvEIUqSVIC/AzplM/Jxw7buXFeGP1qVCBEHq +391d/9RAfaZ12zkwFsl+IKwE/OZxW8AHa9i1p4GO0YSNuczzEm4= +-----END CERTIFICATE-----""" + +class GigaChat(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://developers.sber.ru/gigachat" + working = True + supports_message_history = True + supports_system_message = True + supports_stream = True + needs_auth = True + default_model = "GigaChat:latest" + models = ["GigaChat:latest", "GigaChat-Plus", "GigaChat-Pro"] + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + stream: bool = True, + proxy: str = None, + api_key: str = None, + connector: BaseConnector = None, + scope: str = "GIGACHAT_API_PERS", + update_interval: float = 0, + **kwargs + ) -> AsyncResult: + global access_token, token_expires_at + model = cls.get_model(model) + if not api_key: + raise MissingAuthError('Missing "api_key"') + + # Create certificate file in cookies directory + cookies_dir = Path(get_cookies_dir()) + cert_file = cookies_dir / 'russian_trusted_root_ca.crt' + + # Write certificate if it doesn't exist + if not cert_file.exists(): + cert_file.write_text(RUSSIAN_CA_CERT) + + ssl_context = ssl.create_default_context(cafile=str(cert_file)) + if connector is None: + connector = TCPConnector(ssl_context=ssl_context) + + async with ClientSession(connector=get_connector(connector, proxy)) as session: + if token_expires_at - int(time.time() * 1000) < 60000: + async with session.post(url="https://ngw.devices.sberbank.ru:9443/api/v2/oauth", + headers={"Authorization": f"Bearer {api_key}", + "RqUID": str(uuid.uuid4()), + "Content-Type": "application/x-www-form-urlencoded"}, + data={"scope": scope}) as response: + await raise_for_status(response) + data = await response.json() + access_token = data['access_token'] + token_expires_at = data['expires_at'] + + async with session.post(url="https://gigachat.devices.sberbank.ru/api/v1/chat/completions", + headers={"Authorization": f"Bearer {access_token}"}, + json={ + "model": model, + "messages": messages, + "stream": stream, + "update_interval": update_interval, + **kwargs + }) as response: + await raise_for_status(response) + + async for line in response.content: + if not stream: + yield json.loads(line.decode("utf-8"))['choices'][0]['message']['content'] + return + + if line and line.startswith(b"data:"): + line = line[6:-1] # remove "data: " prefix and "\n" suffix + if line.strip() == b"[DONE]": + return + else: + msg = json.loads(line.decode("utf-8"))['choices'][0] + content = msg['delta']['content'] + + if content: + yield content + + if 'finish_reason' in msg: + return diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py index 4ddfd597..b5a7c410 100644 --- a/g4f/Provider/needs_auth/HuggingChat.py +++ b/g4f/Provider/needs_auth/HuggingChat.py @@ -43,7 +43,6 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): 'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF', 'Qwen/Qwen2.5-Coder-32B-Instruct', 'meta-llama/Llama-3.2-11B-Vision-Instruct', - 'NousResearch/Hermes-3-Llama-3.1-8B', 'mistralai/Mistral-Nemo-Instruct-2407', 'microsoft/Phi-3.5-mini-instruct', ] + image_models @@ -56,7 +55,6 @@ class HuggingChat(AbstractProvider, ProviderModelMixin): "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF", "qwen-2.5-coder-32b": "Qwen/Qwen2.5-Coder-32B-Instruct", "llama-3.2-11b": "meta-llama/Llama-3.2-11B-Vision-Instruct", - "hermes-3": "NousResearch/Hermes-3-Llama-3.1-8B", "mistral-nemo": 
"mistralai/Mistral-Nemo-Instruct-2407", "phi-3.5-mini": "microsoft/Phi-3.5-mini-instruct", diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py index 96241d6a..03898013 100644 --- a/g4f/Provider/needs_auth/__init__.py +++ b/g4f/Provider/needs_auth/__init__.py @@ -1,14 +1,12 @@ -from .gigachat import * - from .BingCreateImages import BingCreateImages from .Cerebras import Cerebras from .CopilotAccount import CopilotAccount from .Custom import Custom from .DeepInfra import DeepInfra -from .DeepInfraImage import DeepInfraImage from .DeepSeek import DeepSeek from .Gemini import Gemini from .GeminiPro import GeminiPro +from .GigaChat import GigaChat from .GithubCopilot import GithubCopilot from .GlhfChat import GlhfChat from .Groq import Groq diff --git a/g4f/Provider/needs_auth/gigachat/GigaChat.py b/g4f/Provider/needs_auth/gigachat/GigaChat.py deleted file mode 100644 index c9f1c011..00000000 --- a/g4f/Provider/needs_auth/gigachat/GigaChat.py +++ /dev/null @@ -1,92 +0,0 @@ -from __future__ import annotations - -import os -import ssl -import time -import uuid - -import json -from aiohttp import ClientSession, TCPConnector, BaseConnector -from g4f.requests import raise_for_status - -from ....typing import AsyncResult, Messages -from ...base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ....errors import MissingAuthError -from ...helper import get_connector - -access_token = "" -token_expires_at = 0 - -class GigaChat(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://developers.sber.ru/gigachat" - working = True - supports_message_history = True - supports_system_message = True - supports_stream = True - needs_auth = True - default_model = "GigaChat:latest" - models = ["GigaChat:latest", "GigaChat-Plus", "GigaChat-Pro"] - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - stream: bool = True, - proxy: str = None, - api_key: str = None, - connector: BaseConnector = None, - scope: str = "GIGACHAT_API_PERS", - update_interval: float = 0, - **kwargs - ) -> AsyncResult: - global access_token, token_expires_at - model = cls.get_model(model) - if not api_key: - raise MissingAuthError('Missing "api_key"') - - cafile = os.path.join(os.path.dirname(__file__), "russian_trusted_root_ca_pem.crt") - ssl_context = ssl.create_default_context(cafile=cafile) if os.path.exists(cafile) else None - if connector is None and ssl_context is not None: - connector = TCPConnector(ssl_context=ssl_context) - async with ClientSession(connector=get_connector(connector, proxy)) as session: - if token_expires_at - int(time.time() * 1000) < 60000: - async with session.post(url="https://ngw.devices.sberbank.ru:9443/api/v2/oauth", - headers={"Authorization": f"Bearer {api_key}", - "RqUID": str(uuid.uuid4()), - "Content-Type": "application/x-www-form-urlencoded"}, - data={"scope": scope}) as response: - await raise_for_status(response) - data = await response.json() - access_token = data['access_token'] - token_expires_at = data['expires_at'] - - async with session.post(url="https://gigachat.devices.sberbank.ru/api/v1/chat/completions", - headers={"Authorization": f"Bearer {access_token}"}, - json={ - "model": model, - "messages": messages, - "stream": stream, - "update_interval": update_interval, - **kwargs - }) as response: - await raise_for_status(response) - - async for line in response.content: - if not stream: - yield json.loads(line.decode("utf-8"))['choices'][0]['message']['content'] - return - - if line and 
line.startswith(b"data:"): - line = line[6:-1] # remove "data: " prefix and "\n" suffix - if line.strip() == b"[DONE]": - return - else: - msg = json.loads(line.decode("utf-8"))['choices'][0] - content = msg['delta']['content'] - - if content: - yield content - - if 'finish_reason' in msg: - return diff --git a/g4f/Provider/needs_auth/gigachat/__init__.py b/g4f/Provider/needs_auth/gigachat/__init__.py deleted file mode 100644 index c9853742..00000000 --- a/g4f/Provider/needs_auth/gigachat/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .GigaChat import GigaChat - diff --git a/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt deleted file mode 100644 index 4c143a21..00000000 --- a/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt +++ /dev/null @@ -1,33 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFwjCCA6qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwcDELMAkGA1UEBhMCUlUx -PzA9BgNVBAoMNlRoZSBNaW5pc3RyeSBvZiBEaWdpdGFsIERldmVsb3BtZW50IGFu -ZCBDb21tdW5pY2F0aW9uczEgMB4GA1UEAwwXUnVzc2lhbiBUcnVzdGVkIFJvb3Qg -Q0EwHhcNMjIwMzAxMjEwNDE1WhcNMzIwMjI3MjEwNDE1WjBwMQswCQYDVQQGEwJS -VTE/MD0GA1UECgw2VGhlIE1pbmlzdHJ5IG9mIERpZ2l0YWwgRGV2ZWxvcG1lbnQg -YW5kIENvbW11bmljYXRpb25zMSAwHgYDVQQDDBdSdXNzaWFuIFRydXN0ZWQgUm9v -dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMfFOZ8pUAL3+r2n -qqE0Zp52selXsKGFYoG0GM5bwz1bSFtCt+AZQMhkWQheI3poZAToYJu69pHLKS6Q -XBiwBC1cvzYmUYKMYZC7jE5YhEU2bSL0mX7NaMxMDmH2/NwuOVRj8OImVa5s1F4U -zn4Kv3PFlDBjjSjXKVY9kmjUBsXQrIHeaqmUIsPIlNWUnimXS0I0abExqkbdrXbX -YwCOXhOO2pDUx3ckmJlCMUGacUTnylyQW2VsJIyIGA8V0xzdaeUXg0VZ6ZmNUr5Y -Ber/EAOLPb8NYpsAhJe2mXjMB/J9HNsoFMBFJ0lLOT/+dQvjbdRZoOT8eqJpWnVD -U+QL/qEZnz57N88OWM3rabJkRNdU/Z7x5SFIM9FrqtN8xewsiBWBI0K6XFuOBOTD -4V08o4TzJ8+Ccq5XlCUW2L48pZNCYuBDfBh7FxkB7qDgGDiaftEkZZfApRg2E+M9 -G8wkNKTPLDc4wH0FDTijhgxR3Y4PiS1HL2Zhw7bD3CbslmEGgfnnZojNkJtcLeBH -BLa52/dSwNU4WWLubaYSiAmA9IUMX1/RpfpxOxd4Ykmhz97oFbUaDJFipIggx5sX -ePAlkTdWnv+RWBxlJwMQ25oEHmRguNYf4Zr/Rxr9cS93Y+mdXIZaBEE0KS2iLRqa -OiWBki9IMQU4phqPOBAaG7A+eP8PAgMBAAGjZjBkMB0GA1UdDgQWBBTh0YHlzlpf -BKrS6badZrHF+qwshzAfBgNVHSMEGDAWgBTh0YHlzlpfBKrS6badZrHF+qwshzAS -BgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsF -AAOCAgEAALIY1wkilt/urfEVM5vKzr6utOeDWCUczmWX/RX4ljpRdgF+5fAIS4vH -tmXkqpSCOVeWUrJV9QvZn6L227ZwuE15cWi8DCDal3Ue90WgAJJZMfTshN4OI8cq -W9E4EG9wglbEtMnObHlms8F3CHmrw3k6KmUkWGoa+/ENmcVl68u/cMRl1JbW2bM+ -/3A+SAg2c6iPDlehczKx2oa95QW0SkPPWGuNA/CE8CpyANIhu9XFrj3RQ3EqeRcS -AQQod1RNuHpfETLU/A2gMmvn/w/sx7TB3W5BPs6rprOA37tutPq9u6FTZOcG1Oqj -C/B7yTqgI7rbyvox7DEXoX7rIiEqyNNUguTk/u3SZ4VXE2kmxdmSh3TQvybfbnXV -4JbCZVaqiZraqc7oZMnRoWrXRG3ztbnbes/9qhRGI7PqXqeKJBztxRTEVj8ONs1d -WN5szTwaPIvhkhO3CO5ErU2rVdUr89wKpNXbBODFKRtgxUT70YpmJ46VVaqdAhOZ -D9EUUn4YaeLaS8AjSF/h7UkjOibNc4qVDiPP+rkehFWM66PVnP1Msh93tc+taIfC -EYVMxjh8zNbFuoc7fzvvrFILLe7ifvEIUqSVIC/AzplM/Jxw7buXFeGP1qVCBEHq -391d/9RAfaZ12zkwFsl+IKwE/OZxW8AHa9i1p4GO0YSNuczzEm4= ------END CERTIFICATE----- \ No newline at end of file diff --git a/g4f/Provider/not_working/AIChatFree.py b/g4f/Provider/not_working/AIChatFree.py deleted file mode 100644 index a4f80d47..00000000 --- a/g4f/Provider/not_working/AIChatFree.py +++ /dev/null @@ -1,76 +0,0 @@ -from __future__ import annotations - -import time -from hashlib import sha256 - -from aiohttp import BaseConnector, ClientSession - -from ...errors import RateLimitError -from ...requests import raise_for_status -from ...requests.aiohttp import get_connector -from ...typing import AsyncResult, Messages -from ..base_provider import 
AsyncGeneratorProvider, ProviderModelMixin - - -class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://aichatfree.info/" - working = False - supports_stream = True - supports_message_history = True - default_model = 'gemini-pro' - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - connector: BaseConnector = None, - **kwargs, - ) -> AsyncResult: - headers = { - "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0", - "Accept": "*/*", - "Accept-Language": "en-US,en;q=0.5", - "Accept-Encoding": "gzip, deflate, br", - "Content-Type": "text/plain;charset=UTF-8", - "Referer": f"{cls.url}/", - "Origin": cls.url, - "Sec-Fetch-Dest": "empty", - "Sec-Fetch-Mode": "cors", - "Sec-Fetch-Site": "same-origin", - "Connection": "keep-alive", - "TE": "trailers", - } - async with ClientSession( - connector=get_connector(connector, proxy), headers=headers - ) as session: - timestamp = int(time.time() * 1e3) - data = { - "messages": [ - { - "role": "model" if message["role"] == "assistant" else "user", - "parts": [{"text": message["content"]}], - } - for message in messages - ], - "time": timestamp, - "pass": None, - "sign": generate_signature(timestamp, messages[-1]["content"]), - } - async with session.post( - f"{cls.url}/api/generate", json=data, proxy=proxy - ) as response: - if response.status == 500: - if "Quota exceeded" in await response.text(): - raise RateLimitError( - f"Response {response.status}: Rate limit reached" - ) - await raise_for_status(response) - async for chunk in response.content.iter_any(): - yield chunk.decode(errors="ignore") - - -def generate_signature(time: int, text: str, secret: str = ""): - message = f"{time}:{text}:{secret}" - return sha256(message.encode()).hexdigest() diff --git a/g4f/Provider/not_working/AIUncensored.py b/g4f/Provider/not_working/AIUncensored.py deleted file mode 100644 index 7f4c8a94..00000000 --- a/g4f/Provider/not_working/AIUncensored.py +++ /dev/null @@ -1,132 +0,0 @@ -from __future__ import annotations - -import json -import random -from aiohttp import ClientSession, ClientError -import asyncio -from itertools import cycle - -from ...typing import AsyncResult, Messages -from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ...image import ImageResponse - -class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://www.aiuncensored.info/ai_uncensored" - api_endpoints_text = [ - "https://twitterclone-i0wr.onrender.com/api/chat", - "https://twitterclone-4e8t.onrender.com/api/chat", - "https://twitterclone-8wd1.onrender.com/api/chat", - ] - api_endpoints_image = [ - "https://twitterclone-4e8t.onrender.com/api/image", - "https://twitterclone-i0wr.onrender.com/api/image", - "https://twitterclone-8wd1.onrender.com/api/image", - ] - working = False - supports_stream = True - supports_system_message = True - supports_message_history = True - - default_model = 'TextGenerations' - text_models = [default_model] - image_models = ['ImageGenerations'] - models = [*text_models, *image_models] - - model_aliases = { - "flux": "ImageGenerations", - } - - @staticmethod - def generate_cipher() -> str: - """Generate a cipher in format like '3221229284179118'""" - return ''.join([str(random.randint(0, 9)) for _ in range(16)]) - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - 
return cls.default_model - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - 'accept': '*/*', - 'accept-language': 'en-US,en;q=0.9', - 'cache-control': 'no-cache', - 'content-type': 'application/json', - 'origin': 'https://www.aiuncensored.info', - 'pragma': 'no-cache', - 'priority': 'u=1, i', - 'referer': 'https://www.aiuncensored.info/', - 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"Linux"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'cross-site', - 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36' - } - - async with ClientSession(headers=headers) as session: - if model in cls.image_models: - prompt = messages[-1]['content'] - data = { - "prompt": prompt, - "cipher": cls.generate_cipher() - } - - endpoints = cycle(cls.api_endpoints_image) - - while True: - endpoint = next(endpoints) - try: - async with session.post(endpoint, json=data, proxy=proxy, timeout=10) as response: - response.raise_for_status() - response_data = await response.json() - image_url = response_data['image_url'] - image_response = ImageResponse(images=image_url, alt=prompt) - yield image_response - return - except (ClientError, asyncio.TimeoutError): - continue - - elif model in cls.text_models: - data = { - "messages": messages, - "cipher": cls.generate_cipher() - } - - endpoints = cycle(cls.api_endpoints_text) - - while True: - endpoint = next(endpoints) - try: - async with session.post(endpoint, json=data, proxy=proxy, timeout=10) as response: - response.raise_for_status() - full_response = "" - async for line in response.content: - line = line.decode('utf-8') - if line.startswith("data: "): - try: - json_str = line[6:] - if json_str != "[DONE]": - data = json.loads(json_str) - if "data" in data: - full_response += data["data"] - yield data["data"] - except json.JSONDecodeError: - continue - return - except (ClientError, asyncio.TimeoutError): - continue diff --git a/g4f/Provider/not_working/Ai4Chat.py b/g4f/Provider/not_working/Ai4Chat.py deleted file mode 100644 index 9b55e4ff..00000000 --- a/g4f/Provider/not_working/Ai4Chat.py +++ /dev/null @@ -1,89 +0,0 @@ -from __future__ import annotations - -import json -import re -import logging -from aiohttp import ClientSession - -from ...typing import AsyncResult, Messages -from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..helper import format_prompt - -logger = logging.getLogger(__name__) - -class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin): - label = "AI4Chat" - url = "https://www.ai4chat.co" - api_endpoint = "https://www.ai4chat.co/generate-response" - working = False - supports_stream = True - supports_system_message = True - supports_message_history = True - - default_model = 'gpt-4' - models = [default_model] - - model_aliases = {} - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - "accept": "*/*", - "accept-language": "en-US,en;q=0.9", - "cache-control": "no-cache", - 
"content-type": "application/json", - "origin": "https://www.ai4chat.co", - "pragma": "no-cache", - "priority": "u=1, i", - "referer": "https://www.ai4chat.co/gpt/talkdirtytome", - "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Linux"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36" - } - - async with ClientSession(headers=headers) as session: - data = { - "messages": [ - { - "role": "user", - "content": format_prompt(messages) - } - ] - } - - try: - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - result = await response.text() - - json_result = json.loads(result) - - message = json_result.get("message", "") - - clean_message = re.sub(r'<[^>]+>', '', message) - - yield clean_message - except Exception as e: - logger.exception("Error while calling AI 4Chat API: %s", e) - yield f"Error: {e}" diff --git a/g4f/Provider/not_working/AmigoChat.py b/g4f/Provider/not_working/AmigoChat.py new file mode 100644 index 00000000..31d1b10b --- /dev/null +++ b/g4f/Provider/not_working/AmigoChat.py @@ -0,0 +1,251 @@ +from __future__ import annotations + +import json +import uuid + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...image import ImageResponse +from ...requests import StreamSession, raise_for_status +from ...errors import ResponseStatusError + +MODELS = { + 'chat': { + 'gpt-4o-2024-11-20': {'persona_id': "gpt"}, + 'gpt-4o': {'persona_id': "summarizer"}, + 'gpt-4o-mini': {'persona_id': "amigo"}, + + 'o1-preview-': {'persona_id': "openai-o-one"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan + 'o1-preview-2024-09-12-': {'persona_id': "orion"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan + 'o1-mini-': {'persona_id': "openai-o-one-mini"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan + + 'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo': {'persona_id': "llama-three-point-one"}, + 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo': {'persona_id': "llama-3-2"}, + 'codellama/CodeLlama-34b-Instruct-hf': {'persona_id': "codellama-CodeLlama-34b-Instruct-hf"}, + + 'gemini-1.5-pro': {'persona_id': "gemini-1-5-pro"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan + 'gemini-1.5-flash': {'persona_id': "gemini-1.5-flash"}, + + 'claude-3-5-sonnet-20240620': {'persona_id': "claude"}, + 'claude-3-5-sonnet-20241022': {'persona_id': "clude-claude-3-5-sonnet-20241022"}, + 'claude-3-5-haiku-latest': {'persona_id': "3-5-haiku"}, + + 'Qwen/Qwen2.5-72B-Instruct-Turbo': {'persona_id': "qwen-2-5"}, + + 'google/gemma-2b-it': {'persona_id': "google-gemma-2b-it"}, + 'google/gemma-7b': {'persona_id': "google-gemma-7b"}, # Error handling AIML chat completion stream + + 'Gryphe/MythoMax-L2-13b': {'persona_id': "Gryphe-MythoMax-L2-13b"}, + + 'mistralai/Mistral-7B-Instruct-v0.3': {'persona_id': "mistralai-Mistral-7B-Instruct-v0.1"}, + 'mistralai/mistral-tiny': {'persona_id': "mistralai-mistral-tiny"}, + 'mistralai/mistral-nemo': {'persona_id': "mistralai-mistral-nemo"}, + + 'deepseek-ai/deepseek-llm-67b-chat': {'persona_id': "deepseek-ai-deepseek-llm-67b-chat"}, + + 
'databricks/dbrx-instruct': {'persona_id': "databricks-dbrx-instruct"}, + + 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO': {'persona_id': "NousResearch-Nous-Hermes-2-Mixtral-8x7B-DPO"}, + + 'x-ai/grok-beta': {'persona_id': "x-ai-grok-beta"}, + + 'anthracite-org/magnum-v4-72b': {'persona_id': "anthracite-org-magnum-v4-72b"}, + + 'cohere/command-r-plus': {'persona_id': "cohere-command-r-plus"}, + + 'ai21/jamba-1-5-mini': {'persona_id': "ai21-jamba-1-5-mini"}, + + 'zero-one-ai/Yi-34B': {'persona_id': "zero-one-ai-Yi-34B"} # Error handling AIML chat completion stream + }, + + 'image': { + 'flux-pro/v1.1': {'persona_id': "flux-1-1-pro"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan + 'flux-realism': {'persona_id': "flux-realism"}, + 'flux-pro': {'persona_id': "flux-pro"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan + 'flux-pro/v1.1-ultra': {'persona_id': "flux-pro-v1.1-ultra"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan + 'flux-pro/v1.1-ultra-raw': {'persona_id': "flux-pro-v1.1-ultra-raw"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan + 'flux/dev': {'persona_id': "flux-dev"}, + + 'dall-e-3': {'persona_id': "dalle-three"}, + + 'recraft-v3': {'persona_id': "recraft"} + } +} + +class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://amigochat.io/chat/" + chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions" + image_api_endpoint = "https://api.amigochat.io/v1/images/generations" + + working = False + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'gpt-4o-mini' + + chat_models = list(MODELS['chat'].keys()) + image_models = list(MODELS['image'].keys()) + models = chat_models + image_models + + model_aliases = { + ### chat ### + "gpt-4o": "gpt-4o-2024-11-20", + "gpt-4o-mini": "gpt-4o-mini", + + "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo", + "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo", + "codellama-34b": "codellama/CodeLlama-34b-Instruct-hf", + + "gemini-flash": "gemini-1.5-flash", + + "claude-3.5-sonnet": "claude-3-5-sonnet-20241022", + "claude-3.5-haiku": "claude-3-5-haiku-latest", + + "qwen-2.5-72b": "Qwen/Qwen2.5-72B-Instruct-Turbo", + "gemma-2b": "google/gemma-2b-it", + + "mythomax-13b": "Gryphe/MythoMax-L2-13b", + + "mixtral-7b": "mistralai/Mistral-7B-Instruct-v0.3", + "mistral-nemo": "mistralai/mistral-nemo", + + "deepseek-chat": "deepseek-ai/deepseek-llm-67b-chat", + + "dbrx-instruct": "databricks/dbrx-instruct", + + "mixtral-8x7b-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", + + "grok-beta": "x-ai/grok-beta", + + "magnum-72b": "anthracite-org/magnum-v4-72b", + + "command-r-plus": "cohere/command-r-plus", + + "jamba-mini": "ai21/jamba-1-5-mini", + + + ### image ### + "flux-dev": "flux/dev", + } + + @classmethod + def get_personaId(cls, model: str) -> str: + if model in cls.chat_models: + return MODELS['chat'][model]['persona_id'] + elif model in cls.image_models: + return MODELS['image'][model]['persona_id'] + else: + raise ValueError(f"Unknown model: {model}") + + @staticmethod + def generate_chat_id() -> str: + """Generate a chat ID in format: 8-4-4-4-12 hexadecimal digits""" + return str(uuid.uuid4()) + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: 
Messages, + proxy: str = None, + stream: bool = False, + timeout: int = 300, + frequency_penalty: float = 0, + max_tokens: int = 4000, + presence_penalty: float = 0, + temperature: float = 0.5, + top_p: float = 0.95, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + device_uuid = str(uuid.uuid4()) + max_retries = 3 + retry_count = 0 + + while retry_count < max_retries: + try: + headers = { + "accept": "*/*", + "accept-language": "en-US,en;q=0.9", + "authorization": "Bearer", + "cache-control": "no-cache", + "content-type": "application/json", + "origin": cls.url, + "pragma": "no-cache", + "priority": "u=1, i", + "referer": f"{cls.url}/", + "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"', + "sec-ch-ua-mobile": "?0", + "sec-ch-ua-platform": '"Linux"', + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", + "x-device-language": "en-US", + "x-device-platform": "web", + "x-device-uuid": device_uuid, + "x-device-version": "1.0.45" + } + + async with StreamSession(headers=headers, proxy=proxy) as session: + if model not in cls.image_models: + data = { + "chatId": cls.generate_chat_id(), + "frequency_penalty": frequency_penalty, + "max_tokens": max_tokens, + "messages": messages, + "model": model, + "personaId": cls.get_personaId(model), + "presence_penalty": presence_penalty, + "stream": stream, + "temperature": temperature, + "top_p": top_p + } + async with session.post(cls.chat_api_endpoint, json=data, timeout=timeout) as response: + await raise_for_status(response) + async for line in response.iter_lines(): + line = line.decode('utf-8').strip() + if line.startswith('data: '): + if line == 'data: [DONE]': + break + try: + chunk = json.loads(line[6:]) # Remove 'data: ' prefix + if 'choices' in chunk and len(chunk['choices']) > 0: + choice = chunk['choices'][0] + if 'delta' in choice: + content = choice['delta'].get('content') + elif 'text' in choice: + content = choice['text'] + else: + content = None + if content: + yield content + except json.JSONDecodeError: + pass + else: + # Image generation + prompt = messages[-1]['content'] + data = { + "prompt": prompt, + "model": model, + "personaId": cls.get_personaId(model) + } + async with session.post(cls.image_api_endpoint, json=data) as response: + await raise_for_status(response) + response_data = await response.json() + if "data" in response_data: + image_urls = [] + for item in response_data["data"]: + if "url" in item: + image_url = item["url"] + image_urls.append(image_url) + if image_urls: + yield ImageResponse(image_urls, prompt) + else: + yield None + break + except (ResponseStatusError, Exception) as e: + retry_count += 1 + if retry_count >= max_retries: + raise e + device_uuid = str(uuid.uuid4()) diff --git a/g4f/Provider/not_working/DeepInfraChat.py b/g4f/Provider/not_working/DeepInfraChat.py new file mode 100644 index 00000000..17e6a284 --- /dev/null +++ b/g4f/Provider/not_working/DeepInfraChat.py @@ -0,0 +1,88 @@ +from __future__ import annotations + +import json +from aiohttp import ClientSession + +from ...typing import AsyncResult, Messages +from ...requests.raise_for_status import raise_for_status +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin + +class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://deepinfra.com/chat" + api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions" + + working = False + supports_stream = True + supports_system_message = True + 
supports_message_history = True + + default_model = 'meta-llama/Llama-3.3-70B-Instruct-Turbo' + models = [ + 'meta-llama/Llama-3.3-70B-Instruct', + 'meta-llama/Meta-Llama-3.1-8B-Instruct', + default_model, + 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo', + 'Qwen/QwQ-32B-Preview', + 'microsoft/WizardLM-2-8x22B', + 'Qwen/Qwen2.5-72B-Instruct', + 'Qwen/Qwen2.5-Coder-32B-Instruct', + 'nvidia/Llama-3.1-Nemotron-70B-Instruct', + ] + model_aliases = { + "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct", + "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo", + "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", + "qwq-32b": "Qwen/QwQ-32B-Preview", + "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B", + "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct", + "qwen-2.5-coder-32b": "Qwen/Qwen2.5-Coder-32B-Instruct", + "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct", + } + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + 'Accept-Language': 'en-US,en;q=0.9', + 'Content-Type': 'application/json', + 'Origin': 'https://deepinfra.com', + 'Referer': 'https://deepinfra.com/', + 'X-Deepinfra-Source': 'web-page', + 'accept': 'text/event-stream', + } + async with ClientSession(headers=headers) as session: + data = { + "model": model, + "messages": messages, + "stream": True + } + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + await raise_for_status(response) + async for chunk in response.content: + if chunk: + chunk_text = chunk.decode(errors="ignore") + try: + # Handle streaming response + if chunk_text.startswith("data: "): + if chunk_text.strip() == "data: [DONE]": + continue + chunk_data = json.loads(chunk_text[6:]) + content = chunk_data["choices"][0]["delta"].get("content") + if content: + yield content + # Handle non-streaming response + else: + chunk_data = json.loads(chunk_text) + content = chunk_data["choices"][0]["message"].get("content") + if content: + yield content + except (json.JSONDecodeError, KeyError): + continue diff --git a/g4f/Provider/not_working/GPROChat.py b/g4f/Provider/not_working/GPROChat.py deleted file mode 100644 index 52c7f947..00000000 --- a/g4f/Provider/not_working/GPROChat.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import annotations -import hashlib -import time -from aiohttp import ClientSession -from ...typing import AsyncResult, Messages -from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin -from ..helper import format_prompt - -class GPROChat(AsyncGeneratorProvider, ProviderModelMixin): - label = "GPROChat" - url = "https://gprochat.com" - api_endpoint = "https://gprochat.com/api/generate" - working = False - supports_stream = True - supports_message_history = True - default_model = 'gemini-pro' - - @staticmethod - def generate_signature(timestamp: int, message: str) -> str: - secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8" - hash_input = f"{timestamp}:{message}:{secret_key}" - signature = hashlib.sha256(hash_input.encode('utf-8')).hexdigest() - return signature - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - 
) -> AsyncResult: - model = cls.get_model(model) - timestamp = int(time.time() * 1000) - prompt = format_prompt(messages) - sign = cls.generate_signature(timestamp, prompt) - - headers = { - "accept": "*/*", - "origin": cls.url, - "referer": f"{cls.url}/", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", - "content-type": "text/plain;charset=UTF-8" - } - - data = { - "messages": [{"role": "user", "parts": [{"text": prompt}]}], - "time": timestamp, - "pass": None, - "sign": sign - } - - async with ClientSession(headers=headers) as session: - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - async for chunk in response.content.iter_any(): - if chunk: - yield chunk.decode() diff --git a/g4f/Provider/not_working/ReplicateHome.py b/g4f/Provider/not_working/ReplicateHome.py new file mode 100644 index 00000000..e8a99e83 --- /dev/null +++ b/g4f/Provider/not_working/ReplicateHome.py @@ -0,0 +1,123 @@ +from __future__ import annotations + +import json +import asyncio +from aiohttp import ClientSession, ContentTypeError + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ...requests.aiohttp import get_connector +from ...requests.raise_for_status import raise_for_status +from ..helper import format_prompt +from ...image import ImageResponse + +class ReplicateHome(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://replicate.com" + api_endpoint = "https://homepage.replicate.com/api/prediction" + + working = False + supports_stream = True + + default_model = 'google-deepmind/gemma-2b-it' + default_image_model = 'stability-ai/stable-diffusion-3' + + image_models = [ + 'stability-ai/stable-diffusion-3', + 'bytedance/sdxl-lightning-4step', + 'playgroundai/playground-v2.5-1024px-aesthetic', + ] + + text_models = [ + 'google-deepmind/gemma-2b-it', + ] + + models = text_models + image_models + + model_aliases = { + # image_models + "sd-3": "stability-ai/stable-diffusion-3", + "sdxl": "bytedance/sdxl-lightning-4step", + "playground-v2.5": "playgroundai/playground-v2.5-1024px-aesthetic", + + # text_models + "gemma-2b": "google-deepmind/gemma-2b-it", + } + + model_versions = { + # image_models + 'stability-ai/stable-diffusion-3': "527d2a6296facb8e47ba1eaf17f142c240c19a30894f437feee9b91cc29d8e4f", + 'bytedance/sdxl-lightning-4step': "5f24084160c9089501c1b3545d9be3c27883ae2239b6f412990e82d4a6210f8f", + 'playgroundai/playground-v2.5-1024px-aesthetic': "a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24", + + # text_models + "google-deepmind/gemma-2b-it": "dff94eaf770e1fc211e425a50b51baa8e4cac6c39ef074681f9e39d778773626", + } + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + prompt: str = None, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "accept": "*/*", + "accept-language": "en-US,en;q=0.9", + "content-type": "application/json", + "origin": "https://replicate.com", + "referer": "https://replicate.com/", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36" + } + + async with ClientSession(headers=headers, connector=get_connector(proxy=proxy)) as session: + if prompt is None: + if model in cls.image_models: + prompt = messages[-1]['content'] + else: + prompt = format_prompt(messages) + + data = { + "model": model, + 
"version": cls.model_versions[model], + "input": {"prompt": prompt}, + } + + async with session.post(cls.api_endpoint, json=data) as response: + await raise_for_status(response) + result = await response.json() + prediction_id = result['id'] + + poll_url = f"https://homepage.replicate.com/api/poll?id={prediction_id}" + max_attempts = 30 + delay = 5 + for _ in range(max_attempts): + async with session.get(poll_url) as response: + await raise_for_status(response) + try: + result = await response.json() + except ContentTypeError: + text = await response.text() + try: + result = json.loads(text) + except json.JSONDecodeError: + raise ValueError(f"Unexpected response format: {text}") + + if result['status'] == 'succeeded': + if model in cls.image_models: + image_url = result['output'][0] + yield ImageResponse(image_url, prompt) + return + else: + for chunk in result['output']: + yield chunk + break + elif result['status'] == 'failed': + raise Exception(f"Prediction failed: {result.get('error')}") + await asyncio.sleep(delay) + + if result['status'] != 'succeeded': + raise Exception("Prediction timed out") diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py index 9ca89cf9..7bec0a36 100644 --- a/g4f/Provider/not_working/__init__.py +++ b/g4f/Provider/not_working/__init__.py @@ -1,18 +1,18 @@ from .AI365VIP import AI365VIP -from .AIChatFree import AIChatFree from .AiChatOnline import AiChatOnline from .AiChats import AiChats -from .AIUncensored import AIUncensored +from .AmigoChat import AmigoChat from .Aura import Aura from .Chatgpt4o import Chatgpt4o from .Chatgpt4Online import Chatgpt4Online from .ChatgptFree import ChatgptFree +from .DeepInfraChat import DeepInfraChat from .FlowGpt import FlowGpt from .FreeNetfly import FreeNetfly -from .GPROChat import GPROChat from .Koala import Koala from .MagickPen import MagickPen from .MyShell import MyShell +from .ReplicateHome import ReplicateHome from .RobocodersAPI import RobocodersAPI from .Theb import Theb from .Upstage import Upstage diff --git a/g4f/models.py b/g4f/models.py index 6d07dbf6..f05ea71f 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -4,7 +4,9 @@ from dataclasses import dataclass from .Provider import IterListProvider, ProviderType from .Provider import ( + AIChatFree, Airforce, + AIUncensored, AutonomousAI, Blackbox, BlackboxCreateAgent, @@ -14,19 +16,18 @@ from .Provider import ( ChatGpt, ChatGptEs, ChatGptt, - ClaudeSon, Cloudflare, Copilot, CopilotAccount, DarkAI, DDG, - DeepInfraChat, GigaChat, Gemini, GeminiPro, HuggingChat, HuggingFace, HuggingSpace, + GPROChat, Jmuz, Liaobots, Mhystical, @@ -38,9 +39,9 @@ from .Provider import ( Pi, PollinationsAI, Reka, - ReplicateHome, RubiksAI, TeachAnything, + Yqcloud, ) @dataclass(unsafe_hash=True) @@ -80,8 +81,8 @@ default = Model( CablyAI, OpenaiChat, DarkAI, - ClaudeSon, - DeepInfraChat, + Yqcloud, + AIUncensored, Airforce, Cloudflare, ]) @@ -103,7 +104,7 @@ gpt_35_turbo = Model( gpt_4 = Model( name = 'gpt-4', base_provider = 'OpenAI', - best_provider = IterListProvider([DDG, Blackbox, Jmuz, ChatGptEs, ChatGptt, PollinationsAI, Copilot, OpenaiChat, Liaobots, Mhystical]) + best_provider = IterListProvider([DDG, Blackbox, Jmuz, ChatGptEs, ChatGptt, PollinationsAI, Copilot, Yqcloud, OpenaiChat, Liaobots, Mhystical]) ) # gpt-4o @@ -169,13 +170,13 @@ llama_3_8b = Model( llama_3_1_8b = Model( name = "llama-3.1-8b", base_provider = "Meta Llama", - best_provider = IterListProvider([Blackbox, Jmuz, DeepInfraChat, Cloudflare, Airforce, PerplexityLabs]) + 
best_provider = IterListProvider([Blackbox, Jmuz, Cloudflare, Airforce, PerplexityLabs]) ) llama_3_1_70b = Model( name = "llama-3.1-70b", base_provider = "Meta Llama", - best_provider = IterListProvider([DDG, Jmuz, Blackbox, DeepInfraChat, BlackboxCreateAgent, TeachAnything, DarkAI, Airforce, RubiksAI, PerplexityLabs]) + best_provider = IterListProvider([DDG, Jmuz, Blackbox, BlackboxCreateAgent, TeachAnything, DarkAI, Airforce, RubiksAI, PerplexityLabs]) ) llama_3_1_405b = Model( @@ -257,7 +258,7 @@ hermes_2_pro = Model( hermes_3 = Model( name = "hermes-3", base_provider = "NousResearch", - best_provider = IterListProvider([AutonomousAI, HuggingChat, HuggingFace]) + best_provider = IterListProvider([AutonomousAI, AIUncensored]) ) @@ -286,7 +287,7 @@ gemini = Model( gemini_1_5_pro = Model( name = 'gemini-1.5-pro', base_provider = 'Google DeepMind', - best_provider = IterListProvider([Blackbox, Jmuz, Gemini, GeminiPro, Liaobots]) + best_provider = IterListProvider([Blackbox, Jmuz, GPROChat, AIChatFree, Gemini, GeminiPro, Liaobots]) ) gemini_1_5_flash = Model( @@ -308,12 +309,6 @@ gemini_2_0_flash_thinking = Model( best_provider = Liaobots ) -# gemma -gemma_2b = Model( - name = 'gemma-2b', - base_provider = 'Google', - best_provider = ReplicateHome -) ### Anthropic ### # claude 3 @@ -340,7 +335,7 @@ claude_3_opus = Model( claude_3_5_sonnet = Model( name = 'claude-3.5-sonnet', base_provider = 'Anthropic', - best_provider = IterListProvider([Blackbox, PollinationsAI, Jmuz, ClaudeSon, Liaobots]) + best_provider = IterListProvider([Blackbox, PollinationsAI, Jmuz, Liaobots]) ) ### Reka AI ### @@ -364,16 +359,22 @@ blackboxai_pro = Model( ) ### CohereForAI ### +command_r = Model( + name = 'command-r', + base_provider = 'CohereForAI', + best_provider = IterListProvider([HuggingSpace, PollinationsAI]) +) + command_r_plus = Model( name = 'command-r-plus', base_provider = 'CohereForAI', - best_provider = HuggingChat + best_provider = IterListProvider([HuggingSpace, HuggingChat]) ) -command_r = Model( - name = 'command-r', +command_r7b = Model( + name = 'command-r7b', base_provider = 'CohereForAI', - best_provider = PollinationsAI + best_provider = HuggingSpace ) ### Qwen ### @@ -388,7 +389,7 @@ qwen_1_5_7b = Model( qwen_2_72b = Model( name = 'qwen-2-72b', base_provider = 'Qwen', - best_provider = IterListProvider([PollinationsAI, DeepInfraChat]) + best_provider = PollinationsAI ) # qwen 2.5 @@ -401,13 +402,13 @@ qwen_2_5_72b = Model( qwen_2_5_coder_32b = Model( name = 'qwen-2.5-coder-32b', base_provider = 'Qwen', - best_provider = IterListProvider([Jmuz, PollinationsAI, AutonomousAI, DeepInfraChat, HuggingChat]) + best_provider = IterListProvider([Jmuz, PollinationsAI, AutonomousAI, HuggingChat]) ) qwq_32b = Model( name = 'qwq-32b', base_provider = 'Qwen', - best_provider = IterListProvider([Blackbox, Jmuz, HuggingSpace, DeepInfraChat, HuggingChat]) + best_provider = IterListProvider([Blackbox, Jmuz, HuggingSpace, HuggingChat]) ) ### Inflection ### @@ -434,7 +435,7 @@ deepseek_coder = Model( wizardlm_2_8x22b = Model( name = 'wizardlm-2-8x22b', base_provider = 'WizardLM', - best_provider = IterListProvider([Jmuz, DeepInfraChat]) + best_provider = Jmuz ) ### OpenChat ### @@ -469,7 +470,7 @@ sonar_chat = Model( nemotron_70b = Model( name = 'nemotron-70b', base_provider = 'Nvidia', - best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace]) + best_provider = IterListProvider([HuggingChat, HuggingFace]) ) ### Teknium ### @@ -575,28 +576,16 @@ rtist = Model( sdxl = ImageModel( name = 
'sdxl', base_provider = 'Stability AI', - best_provider = IterListProvider([ReplicateHome, Airforce]) + best_provider = Airforce ) -sd_3 = ImageModel( - name = 'sd-3', - base_provider = 'Stability AI', - best_provider = ReplicateHome -) - sd_3_5 = ImageModel( name = 'sd-3.5', base_provider = 'Stability AI', best_provider = HuggingSpace ) -### Playground ### -playground_v2_5 = ImageModel( - name = 'playground-v2.5', - base_provider = 'Playground AI', - best_provider = ReplicateHome -) ### Flux AI ### flux = ImageModel( @@ -767,8 +756,6 @@ class ModelUtils: gemini_2_0_flash.name: gemini_2_0_flash, gemini_2_0_flash_thinking.name: gemini_2_0_flash_thinking, - # gemma - gemma_2b.name: gemma_2b, ### Anthropic ### # claude 3 @@ -787,8 +774,9 @@ class ModelUtils: blackboxai_pro.name: blackboxai_pro, ### CohereForAI ### - command_r_plus.name: command_r_plus, command_r.name: command_r, + command_r_plus.name: command_r_plus, + command_r7b.name: command_r7b, ### GigaChat ### gigachat.name: gigachat, @@ -870,12 +858,8 @@ class ModelUtils: ### Stability AI ### sdxl.name: sdxl, - sd_3.name: sd_3, sd_3_5.name: sd_3_5, - ### Playground ### - playground_v2_5.name: playground_v2_5, - ### Flux AI ### flux.name: flux, flux_pro.name: flux_pro, diff --git a/g4f/tools/run_tools.py b/g4f/tools/run_tools.py index f52623e9..fe97dc9c 100644 --- a/g4f/tools/run_tools.py +++ b/g4f/tools/run_tools.py @@ -28,6 +28,16 @@ def validate_arguments(data: dict) -> dict: return {} async def async_iter_run_tools(async_iter_callback, model, messages, tool_calls: Optional[list] = None, **kwargs): + # Handle web_search from kwargs + if kwargs.get('web_search'): + try: + messages = messages.copy() + messages[-1]["content"] = await do_search(messages[-1]["content"]) + except Exception as e: + debug.log(f"Couldn't do web search: {e.__class__.__name__}: {e}") + # Keep web_search in kwargs for provider native support + pass + if tool_calls is not None: for tool in tool_calls: if tool.get("type") == "function": @@ -67,6 +77,16 @@ def iter_run_tools( tool_calls: Optional[list] = None, **kwargs ) -> AsyncIterator: + # Handle web_search from kwargs + if kwargs.get('web_search'): + try: + messages = messages.copy() + messages[-1]["content"] = asyncio.run(do_search(messages[-1]["content"])) + except Exception as e: + debug.log(f"Couldn't do web search: {e.__class__.__name__}: {e}") + # Keep web_search in kwargs for provider native support + pass + if tool_calls is not None: for tool in tool_calls: if tool.get("type") == "function": @@ -77,14 +97,6 @@ def iter_run_tools( raise_search_exceptions=True, **tool["function"]["arguments"] ) - elif tool.get("function", {}).get("name") == "safe_search_tool": - tool["function"]["arguments"] = validate_arguments(tool["function"]) - try: - messages[-1]["content"] = asyncio.run(do_search(messages[-1]["content"], **tool["function"]["arguments"])) - except Exception as e: - debug.log(f"Couldn't do web search: {e.__class__.__name__}: {e}") - # Enable provider native web search - kwargs["web_search"] = True elif tool.get("function", {}).get("name") == "continue_tool": if provider not in ("OpenaiAccount", "HuggingFace"): last_line = messages[-1]["content"].strip().splitlines()[-1] @@ -107,4 +119,4 @@ def iter_run_tools( if has_bucket and isinstance(messages[-1]["content"], str): messages[-1]["content"] += BUCKET_INSTRUCTIONS - return iter_callback(model=model, messages=messages, provider=provider, **kwargs) \ No newline at end of file + return iter_callback(model=model, messages=messages, provider=provider, 
**kwargs)
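A note on the web_search handling added to g4f/tools/run_tools.py above: both the asynchronous and synchronous paths copy the message list, rewrite the last message's content with search results, and deliberately leave web_search in kwargs so providers with native search support still receive it. The sketch below restates that flow outside of g4f; the function names and the do_search stub are illustrative rather than g4f's real helpers, and the last-message dict is replaced instead of mutated because list.copy() is only a shallow copy.

    async def do_search(query: str) -> str:
        # Illustrative stub: the real helper fetches web results and
        # returns the query augmented with them.
        return f"{query}\n\n[web results appended here]"

    async def run_with_web_search(callback, model, messages, **kwargs):
        if kwargs.get("web_search"):
            try:
                # Shallow-copy the list and swap in a fresh last-message dict
                # so the caller's objects are never mutated.
                messages = messages.copy()
                messages[-1] = {**messages[-1],
                                "content": await do_search(messages[-1]["content"])}
            except Exception as e:
                # A failed search degrades gracefully to a plain completion.
                print(f"Couldn't do web search: {e.__class__.__name__}: {e}")
            # web_search stays in kwargs for providers with native support.
        return await callback(model=model, messages=messages, **kwargs)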
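The rewritten GigaChat provider above caches its OAuth token at module level and refreshes it whenever fewer than 60 seconds remain before expires_at, a millisecond Unix timestamp returned by Sber's OAuth endpoint. Below is a minimal synchronous sketch of just that refresh step, using requests in place of the provider's aiohttp session; the endpoint, headers, and response fields come from the diff, while the function name and ca_file parameter are assumptions for illustration.

    import time
    import uuid

    import requests

    access_token = ""
    token_expires_at = 0  # milliseconds since the epoch, as returned by the API

    def get_access_token(api_key: str, scope: str = "GIGACHAT_API_PERS",
                         ca_file: str = None) -> str:
        """Return a cached OAuth token, refreshing it when <60s of validity remain."""
        global access_token, token_expires_at
        if token_expires_at - int(time.time() * 1000) < 60_000:
            response = requests.post(
                "https://ngw.devices.sberbank.ru:9443/api/v2/oauth",
                headers={
                    "Authorization": f"Bearer {api_key}",
                    "RqUID": str(uuid.uuid4()),  # unique request id expected by the API
                    "Content-Type": "application/x-www-form-urlencoded",
                },
                data={"scope": scope},
                # The live endpoint is signed by the Russian trusted root CA that
                # the provider writes into the cookies directory; pass its path here.
                verify=ca_file if ca_file else True,
            )
            response.raise_for_status()
            data = response.json()
            access_token = data["access_token"]
            token_expires_at = data["expires_at"]
        return access_token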
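Several providers touched by this patch (GigaChat, AmigoChat, DeepInfraChat) parse the same server-sent-events shape: "data: {json}" lines, a "data: [DONE]" sentinel, and text carried in choices[0]["delta"]["content"] when streaming or in choices[0]["text"] otherwise. A small parser sketch over already-decoded lines follows; the function name and plain-iterable input are assumptions, and malformed chunks are skipped just as the providers above do.

    import json
    from typing import Iterable, Iterator

    def iter_sse_content(lines: Iterable[str]) -> Iterator[str]:
        """Yield content chunks from OpenAI-style SSE lines until [DONE]."""
        for raw in lines:
            line = raw.strip()
            if not line.startswith("data: "):
                continue
            payload = line[len("data: "):]
            if payload == "[DONE]":
                return
            try:
                choice = json.loads(payload)["choices"][0]
            except (json.JSONDecodeError, KeyError, IndexError):
                continue  # skip malformed chunks
            content = (choice.get("delta") or {}).get("content") or choice.get("text")
            if content:
                yield content

Example: list(iter_sse_content(['data: {"choices":[{"delta":{"content":"Hi"}}]}', 'data: [DONE]'])) yields ['Hi'].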