From 63c68c12e9c97120bc4e775295bc3340f27b2cc1 Mon Sep 17 00:00:00 2001
From: kqlio67 <>
Date: Fri, 3 Jan 2025 08:34:37 +0200
Subject: Fix conflicts

---
 docs/providers-and-models.md        |  39 ++--
 g4f/Provider/Airforce.py            |   1 -
 g4f/Provider/Blackbox.py            | 419 ++++++++++++++++--------------------
 g4f/Provider/ChatGptEs.py           |   7 +-
 g4f/Provider/DDG.py                 | 137 ++++--------
 g4f/Provider/needs_auth/GlhfChat.py |  29 +++
 g4f/Provider/needs_auth/__init__.py |   5 +-
 g4f/Provider/needs_auth/glhfChat.py |  30 ---
 g4f/gui/client/index.html           |   4 +-
 g4f/models.py                       |  19 +-
 10 files changed, 284 insertions(+), 406 deletions(-)
 create mode 100644 g4f/Provider/needs_auth/GlhfChat.py
 delete mode 100644 g4f/Provider/needs_auth/glhfChat.py

diff --git a/docs/providers-and-models.md b/docs/providers-and-models.md
index 2f4b5fbf..2d359d6e 100644
--- a/docs/providers-and-models.md
+++ b/docs/providers-and-models.md
@@ -20,9 +20,9 @@ This document provides an overview of various AI providers and models, including
 ### Providers Free
 | Website | Provider | Text Models | Image Models | Vision Models | Stream | Status | Auth |
 |----------|-------------|--------------|---------------|--------|--------|------|------|
-|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`phi-2, gpt-4, gpt-4o-mini, gpt-4o, gpt-4-turbo, o1-mini, openchat-3.5, deepseek-coder, hermes-2-dpo, hermes-2-pro, openhermes-2.5, lfm-40b, german-7b, llama-2-7b, llama-3.1-8b, llama-3.1-70b, neural-7b, zephyr-7b, evil,`|`sdxl, flux-pro, flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, midjourney, dall-e-3`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌+✔|
+|[api.airforce](https://api.airforce)|`g4f.Provider.Airforce`|`phi-2, openchat-3.5, deepseek-coder, hermes-2-dpo, hermes-2-pro, openhermes-2.5, lfm-40b, german-7b, llama-2-7b, llama-3.1-8b, llama-3.1-70b, neural-7b, zephyr-7b, evil`|`sdxl, flux-pro, flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, midjourney, dall-e-3`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌+✔|
 |[amigochat.io](https://amigochat.io/chat/)|`g4f.Provider.AmigoChat`|✔|✔|❌|✔|![Error](https://img.shields.io/badge/RateLimit-f48d37)|❌|
-|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox`|`blackboxai, gpt-4, gpt-4o, gemini-pro, claude-3.5-sonnet, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, llama-3.3-70b, mixtral-7b, deepseek-chat, dbrx-instruct, qwq-32b, hermes-2-dpo`|`flux`|`blackboxai, gpt-4o, gemini-pro, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b`|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
+|[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.Blackbox`|`blackboxai, gpt-4, gpt-4o, gemini-pro, claude-3.5-sonnet, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3.3-70b, mixtral-7b, deepseek-chat, dbrx-instruct, qwq-32b, hermes-2-dpo`|`flux`|`blackboxai, gpt-4o, gemini-pro, gemini-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b`|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
 |[blackbox.ai](https://www.blackbox.ai)|`g4f.Provider.BlackboxCreateAgent`|`llama-3.1-70b`|`flux`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
 |[chatgpt.com](https://chatgpt.com)|`g4f.Provider.ChatGpt`|✔|❌|❌|✔|![Error](https://img.shields.io/badge/HTTPError-f48d37)|❌|
 |[chatgpt.es](https://chatgpt.es)|`g4f.Provider.ChatGptEs`|`gpt-4, gpt-4o, gpt-4o-mini`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
@@ -58,6 +58,7 @@ This document provides an overview of various AI providers and models, including
 ### Providers Needs Auth
 | Provider | Text Models | Image Models | Vision Models | Stream | Status | Auth |
 |----------|-------------|--------------|---------------|--------|--------|------|
+|[anthropic.com](https://www.anthropic.com)|`g4f.Provider.Anthropic`|✔|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|✔|
 |[bing.com/images/create](https://www.bing.com/images/create)|`g4f.Provider.BingCreateImages`|❌|`dall-e-3`|❌|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔|
 |[inference.cerebras.ai](https://inference.cerebras.ai/)|`g4f.Provider.Cerebras`|✔|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|❌|
 |[deepinfra.com](https://deepinfra.com)|`g4f.Provider.DeepInfra`|✔|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|✔|
@@ -65,6 +66,7 @@ This document provides an overview of various AI providers and models, including
 |[gemini.google.com](https://gemini.google.com)|`g4f.Provider.Gemini`|`gemini`|`gemini`|`gemini`|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔|
 |[ai.google.dev](https://ai.google.dev)|`g4f.Provider.GeminiPro`|`gemini-pro`|❌|`gemini-pro`|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔|
 |[github.com/copilot](https://github.com/copilot)|`g4f.Provider.GithubCopilot`|✔|❌|❌|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔|
+|[glhf.chat](https://glhf.chat)|`g4f.Provider.GlhfChat`|✔|❌|❌|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔|
 |[console.groq.com/playground](https://console.groq.com/playground)|`g4f.Provider.Groq`|✔|❌|✔|❌|![](https://img.shields.io/badge/Active-brightgreen)|✔|
 |[huggingface.co/chat](https://huggingface.co/chat)|`g4f.Provider.HuggingChat`|`qwen-2.5-72b, llama-3.3-70b, command-r-plus, qwq-32b, nemotron-70b, nemotron-70b, llama-3.2-11b, hermes-3, mistral-nemo, phi-3.5-mini`|`flux-dev`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|✔|
 |[huggingface.co/chat](https://huggingface.co/chat)|`g4f.Provider.HuggingFace`|✔|✔|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|✔|
@@ -89,19 +91,21 @@ This document provides an overview of various AI providers and models, including
 ### Text Models
 | Model | Base Provider | Providers | Website |
 |-------|---------------|-----------|---------|
-|gpt_35_turbo|OpenAI|2+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)|
+|gpt-3|OpenAI|2+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)|
+|gpt-3.5-turbo|OpenAI|2+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)|
 |gpt-4|OpenAI|8+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
-|gpt-4-turbo|OpenAI|1+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)|
-|gpt-4o|OpenAI|8+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)|
-|gpt-4o-mini|OpenAI|8+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)|
+|gpt-4o|OpenAI|7+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)|
+|gpt-4o-mini|OpenAI|7+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)|
+|o1|OpenAI|1+ Providers|[platform.openai.com](https://openai.com/index/introducing-openai-o1-preview/)|
 |o1-preview|OpenAI|1+ Providers|[platform.openai.com](https://openai.com/index/introducing-openai-o1-preview/)|
-|o1-mini|OpenAI|2+ Providers|[platform.openai.com](https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning/)|
+|o1-mini|OpenAI|1+ Providers|[platform.openai.com](https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning/)|
 |gigachat||1+ Providers|[]( )|
+|meta-ai|Meta|1+ Providers|[ai.meta.com](https://ai.meta.com/)|
 |llama-2-7b|Meta Llama|2+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-2-7b)|
 |llama-3-8b|Meta Llama|1+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3/)|
 |llama-3.1-8b|Meta Llama|5+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
-|llama-3.1-70b|Meta Llama|12+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
-|llama-3.1-405b|Meta Llama|1+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
+|llama-3.1-70b|Meta Llama|10+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)|
+|llama-3.2-1b|Meta Llama|2+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-1B)|
 |llama-3.2-11b|Meta Llama|2+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/)|
 |llama-3.3-70b|Meta Llama|4+ Providers|[llama.com/]()|
 |mixtral-7b|Mistral AI|1+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-of-experts/)|
@@ -110,22 +114,23 @@ This document provides an overview of various AI providers and models, including
 |mistral-large|Mistral AI|1+ Providers|[mistral.ai](https://mistral.ai/news/mistral-large-2407/)|
 |hermes-2-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
 |hermes-2-pro|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B)|
-|hermes-3|NousResearch|1+ Providers|[nousresearch.com](https://nousresearch.com/hermes3/)|
+|hermes-3|NousResearch|2+ Providers|[nousresearch.com](https://nousresearch.com/hermes3/)|
+|phi-2|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/phi-2)|
+|phi-3.5-mini|Microsoft|2+ Providers|[huggingface.co](https://huggingface.co/microsoft/Phi-3.5-mini-instruct)|
 |gemini|Google DeepMind|1+ Providers|[deepmind.google](http://deepmind.google/technologies/gemini/)|
 |gemini-flash|Google DeepMind|4+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
-|gemini-pro|Google DeepMind|5+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)|
+|gemini-pro|Google DeepMind|4+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)|
 |gemma-2b|Google|1+ Providers|[huggingface.co](https://huggingface.co/google/gemma-2b)|
 |claude-3-haiku|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)|
 |claude-3-sonnet|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
 |claude-3-opus|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|
-|claude-3.5-sonnet|Anthropic|3+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)|
+|claude-3.5-sonnet|Anthropic|4+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)|
 |reka-core|Reka AI|1+ Providers|[reka.ai](https://www.reka.ai/ourmodels)|
 |blackboxai|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
 |blackboxai-pro|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)|
 |command-r-plus|CohereForAI|1+ Providers|[docs.cohere.com](https://docs.cohere.com/docs/command-r-plus)|
 |command-r|CohereForAI|1+ Providers|[docs.cohere.com](https://docs.cohere.com/docs/command-r-plus)|
-|qwen|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen)|
-|qwen-1.5-7b|Qwen|2+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-7B)|
+|qwen-1.5-7b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen1.5-7B)|
 |qwen-2-72b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2-72B)|
 |qwen-2.5-72b|Qwen|2+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct)|
 |qwen-2.5-coder-32b|Qwen|4+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-Coder-32B)|
@@ -136,7 +141,7 @@ This document provides an overview of various AI providers and models, including
 |wizardlm-2-8x22b|WizardLM|1+ Providers|[huggingface.co](https://huggingface.co/alpindale/WizardLM-2-8x22B)|
 |openchat-3.5|OpenChat|1+ Providers|[huggingface.co](https://huggingface.co/openchat/openchat_3.5)|
 |grok-beta|x.ai|1+ Providers|[x.ai](https://x.ai/blog/grok-2)|
-|sonar-online|Perplexity AI|2+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)|
+|sonar-online|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)|
 |sonar-chat|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)|
 |nemotron-70b|Nvidia|3+ Providers|[build.nvidia.com](https://build.nvidia.com/nvidia/llama-3_1-nemotron-70b-instruct)|
 |openhermes-2.5|Teknium|1+ Providers|[huggingface.co](https://huggingface.co/datasets/teknium/OpenHermes-2.5)|
@@ -146,6 +151,6 @@ This document provides an overview of various AI providers and models, including
 |neural-7b|Inferless|1+ Providers|[huggingface.co](https://huggingface.co/Intel/neural-chat-7b-v3-1)|
 |p1|PollinationsAI|1+ Providers|[]( )|
 |dbrx-instruct|Databricks|1+ Providers|[huggingface.co](https://huggingface.co/databricks/dbrx-instruct)|
 |evil|Evil Mode - Experimental|2+ Providers|[]( )|
 |midijourney||1+ Providers|[]( )|
 |turbo||1+ Providers|[]( )|
@@ -164,6 +169,7 @@ This document provides an overview of various AI providers and models, including
 |flux|Black Forest Labs|4+ Providers|[github.com/black-forest-labs/flux](https://github.com/black-forest-labs/flux)|
 |flux-pro|Black Forest Labs|2+ Providers|[github.com/black-forest-labs/flux](https://github.com/black-forest-labs/flux)|
 |flux-dev|Black Forest Labs|3+ Providers|[huggingface.co](https://huggingface.co/black-forest-labs/FLUX.1-dev)|
+|flux-schnell|Black Forest Labs|2+ Providers|[huggingface.co](https://huggingface.co/black-forest-labs/FLUX.1-schnell)|
 |flux-realism|Flux AI|2+ Providers|[]( )|
 |flux-cablyai|Flux AI|1+ Providers|[]( )|
 |flux-anime|Flux AI|2+ Providers|[]( )|
@@ -171,8 +177,7 @@ This document provides an overview of various AI providers and models, including
 |flux-disney|Flux AI|1+ Providers|[]( )|
 |flux-pixel|Flux AI|1+ Providers|[]( )|
 |flux-4o|Flux AI|1+ Providers|[]( )|
-|flux-schnell|Black Forest Labs|1+ Providers|[huggingface.co](https://huggingface.co/black-forest-labs/FLUX.1-schnell)|
-|dall-e-3|OpenAI|5+ Providers|[openai.com](https://openai.com/index/dall-e/)|
+|dall-e-3|OpenAI|6+ Providers|[openai.com](https://openai.com/index/dall-e/)|
 |midjourney|Midjourney|2+ Providers|[docs.midjourney.com](https://docs.midjourney.com/docs/model-versions)|
 |any-dark||2+ Providers|[]( )|
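[Editor's note: the tables above map directly onto g4f's programmatic interface. A minimal usage sketch follows, assuming the standard `g4f.client` API at this revision; the chosen provider and model are illustrative, not prescribed by the patch.]

```python
# Minimal sketch: call one of the free providers listed above.
# Assumes the g4f client API of this release; provider/model are examples.
from g4f.client import Client
from g4f.Provider import Blackbox

client = Client(provider=Blackbox)
response = client.chat.completions.create(
    model="blackboxai",  # any text model listed for the provider
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```
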
diff --git a/g4f/Provider/Airforce.py b/g4f/Provider/Airforce.py
index 1444882c..02970211 100644
--- a/g4f/Provider/Airforce.py
+++ b/g4f/Provider/Airforce.py
@@ -45,7 +45,6 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
     additional_models_imagine = ["flux-1.1-pro", "midjourney", "dall-e-3"]
     model_aliases = {
         # Alias mappings for models
-        "gpt-4": "gpt-4o",
         "openchat-3.5": "openchat-3.5-0106",
         "deepseek-coder": "deepseek-coder-6.7b-instruct",
         "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index d3cd1ea1..00a08d84 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -1,42 +1,95 @@
 from __future__ import annotations
 
 from aiohttp import ClientSession
-import random
-import string
 import json
+import uuid
 import re
 import aiohttp
-import asyncio
 from pathlib import Path
-import concurrent.futures
+from functools import wraps
+from typing import Optional, Callable, Any
 
 from ..typing import AsyncResult, Messages, ImagesType
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
 from ..image import ImageResponse, to_data_uri
 from ..cookies import get_cookies_dir
-from ..tools.web_search import get_search_message
-from .helper import format_prompt
-
 from .. import debug
 
+
+def cached_value(filename: str, cache_key: str = 'validated_value'):
+    """Universal cache decorator for both memory and file caching"""
+    def decorator(fetch_func: Callable) -> Callable:
+        memory_cache: Optional[str] = None
+
+        @wraps(fetch_func)
+        async def wrapper(cls, *args, force_refresh=False, **kwargs) -> Any:
+            nonlocal memory_cache
+
+            # If force refresh, clear caches
+            if force_refresh:
+                memory_cache = None
+                try:
+                    cache_file = Path(get_cookies_dir()) / filename
+                    if cache_file.exists():
+                        cache_file.unlink()
+                except Exception as e:
+                    debug.log(f"Error clearing cache file: {e}")
+
+            # Check memory cache first
+            if memory_cache is not None:
+                return memory_cache
+
+            # Check file cache
+            cache_file = Path(get_cookies_dir()) / filename
+            try:
+                if cache_file.exists():
+                    with open(cache_file, 'r') as f:
+                        data = json.load(f)
+                        if data.get(cache_key):
+                            memory_cache = data[cache_key]
+                            return memory_cache
+            except Exception as e:
+                debug.log(f"Error reading cache file: {e}")
+
+            # Fetch new value
+            try:
+                value = await fetch_func(cls, *args, **kwargs)
+                memory_cache = value
+
+                # Save to file
+                cache_file.parent.mkdir(exist_ok=True)
+                try:
+                    with open(cache_file, 'w') as f:
+                        json.dump({cache_key: value}, f)
+                except Exception as e:
+                    debug.log(f"Error writing to cache file: {e}")
+
+                return value
+            except Exception as e:
+                debug.log(f"Error fetching value: {e}")
+                raise
+
+        return wrapper
+    return decorator
+
+
 class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Blackbox AI"
     url = "https://www.blackbox.ai"
     api_endpoint = "https://www.blackbox.ai/api/chat"
-    
+
     working = True
     supports_stream = True
     supports_system_message = True
     supports_message_history = True
-    
+
     default_model = 'blackboxai'
     default_vision_model = default_model
-    default_image_model = 'flux'
-    image_models = ['ImageGeneration', 'repomap']
+    default_image_model = 'ImageGeneration'
+    image_models = [default_image_model]
     vision_models = [default_vision_model, 'gpt-4o', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
-    web_search_models = ['blackboxai', 'meta-llama/Llama-3.3-70B-Instruct-Turbo', 'meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro']
-
     userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'blackboxai-pro']
 
     agentMode = {
@@ -46,7 +99,6 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         'mistralai/Mistral-7B-Instruct-v0.2': {'mode': True, 'id': "mistralai/Mistral-7B-Instruct-v0.2", 'name': "Mistral-(7B)-Instruct-v0.2"},
         'deepseek-ai/deepseek-llm-67b-chat': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"},
         'databricks/dbrx-instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
-        'meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro': {'mode': True, 'id': "meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro", 'name': "Meta-Llama-3.1-405B-Instruct-Turbo"},
         # 'Qwen/QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
         'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO': {'mode': True, 'id': "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 'name': "Nous-Hermes-2-Mixtral-8x7B-DPO"}
     }
@@ -105,249 +157,152 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         "mixtral-7b": "mistralai/Mistral-7B-Instruct-v0.2",
         "deepseek-chat": "deepseek-ai/deepseek-llm-67b-chat",
         "dbrx-instruct": "databricks/dbrx-instruct",
-        "llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro",
         "qwq-32b": "Qwen/QwQ-32B-Preview",
         "hermes-2-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
 
         ### image ###
         "flux": "ImageGeneration",
     }
-    
-    @classmethod
-    def _get_cache_file(cls) -> Path:
-        dir = Path(get_cookies_dir())
-        dir.mkdir(exist_ok=True)
-        return dir / 'blackbox.json'
-    
-    @classmethod
-    def _load_cached_value(cls) -> str | None:
-        cache_file = cls._get_cache_file()
-        if cache_file.exists():
-            try:
-                with open(cache_file, 'r') as f:
-                    data = json.load(f)
-                    return data.get('validated_value')
-            except Exception as e:
-                debug.log(f"Error reading cache file: {e}")
-        return None
-    
-    @classmethod
-    def _save_cached_value(cls, value: str):
-        cache_file = cls._get_cache_file()
-        try:
-            with open(cache_file, 'w') as f:
-                json.dump({'validated_value': value}, f)
-        except Exception as e:
-            debug.log(f"Error writing to cache file: {e}")
 
     @classmethod
-    async def fetch_validated(cls):
-        cached_value = cls._load_cached_value()
-
+    @cached_value(filename='blackbox.json')
+    async def get_validated(cls) -> str:
+        """Fetch validated value from website"""
         async with aiohttp.ClientSession() as session:
-            # Let's try both URLs
-            urls_to_try = [
-                "https://www.blackbox.ai",
-                "https://api.blackbox.ai"
-            ]
-
-            for base_url in urls_to_try:
-                try:
-                    async with session.get(base_url) as response:
-                        if response.status != 200:
-                            continue
-
-                        page_content = await response.text()
-                        js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content)
+            async with session.get(cls.url) as response:
+                if response.status != 200:
+                    raise RuntimeError("Failed to get validated value")
+
+                page_content = await response.text()
+                js_files = re.findall(r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content)
+
+                if not js_files:
+                    js_files = re.findall(r'static/js/[a-zA-Z0-9-]+\.js', page_content)
+
+                uuid_format = r'["\']([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})["\']'
+
+                def is_valid_context(text_around):
+                    return any(char + '=' in text_around for char in 'abcdefghijklmnopqrstuvwxyz')
+
+                for js_file in js_files:
+                    js_url = f"{cls.url}/_next/{js_file}"
+                    try:
+                        async with session.get(js_url) as js_response:
+                            if js_response.status == 200:
+                                js_content = await js_response.text()
+                                for match in re.finditer(uuid_format, js_content):
+                                    start = max(0, match.start() - 10)
+                                    end = min(len(js_content), match.end() + 10)
+                                    context = js_content[start:end]
+
+                                    if is_valid_context(context):
+                                        return match.group(1)
+                    except Exception:
+                        continue
 
-                        if not js_files:
-                            js_files = re.findall(r'static/js/[a-zA-Z0-9-]+\.js', page_content)
-
-                        uuid_format = r'["\']([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})["\']'
-
-                        def is_valid_context(text_around):
-                            return any(char + '=' in text_around for char in 'abcdefghijklmnopqrstuvwxyz')
-
-                        for js_file in js_files:
-                            js_url = f"{base_url}/_next/{js_file}"
-                            try:
-                                async with session.get(js_url) as js_response:
-                                    if js_response.status == 200:
-                                        js_content = await js_response.text()
-                                        for match in re.finditer(uuid_format, js_content):
-                                            start = max(0, match.start() - 10)
-                                            end = min(len(js_content), match.end() + 10)
-                                            context = js_content[start:end]
-
-                                            if is_valid_context(context):
-                                                validated_value = match.group(1)
-                                                cls._save_cached_value(validated_value)
-                                                return validated_value
-                            except Exception:
-                                continue
-
-                except Exception as e:
-                    debug.log(f"Error trying {base_url}: {e}")
-                    continue
-
-        # If we failed to get a new validated_value, we return the cached one
-        if cached_value:
-            return cached_value
-
-        raise RuntimeError("Failed to get validated value from both URLs")
-
-    @staticmethod
-    def generate_id(length=7):
-        characters = string.ascii_letters + string.digits
-        return ''.join(random.choice(characters) for _ in range(length))
+            raise RuntimeError("Failed to get validated value")
 
     @classmethod
     async def create_async_generator(
         cls,
         model: str,
        messages: Messages,
-        prompt: str = None,
         proxy: str = None,
+        prompt: str = None,
         web_search: bool = False,
        images: ImagesType = None,
-        top_p: float = 0.9,
-        temperature: float = 0.5,
+        top_p: float = None,
+        temperature: float = None,
         max_tokens: int = None,
-        max_retries: int = 3,
-        delay: int = 1,
         **kwargs
-    ) -> AsyncResult:
-
-        use_internal_search = web_search and model in cls.web_search_models
+    ) -> AsyncResult:
+        headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "application/json",
+            "origin": "https://www.blackbox.ai",
+            "referer": "https://www.blackbox.ai/",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
+        }
 
-        if web_search and not use_internal_search:
-            try:
-                # Create a timeout for web search
-                async def run_search():
-                    with concurrent.futures.ThreadPoolExecutor() as executor:
-                        return await asyncio.get_event_loop().run_in_executor(
-                            executor,
-                            lambda: get_search_message(messages[-1]["content"])
-                        )
-
-                # Set a timeout of 10 seconds for web search
-                search_result = await asyncio.wait_for(run_search(), timeout=10.0)
-                messages[-1]["content"] = search_result
-
-            except asyncio.TimeoutError:
-                debug.log("Web search timed out, proceeding with original message")
-            except Exception as e:
-                debug.log(f"Web search failed: {str(e)}, proceeding with original message")
-
-            web_search = False
+        model = cls.get_model(model)
 
-        async def process_request():
-            validated_value = await cls.fetch_validated()
-
-            if not validated_value:
-                raise RuntimeError("Failed to get validated value")
-
-            formatted_message = format_prompt(messages)
-            current_model = cls.get_model(model)
+        conversation_id = str(uuid.uuid4())[:7]
+        validated_value = await cls.get_validated()
+
+        formatted_message = format_prompt(messages)
 
-            first_message = next((msg for msg in messages if msg['role'] == 'user'), None)
-            chat_id = cls.generate_id()
-            current_messages = [{"id": chat_id, "content": formatted_message, "role": "user"}]
-
-            if images is not None:
-                current_messages[-1]['data'] = {
-                    "imagesData": [
-                        {
-                            "filePath": f"/{image_name}",
-                            "contents": to_data_uri(image)
-                        }
-                        for image, image_name in images
-                    ],
-                    "fileText": "",
-                    "title": ""
-                }
-
-            headers = {
-                'accept': '*/*',
-                'accept-language': 'en-US,en;q=0.9',
-                'content-type': 'application/json',
-                'origin': 'https://www.blackbox.ai',
-                'referer': 'https://www.blackbox.ai/',
-                'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
-            }
-
-            data = {
-                "agentMode": cls.agentMode.get(model, {}) if model in cls.agentMode else {},
-                "clickedAnswer2": False,
-                "clickedAnswer3": False,
-                "clickedForceWebSearch": False,
-                "codeModelMode": True,
-                "deepSearchMode": False,
-                "domains": None,
-                "githubToken": None,
-                "id": chat_id,
-                "imageGenerationMode": False,
-                "isChromeExt": False,
-                "isMicMode": False,
-                "maxTokens": max_tokens,
-                "messages": current_messages,
-                "mobileClient": False,
-                "playgroundTemperature": temperature,
-                "playgroundTopP": top_p,
-                "previewToken": None,
-                "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {},
-                "userId": None,
-                "userSelectedModel": model if model in cls.userSelectedModel else None,
-                "userSystemPrompt": None,
-                "validated": validated_value,
-                "visitFromDelta": False,
-                "webSearchModePrompt": False,
-                "webSearchMode": use_internal_search
+        first_message = next((msg for msg in messages if msg['role'] == 'user'), None)
+        current_messages = [{"id": conversation_id, "content": formatted_message, "role": "user"}]
+
+        if images is not None:
+            current_messages[-1]['data'] = {
+                "imagesData": [
+                    {
+                        "filePath": f"/{image_name}",
+                        "contents": to_data_uri(image)
+                    }
+                    for image, image_name in images
+                ],
+                "fileText": "",
+                "title": ""
             }
-
-            for attempt in range(max_retries):
+
+        while True:
+            async with ClientSession(headers=headers) as session:
+                data = {
+                    "messages": current_messages,
+                    "id": conversation_id,
+                    "previewToken": None,
+                    "userId": None,
+                    "codeModelMode": True,
+                    "agentMode": cls.agentMode.get(model, {}) if model in cls.agentMode else {},
+                    "trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {},
+                    "isMicMode": False,
+                    "userSystemPrompt": None,
+                    "maxTokens": max_tokens,
+                    "playgroundTopP": top_p,
+                    "playgroundTemperature": temperature,
+                    "isChromeExt": False,
+                    "githubToken": "",
+                    "clickedAnswer2": False,
+                    "clickedAnswer3": False,
+                    "clickedForceWebSearch": False,
+                    "visitFromDelta": False,
+                    "mobileClient": False,
+                    "userSelectedModel": model if model in cls.userSelectedModel else None,
+                    "validated": validated_value,
+                    "imageGenerationMode": False,
+                    "webSearchModePrompt": False,
+                    "deepSearchMode": False,
+                    "domains": None,
+                    "webSearchMode": web_search
+                }
+
                 try:
-                    async with ClientSession(headers=headers) as session:
-                        async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
-                            response.raise_for_status()
-                            response_text = await response.text()
-
-                            if current_model in cls.image_models:
-                                image_matches = re.findall(r'!\[.*?\]\((https?://[^\)]+)\)', response_text)
-                                if image_matches:
-                                    yield ImageResponse(image_matches[0], prompt)
-                                    return
-
-                            response_text = re.sub(r'Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai', '', response_text, flags=re.DOTALL)
-                            response_text = re.sub(r'and for API requests replace https://www.blackbox.ai with https://api.blackbox.ai', '', response_text, flags=re.DOTALL)
-
-                            response_text = response_text.strip()
-
-                            if not response_text:
-                                raise ValueError("Empty response received")
-
-                            json_match = re.search(r'\$~~~\$(.*?)\$~~~\$', response_text, re.DOTALL)
-                            if json_match:
-                                search_results = json.loads(json_match.group(1))
-                                answer = response_text.split('$~~~$')[-1].strip()
-
-                                formatted_response = f"{answer}\n\n**Source:**"
-                                for i, result in enumerate(search_results, 1):
-                                    formatted_response += f"\n{i}. {result['title']}: {result['link']}"
-
-                                yield formatted_response
-                            else:
-                                yield response_text
+                    async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                        response.raise_for_status()
+                        first_chunk = True
+                        content_received = False
+                        async for chunk in response.content:
+                            if chunk:
+                                content_received = True
+                                decoded = chunk.decode()
+                                if first_chunk and "Generated by BLACKBOX.AI" in decoded:
+                                    validated_value = await cls.get_validated(force_refresh=True)
+                                    break
+                                first_chunk = False
+                                if model in cls.image_models and decoded.startswith("![]("):
+                                    image_url = decoded.strip("![]()")
+                                    prompt = messages[-1]["content"]
+                                    yield ImageResponse(images=image_url, alt=prompt)
+                                else:
+                                    yield decoded
+                        else:
+                            if not content_received:
+                                debug.log("Empty response received from Blackbox API, retrying...")
+                                continue
                             return
                 except Exception as e:
-                    debug.log(f"Error: {str(e)}")
-                    if attempt == max_retries - 1:
-                        raise RuntimeError("Failed after all retries")
-                    else:
-                        wait_time = delay * (2 ** attempt) + random.uniform(0, 1)
-                        debug.log(f"Attempt {attempt + 1} failed. Retrying in {wait_time:.2f} seconds...")
-                        await asyncio.sleep(wait_time)
-
-        async for chunk in process_request():
-            yield chunk
+                    debug.log(f"Error in request: {e}")
+                    raise
diff --git a/g4f/Provider/ChatGptEs.py b/g4f/Provider/ChatGptEs.py
index 3c6b42ea..d42baffd 100644
--- a/g4f/Provider/ChatGptEs.py
+++ b/g4f/Provider/ChatGptEs.py
@@ -54,10 +54,8 @@ class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
                 post_id = re.findall(r'data-post-id="(.+?)"', await initial_response.text())[0]
 
             formatted_prompt = format_prompt(messages)
-
-            conversation_history = [
-                "Human: You are a helpful AI assistant. Please respond in the same language that the user uses in their message. Provide accurate, relevant and helpful information while maintaining a friendly and professional tone. If you're not sure about something, please acknowledge that and provide the best information you can while noting any uncertainties. Focus on being helpful while respecting the user's choice of language."
-            ]
+
+            conversation_history = []
 
             for message in messages[:-1]:
                 if message['role'] == "user":
@@ -66,6 +64,7 @@ class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
                     conversation_history.append(f"AI: {message['content']}")
 
             payload = {
+                'wpaicg_user_agree': '1',
                 '_wpnonce': nonce_,
                 'post_id': post_id,
                 'url': cls.url,
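[Editor's note: the surrounding loop (unchanged by this diff) flattens prior turns into "Human:"/"AI:" lines before the request is built. A self-contained rendering of that logic, with example messages, for clarity:]

```python
# How ChatGptEs serializes conversation history; messages are examples.
messages = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
    {"role": "user", "content": "How are you?"},  # last turn is sent separately
]
conversation_history = []
for message in messages[:-1]:
    if message["role"] == "user":
        conversation_history.append(f"Human: {message['content']}")
    else:
        conversation_history.append(f"AI: {message['content']}")
print(conversation_history)  # ['Human: Hi', 'AI: Hello!']
```
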
diff --git a/g4f/Provider/DDG.py b/g4f/Provider/DDG.py
index ae418c16..1f428901 100644
--- a/g4f/Provider/DDG.py
+++ b/g4f/Provider/DDG.py
@@ -1,132 +1,69 @@
 from __future__ import annotations
 
+from aiohttp import ClientSession, ClientTimeout
 import json
-import aiohttp
-from aiohttp import ClientSession, BaseConnector
 
 from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation
-from .helper import format_prompt
-from ..requests.aiohttp import get_connector
-from ..requests.raise_for_status import raise_for_status
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
-MODELS = [
-    {"model":"gpt-4o","modelName":"GPT-4o","modelVariant":None,"modelStyleId":"gpt-4o-mini","createdBy":"OpenAI","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"4"},
-    {"model":"gpt-4o-mini","modelName":"GPT-4o","modelVariant":"mini","modelStyleId":"gpt-4o-mini","createdBy":"OpenAI","moderationLevel":"HIGH","isAvailable":0,"inputCharLimit":16e3,"settingId":"3"},
-    {"model":"claude-3-5-sonnet-20240620","modelName":"Claude 3.5","modelVariant":"Sonnet","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"7"},
-    {"model":"claude-3-opus-20240229","modelName":"Claude 3","modelVariant":"Opus","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":1,"inputCharLimit":16e3,"settingId":"2"},
-    {"model":"claude-3-haiku-20240307","modelName":"Claude 3","modelVariant":"Haiku","modelStyleId":"claude-3-haiku","createdBy":"Anthropic","moderationLevel":"HIGH","isAvailable":0,"inputCharLimit":16e3,"settingId":"1"},
-    {"model":"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo","modelName":"Llama 3.1","modelVariant":"70B","modelStyleId":"llama-3","createdBy":"Meta","moderationLevel":"MEDIUM","isAvailable":0,"isOpenSource":0,"inputCharLimit":16e3,"settingId":"5"},
-    {"model":"mistralai/Mixtral-8x7B-Instruct-v0.1","modelName":"Mixtral","modelVariant":"8x7B","modelStyleId":"mixtral","createdBy":"Mistral AI","moderationLevel":"LOW","isAvailable":0,"isOpenSource":0,"inputCharLimit":16e3,"settingId":"6"},
-    {"model":"Qwen/Qwen2.5-Coder-32B-Instruct","modelName":"Qwen 2.5 Coder","modelVariant":"32B","modelStyleId":"qwen","createdBy":"Alibaba Cloud","moderationLevel":"LOW","isAvailable":0,"isOpenSource":1,"inputCharLimit":16e3,"settingId":"90"}
-]
-
-class Conversation(BaseConversation):
-    vqd: str = None
-    message_history: Messages = []
-
-    def __init__(self, model: str):
-        self.model = model
 
 class DDG(AsyncGeneratorProvider, ProviderModelMixin):
     label = "DuckDuckGo AI Chat"
     url = "https://duckduckgo.com/aichat"
+    url_status = "https://duckduckgo.com/duckchat/v1/status"
     api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
+
     working = True
     supports_stream = True
     supports_system_message = True
     supports_message_history = True
-
-    default_model = "gpt-4o-mini"
-    models = [model.get("model") for model in MODELS]
+
+    default_model = 'gpt-4o-mini'
+    models = [default_model, 'claude-3-haiku-20240307', 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo', 'mistralai/Mixtral-8x7B-Instruct-v0.1']
+
     model_aliases = {
+        "gpt-4": default_model,
         "claude-3-haiku": "claude-3-haiku-20240307",
         "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-        "gpt-4": "gpt-4o-mini",
+        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1"
     }
 
-    @classmethod
-    async def get_vqd(cls, proxy: str, connector: BaseConnector = None):
-        status_url = "https://duckduckgo.com/duckchat/v1/status"
-        headers = {
-            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
-            'Accept': 'text/event-stream',
-            'x-vqd-accept': '1'
-        }
-        async with aiohttp.ClientSession(connector=get_connector(connector, proxy)) as session:
-            async with session.get(status_url, headers=headers) as response:
-                await raise_for_status(response)
-                return response.headers.get("x-vqd-4")
-
     @classmethod
     async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
-        conversation: Conversation = None,
-        return_conversation: bool = False,
         proxy: str = None,
-        connector: BaseConnector = None,
         **kwargs
     ) -> AsyncResult:
-        model = cls.get_model(model)
-
-        is_new_conversation = False
-        if conversation is None:
-            conversation = Conversation(model)
-            is_new_conversation = True
-
-        if conversation.vqd is None:
-            conversation.vqd = await cls.get_vqd(proxy, connector)
-            if not conversation.vqd:
-                raise Exception("Failed to obtain VQD token")
-
-        headers = {
-            'accept': 'text/event-stream',
-            'content-type': 'application/json',
-            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
-            'x-vqd-4': conversation.vqd,
-        }
-
-        async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
-            if is_new_conversation:
-                conversation.message_history = [{"role": "user", "content": format_prompt(messages)}]
-            else:
-                if len(messages) >= 2:
-                    conversation.message_history = [
-                        *conversation.message_history,
-                        messages[-2],
-                        messages[-1]
-                    ]
-                elif len(messages) == 1:
-                    conversation.message_history = [
-                        *conversation.message_history,
-                        messages[-1]
-                    ]
-
-            if return_conversation:
-                yield conversation
-
-            data = {
-                "model": conversation.model,
-                "messages": conversation.message_history
+        async with ClientSession(timeout=ClientTimeout(total=30)) as session:
+            # Fetch VQD token
+            async with session.get(cls.url_status,
+                                   headers={"x-vqd-accept": "1"}) as response:
+                if response.status != 200:
+                    raise Exception(f"Failed to fetch VQD token: {response.status}")
+                vqd = response.headers.get("x-vqd-4", "")
+                if not vqd:
+                    raise Exception("Failed to fetch VQD token: Empty token.")
+
+            headers = {
+                "Content-Type": "application/json",
+                "x-vqd-4": vqd,
             }
-            async with session.post(cls.api_endpoint, json=data) as response:
-                conversation.vqd = response.headers.get("x-vqd-4")
-                await raise_for_status(response)
+            payload = {
+                "model": model,
+                "messages": messages,
+            }
+            async with session.post(cls.api_endpoint, headers=headers, json=payload, proxy=proxy) as response:
+                response.raise_for_status()
                 async for line in response.content:
-                    if line:
-                        decoded_line = line.decode('utf-8')
-                        if decoded_line.startswith('data: '):
-                            json_str = decoded_line[6:]
-                            if json_str == '[DONE]':
-                                break
-                            try:
-                                json_data = json.loads(json_str)
-                                if 'message' in json_data:
-                                    yield json_data['message']
-                            except json.JSONDecodeError:
-                                pass
+                    line = line.decode("utf-8").strip()
+                    if line.startswith("data:"):
+                        try:
+                            message = json.loads(line[5:].strip())
+                            if "message" in message:
+                                yield message["message"]
+                        except json.JSONDecodeError:
+                            continue
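[Editor's note: the rewritten DDG provider follows a two-step protocol — GET the status endpoint with `x-vqd-accept: 1` to obtain an `x-vqd-4` token, then POST the chat request with that token and parse the SSE stream. A minimal standalone sketch under those assumptions; the service's availability and response shape are not guaranteed:]

```python
# Minimal sketch of the DuckDuckGo AI Chat flow implemented above.
import asyncio
import json
from aiohttp import ClientSession, ClientTimeout

async def ddg_chat(prompt: str, model: str = "gpt-4o-mini") -> str:
    async with ClientSession(timeout=ClientTimeout(total=30)) as session:
        # Step 1: fetch the x-vqd-4 token from the status endpoint.
        async with session.get(
            "https://duckduckgo.com/duckchat/v1/status",
            headers={"x-vqd-accept": "1"},
        ) as response:
            vqd = response.headers.get("x-vqd-4", "")
        # Step 2: POST the chat request and collect the SSE message chunks.
        chunks = []
        async with session.post(
            "https://duckduckgo.com/duckchat/v1/chat",
            headers={"Content-Type": "application/json", "x-vqd-4": vqd},
            json={"model": model, "messages": [{"role": "user", "content": prompt}]},
        ) as response:
            async for line in response.content:
                line = line.decode("utf-8").strip()
                if line.startswith("data:"):
                    try:
                        event = json.loads(line[5:].strip())
                    except json.JSONDecodeError:  # e.g. the [DONE] sentinel
                        continue
                    if "message" in event:
                        chunks.append(event["message"])
        return "".join(chunks)

# print(asyncio.run(ddg_chat("Hello")))  # subject to the service's availability
```
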
diff --git a/g4f/Provider/needs_auth/GlhfChat.py b/g4f/Provider/needs_auth/GlhfChat.py
new file mode 100644
index 00000000..c0bf8e7e
--- /dev/null
+++ b/g4f/Provider/needs_auth/GlhfChat.py
@@ -0,0 +1,29 @@
+from __future__ import annotations
+
+from .OpenaiAPI import OpenaiAPI
+
+class GlhfChat(OpenaiAPI):
+    label = "GlhfChat"
+    url = "https://glhf.chat"
+    api_base = "https://glhf.chat/api/openai/v1"
+    working = True
+    model_aliases = {
+        'Qwen2.5-Coder-32B-Instruct': 'hf:Qwen/Qwen2.5-Coder-32B-Instruct',
+        'Llama-3.1-405B-Instruct': 'hf:meta-llama/Llama-3.1-405B-Instruct',
+        'Llama-3.1-70B-Instruct': 'hf:meta-llama/Llama-3.1-70B-Instruct',
+        'Llama-3.1-8B-Instruct': 'hf:meta-llama/Llama-3.1-8B-Instruct',
+        'Llama-3.2-3B-Instruct': 'hf:meta-llama/Llama-3.2-3B-Instruct',
+        'Llama-3.2-11B-Vision-Instruct': 'hf:meta-llama/Llama-3.2-11B-Vision-Instruct',
+        'Llama-3.2-90B-Vision-Instruct': 'hf:meta-llama/Llama-3.2-90B-Vision-Instruct',
+        'Qwen2.5-72B-Instruct': 'hf:Qwen/Qwen2.5-72B-Instruct',
+        'Llama-3.3-70B-Instruct': 'hf:meta-llama/Llama-3.3-70B-Instruct',
+        'gemma-2-9b-it': 'hf:google/gemma-2-9b-it',
+        'gemma-2-27b-it': 'hf:google/gemma-2-27b-it',
+        'Mistral-7B-Instruct-v0.3': 'hf:mistralai/Mistral-7B-Instruct-v0.3',
+        'Mixtral-8x7B-Instruct-v0.1': 'hf:mistralai/Mixtral-8x7B-Instruct-v0.1',
+        'Mixtral-8x22B-Instruct-v0.1': 'hf:mistralai/Mixtral-8x22B-Instruct-v0.1',
+        'Nous-Hermes-2-Mixtral-8x7B-DPO': 'hf:NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
+        'Qwen2.5-7B-Instruct': 'hf:Qwen/Qwen2.5-7B-Instruct',
+        'SOLAR-10.7B-Instruct-v1.0': 'hf:upstage/SOLAR-10.7B-Instruct-v1.0',
+        'Llama-3.1-Nemotron-70B-Instruct-HF': 'hf:nvidia/Llama-3.1-Nemotron-70B-Instruct-HF'
+    }
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index d0acd6fc..24282f0f 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -1,6 +1,5 @@
 from .gigachat import *
 
-from .Anthropic import Anthropic
 from .BingCreateImages import BingCreateImages
 from .Cerebras import Cerebras
 from .CopilotAccount import CopilotAccount
@@ -9,7 +8,7 @@ from .DeepInfraImage import DeepInfraImage
 from .Gemini import Gemini
 from .GeminiPro import GeminiPro
 from .GithubCopilot import GithubCopilot
-from .glhfChat import glhfChat
+from .GlhfChat import GlhfChat
 from .Groq import Groq
 from .HuggingChat import HuggingChat
 from .HuggingFace import HuggingFace
@@ -28,4 +27,4 @@ from .Replicate import Replicate
 from .Theb import Theb
 from .ThebApi import ThebApi
 from .WhiteRabbitNeo import WhiteRabbitNeo
-from .xAI import xAI
\ No newline at end of file
+from .xAI import xAI
diff --git a/g4f/Provider/needs_auth/glhfChat.py b/g4f/Provider/needs_auth/glhfChat.py
deleted file mode 100644
index 802b9590..00000000
--- a/g4f/Provider/needs_auth/glhfChat.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from __future__ import annotations
-
-from .OpenaiAPI import OpenaiAPI
-
-class glhfChat(OpenaiAPI):
-    label = "glhf.chat"
-    url = "https://glhf.chat"
-    login_url = "https://glhf.chat/users/settings/api"
-    api_base = "https://glhf.chat/api/openai/v1"
-    working = True
-    model_aliases = {
-        'Qwen2.5-Coder-32B-Instruct': 'hf:Qwen/Qwen2.5-Coder-32B-Instruct',
-        'Llama-3.1-405B-Instruct': 'hf:meta-llama/Llama-3.1-405B-Instruct',
-        'Llama-3.1-70B-Instruct': 'hf:meta-llama/Llama-3.1-70B-Instruct',
-        'Llama-3.1-8B-Instruct': 'hf:meta-llama/Llama-3.1-8B-Instruct',
-        'Llama-3.2-3B-Instruct': 'hf:meta-llama/Llama-3.2-3B-Instruct',
-        'Llama-3.2-11B-Vision-Instruct': 'hf:meta-llama/Llama-3.2-11B-Vision-Instruct',
-        'Llama-3.2-90B-Vision-Instruct': 'hf:meta-llama/Llama-3.2-90B-Vision-Instruct',
-        'Qwen2.5-72B-Instruct': 'hf:Qwen/Qwen2.5-72B-Instruct',
-        'Llama-3.3-70B-Instruct': 'hf:meta-llama/Llama-3.3-70B-Instruct',
-        'gemma-2-9b-it': 'hf:google/gemma-2-9b-it',
-        'gemma-2-27b-it': 'hf:google/gemma-2-27b-it',
-        'Mistral-7B-Instruct-v0.3': 'hf:mistralai/Mistral-7B-Instruct-v0.3',
-        'Mixtral-8x7B-Instruct-v0.1': 'hf:mistralai/Mixtral-8x7B-Instruct-v0.1',
-        'Mixtral-8x22B-Instruct-v0.1': 'hf:mistralai/Mixtral-8x22B-Instruct-v0.1',
-        'Nous-Hermes-2-Mixtral-8x7B-DPO': 'hf:NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO',
-        'Qwen2.5-7B-Instruct': 'hf:Qwen/Qwen2.5-7B-Instruct',
-        'SOLAR-10.7B-Instruct-v1.0': 'hf:upstage/SOLAR-10.7B-Instruct-v1.0',
-        'Llama-3.1-Nemotron-70B-Instruct-HF': 'hf:nvidia/Llama-3.1-Nemotron-70B-Instruct-HF'
-    }
\ No newline at end of file
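[Editor's note: the provider module is renamed from `glhfChat` to `GlhfChat`, matching the class-name convention of the other `needs_auth` providers. A hedged usage sketch follows; it assumes `OpenaiAPI.create_async_generator` accepts an `api_key` keyword (as other OpenAI-compatible g4f providers do) and that the caller holds a key from glhf.chat.]

```python
# Hedged sketch: calling the renamed GlhfChat provider directly.
# Assumes an api_key from glhf.chat; the model alias maps to
# 'hf:meta-llama/Llama-3.1-8B-Instruct' per the table above.
from g4f.Provider.needs_auth import GlhfChat

async def demo(api_key: str) -> None:
    async for chunk in GlhfChat.create_async_generator(
        model="Llama-3.1-8B-Instruct",
        messages=[{"role": "user", "content": "Hello"}],
        api_key=api_key,
    ):
        print(chunk, end="")
```
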
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index 6bb2c803..c99bb47f 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -174,8 +174,8 @@