From d0e80e50074604bd8e0b2950fc54c1cd37198ee0 Mon Sep 17 00:00:00 2001 From: Parth Sadaria <146802298+Parthsadaria@users.noreply.github.com> Date: Sun, 2 Feb 2025 15:23:02 +0530 Subject: Update PerplexityLabs.py fixed GeneratorExit exception --- g4f/Provider/PerplexityLabs.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/g4f/Provider/PerplexityLabs.py b/g4f/Provider/PerplexityLabs.py index 1d06784d..0688575c 100644 --- a/g4f/Provider/PerplexityLabs.py +++ b/g4f/Provider/PerplexityLabs.py @@ -88,5 +88,6 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin): if data["final"]: yield FinishReason("stop") break - except: - raise RuntimeError(f"Message: {message}") + except Exception as e: + print(f"Error processing message: {message} - {e}") + raise RuntimeError(f"Message: {message}") from e -- cgit v1.2.3 From 5ddb5ac01eb5e7823b694250535dfe293d12ad17 Mon Sep 17 00:00:00 2001 From: Juxoola Date: Mon, 3 Feb 2025 09:34:23 +0300 Subject: Fix error __str__ returned non-string (type list) when using reasoning model --- g4f/client/__init__.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py index 50a614c3..31de9a18 100644 --- a/g4f/client/__init__.py +++ b/g4f/client/__init__.py @@ -73,7 +73,18 @@ def iter_response( elif isinstance(chunk, Exception): continue - chunk = str(chunk) + if isinstance(chunk, list): + chunk = "".join(map(str, chunk)) + else: + + temp = chunk.__str__() + if not isinstance(temp, str): + if isinstance(temp, list): + temp = "".join(map(str, temp)) + else: + temp = repr(chunk) + chunk = temp + content += chunk if max_tokens is not None and idx + 1 >= max_tokens: -- cgit v1.2.3 From ec8caec579f8407e48f2d410dbda7e26373cf84f Mon Sep 17 00:00:00 2001 From: kqlio67 <> Date: Mon, 3 Feb 2025 14:17:33 +0200 Subject: Updated some providers, added new providers and added new models --- docs/providers-and-models.md | 32 ++++---- g4f/Provider/Blackbox.py | 89 +++++++++++++--------- g4f/Provider/BlackboxAPI.py | 103 +++++++++++++++++++++++++ g4f/Provider/CablyAI.py | 175 +++++++++++++++++++++++++++++++++++++------ g4f/Provider/__init__.py | 1 + g4f/models.py | 44 ++++++----- 6 files changed, 352 insertions(+), 92 deletions(-) create mode 100644 g4f/Provider/BlackboxAPI.py diff --git a/docs/providers-and-models.md b/docs/providers-and-models.md index ab8fe6af..7fbb5fae 100644 --- a/docs/providers-and-models.md +++ b/docs/providers-and-models.md @@ -39,8 +39,9 @@ This document provides an overview of various AI providers and models, including |----------|-------------|--------------|---------------|--------|--------|------|------| |[aichatfree.info](https://aichatfree.info)|No auth required|`g4f.Provider.AIChatFree`|`gemini-1.5-pro` _**(1+)**_|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| |[autonomous.ai](https://www.autonomous.ai/anon/)|No auth required|`g4f.Provider.AutonomousAI`|`llama-3.3-70b, qwen-2.5-coder-32b, hermes-3, llama-3.2-90b, llama-3.3-70b, llama-3-2-70b`|✔|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| -|[blackbox.ai](https://www.blackbox.ai)|No auth required|`g4f.Provider.Blackbox`|`blackboxai, gpt-4, gpt-4o, gemini-1.5-flash, gemini-1.5-pro, claude-3.5-sonnet, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3-1-405b, llama-3.3-70b, mixtral-7b, deepseek-chat, dbrx-instruct, qwq-32b, hermes-2-dpo, deepseek-r1` _**(+31)**_|`flux`|`blackboxai, gpt-4o, gemini-1.5-pro, gemini-1.5-flash, llama-3.1-8b, llama-3.1-70b, 
llama-3.1-405b`|✔|![](https://img.shields.io/badge/Active-brightgreen)| -|[cablyai.com](https://cablyai.com)|No auth required|`g4f.Provider.CablyAI`|`cably-80b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| +|[blackbox.ai](https://www.blackbox.ai)|No auth required|`g4f.Provider.Blackbox`|`blackboxai, gemini-1.5-flash, gemini-1.5-pro, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3-1-405b, llama-3.3-70b, mixtral-small-28b, deepseek-chat, dbrx-instruct, qwq-32b, hermes-2-dpo, deepseek-r1` _**(+34)**_|`flux`|`blackboxai, gpt-4o, gemini-1.5-pro, gemini-1.5-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b`|✔|![](https://img.shields.io/badge/Active-brightgreen)| +|[api.blackbox.ai](https://api.blackbox.ai)|No auth required|`g4f.Provider.BlackboxAPI`|`deepseek-v3, deepseek-r1, deepseek-chat, mixtral-small-28b, dbrx-instruct, qwq-32b, hermes-2-dpo`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| +|[cablyai.com](https://cablyai.com)|Optional API key|`g4f.Provider.CablyAI`|`gpt-4o-mini, llama-3.1-8b, deepseek-v3, deepseek-r1, o3-mini-low` _**(2+)**_|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| |[chatglm.cn](https://chatglm.cn)|No auth required|`g4f.Provider.ChatGLM`|`glm-4`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| |[chatgpt.com](https://chatgpt.com)|No auth required|`g4f.Provider.ChatGpt`|✔ _**(+7)**_|❌|❌|✔|![Error](https://img.shields.io/badge/HTTPError-f48d37)| |[chatgpt.es](https://chatgpt.es)|No auth required|`g4f.Provider.ChatGptEs`|`gpt-4, gpt-4o, gpt-4o-mini`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| @@ -126,18 +127,19 @@ This document provides an overview of various AI providers and models, including |-------|---------------|-----------|---------| |gpt-3|OpenAI|1+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)| |gpt-3.5-turbo|OpenAI|1+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)| -|gpt-4|OpenAI|11+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)| -|gpt-4o|OpenAI|9+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)| -|gpt-4o-mini|OpenAI|8+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)| +|gpt-4|OpenAI|10+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)| +|gpt-4o|OpenAI|8+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)| +|gpt-4o-mini|OpenAI|9+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)| |o1|OpenAI|1+ Providers|[openai.com](https://openai.com/index/introducing-openai-o1-preview/)| |o1-preview|OpenAI|1+ Providers|[openai.com](https://openai.com/index/introducing-openai-o1-preview/)| |o1-mini|OpenAI|1+ Providers|[openai.com](https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning/)| +|o3-mini-low|OpenAI|1+ Providers|[openai.com](https://openai.com/index/openai-o3-mini/)| |gigachat|GigaChat|1+ Providers|[developers.sber.ru/gigachat](https://developers.sber.ru/gigachat)| |meta-ai|Meta|1+ Providers|[ai.meta.com](https://ai.meta.com/)| |llama-2-7b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-2-7b)| |llama-3-8b|Meta Llama|2+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3/)| |llama-3-70b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Meta-Llama-3-70B)| -|llama-3.1-8b|Meta Llama|6+ 
Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)| +|llama-3.1-8b|Meta Llama|7+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)| |llama-3.1-70b|Meta Llama|6+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)| |llama-3.1-405b|Meta Llama|2+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.1-405B)| |llama-3.2-1b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-1B)| @@ -145,10 +147,10 @@ This document provides an overview of various AI providers and models, including |llama-3.2-11b|Meta Llama|3+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-2-connect-2024-vision-edge-mobile-devices/)| |llama-3.2-90b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-90B-Vision)| |llama-3.3-70b|Meta Llama|6+ Providers|[ai.meta.com](https://ai.meta.com/blog/llama-3-3/)| -|mixtral-7b|Mistral|1+|[mistral.ai](https://mistral.ai/news/mixtral-of-experts/)| |mixtral-8x7b|Mistral|2+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-of-experts/)| |mistral-nemo|Mistral|3+ Providers|[huggingface.co](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407)| -|hermes-2-dpo|NousResearch|1+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)| +|mixtral-small-28b|Mistral|2+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-small-28b/)| +|hermes-2-dpo|NousResearch|2+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)| |phi-3.5-mini|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/Phi-3.5-mini-instruct)| |wizardlm-2-7b|Microsoft|1+ Providers|[wizardlm.github.io](https://wizardlm.github.io/WizardLM2/)| |wizardlm-2-8x22b|Microsoft|2+ Providers|[wizardlm.github.io](https://wizardlm.github.io/WizardLM2/)| @@ -161,7 +163,7 @@ This document provides an overview of various AI providers and models, including |claude-3-haiku|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)| |claude-3-sonnet|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)| |claude-3-opus|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)| -|claude-3.5-sonnet|Anthropic|3+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)| +|claude-3.5-sonnet|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-5-sonnet)| |reka-core|Reka AI|1+ Providers|[reka.ai](https://www.reka.ai/ourmodels)| |blackboxai|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)| |blackboxai-pro|Blackbox AI|1+ Providers|[docs.blackbox.chat](https://docs.blackbox.chat/blackbox-ai-1)| @@ -174,18 +176,18 @@ This document provides an overview of various AI providers and models, including |qwen-2.5-72b|Qwen|3+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-72B-Instruct)| |qwen-2.5-coder-32b|Qwen|4+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-Coder-32B)| |qwen-2.5-1m-demo|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/Qwen2.5-1M-Demo)| -|qwq-32b|Qwen|4+ Providers|[huggingface.co](https://huggingface.co/Qwen/QwQ-32B-Preview)| +|qwq-32b|Qwen|5+ Providers|[huggingface.co](https://huggingface.co/Qwen/QwQ-32B-Preview)| |qvq-72b|Qwen|1+ Providers|[huggingface.co](https://huggingface.co/Qwen/QVQ-72B-Preview)| |pi|Inflection|1+ Providers|[inflection.ai](https://inflection.ai/blog/inflection-2-5)| 
-|deepseek-chat|DeepSeek|3+ Providers|[huggingface.co](https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat)| -|deepseek-v3|DeepSeek|2+ Providers|[api-docs.deepseek.com](https://api-docs.deepseek.com/news/news250120)| -|deepseek-r1|DeepSeek|6+ Providers|[api-docs.deepseek.com](https://api-docs.deepseek.com/news/news250120)| +|deepseek-chat|DeepSeek|4+ Providers|[huggingface.co](https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat)| +|deepseek-v3|DeepSeek|4+ Providers|[api-docs.deepseek.com](https://api-docs.deepseek.com/news/news250120)| +|deepseek-r1|DeepSeek|8+ Providers|[api-docs.deepseek.com](https://api-docs.deepseek.com/news/news250120)| |grok-2|x.ai|1+|[x.ai](https://x.ai/blog/grok-2)| |sonar|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)| |sonar-pro|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)| |sonar-reasoning|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)| |nemotron-70b|Nvidia|3+ Providers|[build.nvidia.com](https://build.nvidia.com/nvidia/llama-3_1-nemotron-70b-instruct)| -|dbrx-instruct|Databricks|1+ Providers|[huggingface.co](https://huggingface.co/databricks/dbrx-instruct)| +|dbrx-instruct|Databricks|2+ Providers|[huggingface.co](https://huggingface.co/databricks/dbrx-instruct)| |p1|PollinationsAI|1+ Providers|[pollinations.ai](https://pollinations.ai/)| |cably-80b|CablyAI|1+ Providers|[cablyai.com](https://cablyai.com)| |glm-4|THUDM|1+ Providers|[github.com/THUDM](https://github.com/THUDM/GLM-4)| @@ -193,7 +195,6 @@ This document provides an overview of various AI providers and models, including |evil|Evil Mode - Experimental|1+ Providers|| --- - ### Image Models | Model | Base Provider | Providers | Website | |-------|---------------|-----------|---------| @@ -207,6 +208,7 @@ This document provides an overview of various AI providers and models, including |midjourney|Midjourney|1+ Providers|[docs.midjourney.com](https://docs.midjourney.com/docs/model-versions)| + ## Conclusion and Usage Tips This document provides a comprehensive overview of various AI providers and models available for text generation, image generation, and vision tasks. **When choosing a provider or model, consider the following factors:** 1. **Availability**: Check the status of the provider to ensure it's currently active and accessible. 
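The provider and model tables above map directly onto the library's client interface (the same `g4f/client/__init__.py` module patched earlier in this series). As a usage illustration — a minimal sketch, assuming the OpenAI-style client API used throughout this repository, with the provider and model names taken from the tables:

```python
from g4f.client import Client
from g4f.Provider import Blackbox

# Pick a provider/model pair from the tables above; Blackbox is listed
# as "No auth required" and carries the deepseek-r1 text model.
client = Client(provider=Blackbox)
response = client.chat.completions.create(
    model="deepseek-r1",
    messages=[{"role": "user", "content": "Summarize the Llama 3.3 release."}],
)
print(response.choices[0].message.content)
```

If the chosen provider is marked inactive in the status column, swapping in another provider from the same model row is the intended fallback.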
diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 61982eea..f2de1367 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -38,16 +38,17 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
 default_model = "blackboxai"
 default_vision_model = default_model
 default_image_model = 'ImageGeneration'
- image_models = [default_image_model, "ImageGeneration2"]
- vision_models = [default_vision_model, 'gpt-4o', 'gemini-pro', 'deepseek-v3', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
- reasoning_models = ['deepseek-r1']
-
- userSelectedModel = ['gpt-4o', 'gemini-pro', 'claude-sonnet-3.5', 'deepseek-r1', 'deepseek-v3', 'blackboxai-pro']
+ image_models = [default_image_model]
+ vision_models = [default_vision_model, 'gpt-4o', 'o3-mini', 'gemini-pro', 'DeepSeek-V3', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
+ reasoning_models = ['DeepSeek-R1']
+
+ userSelectedModel = ['gpt-4o', 'o3-mini', 'claude-sonnet-3.5', 'gemini-pro', 'blackboxai-pro']
 agentMode = {
- 'ImageGeneration': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
+ 'DeepSeek-V3': {'mode': True, 'id': "deepseek-chat", 'name': "DeepSeek-V3"},
+ 'DeepSeek-R1': {'mode': True, 'id': "deepseek-reasoner", 'name': "DeepSeek-R1"},
 'Meta-Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
- 'Mistral-(7B)-Instruct-v0.2': {'mode': True, 'id': "mistralai/Mistral-7B-Instruct-v0.2", 'name': "Mistral-(7B)-Instruct-v0.2"},
+ 'Mistral-Small-24B-Instruct-2501': {'mode': True, 'id': "mistralai/Mistral-Small-24B-Instruct-2501", 'name': "Mistral-Small-24B-Instruct-2501"},
 'DeepSeek-LLM-Chat-(67B)': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "DeepSeek-LLM-Chat-(67B)"},
 'DBRX-Instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
 'Qwen-QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
@@ -96,12 +97,12 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
 models = list(dict.fromkeys([default_model, *userSelectedModel, *reasoning_models, *image_models, *list(agentMode.keys()), *list(trendingAgentMode.keys())]))
 model_aliases = {
- "gpt-4": "gpt-4o",
 "gemini-1.5-flash": "gemini-1.5-flash",
 "gemini-1.5-pro": "gemini-pro",
- "claude-3.5-sonnet": "claude-sonnet-3.5",
+ "deepseek-v3": "DeepSeek-V3",
+ "deepseek-r1": "DeepSeek-R1",
 "llama-3.3-70b": "Meta-Llama-3.3-70B-Instruct-Turbo",
- "mixtral-7b": "Mistral-(7B)-Instruct-v0.2",
+ "mixtral-small-28b": "Mistral-Small-24B-Instruct-2501",
 "deepseek-chat": "DeepSeek-LLM-Chat-(67B)",
 "dbrx-instruct": "DBRX-Instruct",
 "qwq-32b": "Qwen-QwQ-32B-Preview",
@@ -196,7 +197,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
 }
 async with ClientSession(headers=headers) as session:
- if model == "ImageGeneration2":
+ if model in cls.image_models:
 prompt = format_image_prompt(messages, prompt)
 data = {
 "query": format_image_prompt(messages, prompt),
@@ -294,32 +295,48 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
 if not text_to_yield or text_to_yield.isspace():
 return
- if model in cls.image_models:
- image_url_match = re.search(r'!\[.*?\]\((.*?)\)', text_to_yield)
- if image_url_match:
- image_url = image_url_match.group(1)
- prompt = format_image_prompt(messages, prompt)
- yield ImageResponse(images=[image_url], alt=prompt)
- else:
- if "Generated by BLACKBOX.AI" in text_to_yield:
- conversation.validated_value = await cls.fetch_validated(force_refresh=True)
- if conversation.validated_value:
- data["validated"] = conversation.validated_value
- async with session.post(cls.api_endpoint, json=data, proxy=proxy) as new_response:
- await raise_for_status(new_response)
- new_response_text = await new_response.text()
- new_parts = new_response_text.split('$~~~$')
- new_text = new_parts[2] if len(new_parts) >= 3 else new_response_text
-
- if new_text and not new_text.isspace():
- yield new_text
- else:
- if text_to_yield and not text_to_yield.isspace():
- yield text_to_yield
+ if model in cls.reasoning_models and "\n\n\n" in text_to_yield:
+ think_split = text_to_yield.split("\n\n\n", 1)
+ if len(think_split) > 1:
+ think_content, answer = think_split[0].strip(), think_split[1].strip()
+ yield Reasoning(status=think_content)
+ yield answer
+ else:
+ yield text_to_yield
+ elif "<think>" in text_to_yield:
+ pre_think, rest = text_to_yield.split('<think>', 1)
+ think_content, post_think = rest.split('</think>', 1)
+
+ pre_think = pre_think.strip()
+ think_content = think_content.strip()
+ post_think = post_think.strip()
+
+ if pre_think:
+ yield pre_think
+ if think_content:
+ yield Reasoning(status=think_content)
+ if post_think:
+ yield post_think
+
+ elif "Generated by BLACKBOX.AI" in text_to_yield:
+ conversation.validated_value = await cls.fetch_validated(force_refresh=True)
+ if conversation.validated_value:
+ data["validated"] = conversation.validated_value
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as new_response:
+ await raise_for_status(new_response)
+ new_response_text = await new_response.text()
+ new_parts = new_response_text.split('$~~~$')
+ new_text = new_parts[2] if len(new_parts) >= 3 else new_response_text
+
+ if new_text and not new_text.isspace():
+ yield new_text
+ else:
+ if text_to_yield and not text_to_yield.isspace():
+ yield text_to_yield
+ else:
+ if text_to_yield and not text_to_yield.isspace():
+ yield text_to_yield
- if return_conversation:
- conversation.message_history.append({"role": "assistant", "content": text_to_yield})
- yield conversation
+ if return_conversation:
+ conversation.message_history.append({"role": "assistant", "content": text_to_yield})
+ yield conversation
diff --git a/g4f/Provider/BlackboxAPI.py b/g4f/Provider/BlackboxAPI.py
new file mode 100644
index 00000000..d14429e4
--- /dev/null
+++ b/g4f/Provider/BlackboxAPI.py
@@ -0,0 +1,103 @@
+from __future__ import annotations
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..requests.raise_for_status import raise_for_status
+from ..providers.response import Reasoning
+from .helper import format_prompt
+
+class BlackboxAPI(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Blackbox AI API"
+ url = "https://api.blackbox.ai"
+ api_endpoint = "https://api.blackbox.ai/api/chat"
+
+ working = True
+ needs_auth = False
+ supports_stream = False
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'deepseek-ai/DeepSeek-V3'
+ reasoning_models = ['deepseek-ai/DeepSeek-R1']
+ models = [
+ default_model,
+ 'mistralai/Mistral-Small-24B-Instruct-2501',
+ 'deepseek-ai/deepseek-llm-67b-chat',
+ 'databricks/dbrx-instruct',
+ 'Qwen/QwQ-32B-Preview',
+ 'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO'
+ ] + reasoning_models
+
+ model_aliases = {
+ "deepseek-v3": "deepseek-ai/DeepSeek-V3",
+ "deepseek-r1": "deepseek-ai/DeepSeek-R1",
"deepseek-chat": "deepseek-ai/deepseek-llm-67b-chat", + "mixtral-small-28b": "mistralai/Mistral-Small-24B-Instruct-2501", + "dbrx-instruct": "databricks/dbrx-instruct", + "qwq-32b": "Qwen/QwQ-32B-Preview", + "hermes-2-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", + } + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + max_tokens: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "Content-Type": "application/json", + } + + async with ClientSession(headers=headers) as session: + data = { + "messages": messages, + "model": model, + "max_tokens": max_tokens + } + + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + await raise_for_status(response) + + is_reasoning = False + current_reasoning = "" + + async for chunk in response.content: + if not chunk: + continue + + text = chunk.decode(errors='ignore') + + if model in cls.reasoning_models: + if "" in text: + text = text.replace("", "") + is_reasoning = True + current_reasoning = text + continue + + if "" in text: + text = text.replace("", "") + is_reasoning = False + current_reasoning += text + yield Reasoning(status=current_reasoning.strip()) + current_reasoning = "" + continue + + if is_reasoning: + current_reasoning += text + continue + + try: + if text: + yield text + except Exception as e: + return + + if is_reasoning and current_reasoning: + yield Reasoning(status=current_reasoning.strip()) diff --git a/g4f/Provider/CablyAI.py b/g4f/Provider/CablyAI.py index d13d7a58..5ec045df 100644 --- a/g4f/Provider/CablyAI.py +++ b/g4f/Provider/CablyAI.py @@ -1,37 +1,166 @@ from __future__ import annotations +import json +from typing import AsyncGenerator +from aiohttp import ClientSession + from ..typing import AsyncResult, Messages -from .template import OpenaiTemplate +from .base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..requests.raise_for_status import raise_for_status +from ..providers.response import FinishReason, Reasoning + -class CablyAI(OpenaiTemplate): +class CablyAI(AsyncGeneratorProvider, ProviderModelMixin): + label = "CablyAI" url = "https://cablyai.com" - login_url = None - needs_auth = False - api_base = "https://cablyai.com/v1" + api_endpoint = "https://cablyai.com/v1/chat/completions" + api_key = "sk-your-openai-api-key" + working = True - - default_model = "Cably-80B" - models = [default_model] - model_aliases = {"cably-80b": default_model} + needs_auth = False + supports_stream = True + supports_system_message = True + supports_message_history = True + + default_model = 'gpt-4o-mini' + reasoning_models = ['deepseek-r1-uncensored'] + models = [ + default_model, + 'searchgpt', + 'llama-3.1-8b-instruct', + 'deepseek-v3', + 'tinyswallow1.5b', + 'andy-3.5', + 'o3-mini-low', + ] + reasoning_models + + model_aliases = { + "gpt-4o-mini": "searchgpt", + "llama-3.1-8b": "llama-3.1-8b-instruct", + "deepseek-r1": "deepseek-r1-uncensored", + } @classmethod - def create_async_generator( + async def create_async_generator( cls, model: str, messages: Messages, + api_key: str = None, + stream: bool = True, + proxy: str = None, **kwargs - ) -> AsyncResult: + ) -> AsyncResult: + model = cls.get_model(model) + api_key = api_key or cls.api_key + headers = { - 'Accept': '*/*', - 'Accept-Language': 'en-US,en;q=0.9', - 'Content-Type': 'application/json', - 'Origin': 'https://cablyai.com', - 'Referer': 'https://cablyai.com/chat', - 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) 
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.9",
+ "Authorization": f"Bearer {api_key}",
+ "Content-Type": "application/json",
+ "Origin": cls.url,
+ "Referer": f"{cls.url}/chat",
+ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
 }
- return super().create_async_generator(
- model=model,
- messages=messages,
- headers=headers,
- **kwargs
- )
+
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "model": model,
+ "messages": messages,
+ "stream": stream
+ }
+
+ async with session.post(
+ cls.api_endpoint,
+ json=data,
+ proxy=proxy
+ ) as response:
+ await raise_for_status(response)
+
+ if stream:
+ reasoning_buffer = []
+ in_reasoning = False
+
+ async for line in response.content:
+ if not line:
+ continue
+
+ line = line.decode('utf-8').strip()
+
+ if not line.startswith("data: "):
+ continue
+
+ if line == "data: [DONE]":
+ if in_reasoning and reasoning_buffer:
+ yield Reasoning(status="".join(reasoning_buffer).strip())
+ yield FinishReason("stop")
+ return
+
+ try:
+ json_data = json.loads(line[6:])
+ delta = json_data["choices"][0].get("delta", {})
+ content = delta.get("content", "")
+ finish_reason = json_data["choices"][0].get("finish_reason")
+
+ if finish_reason:
+ if in_reasoning and reasoning_buffer:
+ yield Reasoning(status="".join(reasoning_buffer).strip())
+ yield FinishReason(finish_reason)
+ return
+
+ if model in cls.reasoning_models:
+ # Processing the beginning of a <think> tag
+ if "<think>" in content:
+ pre, _, post = content.partition("<think>")
+ if pre:
+ yield pre
+ in_reasoning = True
+ content = post
+
+ # Processing the end of a </think> tag
+ if "</think>" in content:
+ in_reasoning = False
+ thought, _, post = content.partition("</think>")
+ if thought:
+ reasoning_buffer.append(thought)
+ if reasoning_buffer:
+ yield Reasoning(status="".join(reasoning_buffer).strip())
+ reasoning_buffer.clear()
+ if post:
+ yield post
+ continue
+
+ # Buffering content inside <think> tags
+ if in_reasoning:
+ reasoning_buffer.append(content)
+ else:
+ if content:
+ yield content
+ else:
+ if content:
+ yield content
+
+ except json.JSONDecodeError:
+ continue
+ except Exception:
+ yield FinishReason("error")
+ return
+ else:
+ try:
+ response_data = await response.json()
+ message = response_data["choices"][0]["message"]
+ content = message["content"]
+
+ if model in cls.reasoning_models and "<think>" in content:
+ think_start = content.find("<think>") + 7
+ think_end = content.find("</think>")
+ if think_start > 6 and think_end > 0:
+ reasoning = content[think_start:think_end].strip()
+ yield Reasoning(status=reasoning)
+ content = content[think_end + 8:].strip()
+
+ yield content
+ yield FinishReason("stop")
+ except Exception:
+ yield FinishReason("error")
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index c7ad606f..d79078be 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -15,6 +15,7 @@ from .mini_max import HailuoAI, MiniMax
 from .template import OpenaiTemplate, BackendApi

 from .Blackbox import Blackbox
+from .BlackboxAPI import BlackboxAPI
 from .CablyAI import CablyAI
 from .ChatGLM import ChatGLM
 from .ChatGpt import ChatGpt
diff --git a/g4f/models.py b/g4f/models.py
index c127c302..a0999b60 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -6,6 +6,7 @@ from .Provider import IterListProvider, ProviderType
 from .Provider import (
 ### no auth required ###
 Blackbox,
+ BlackboxAPI,
 CablyAI,
 ChatGLM,
 ChatGptEs,
@@ -123,20 +124,20 @@
gpt_35_turbo = Model( gpt_4 = Model( name = 'gpt-4', base_provider = 'OpenAI', - best_provider = IterListProvider([DDG, Blackbox, Jmuz, ChatGptEs, ChatGptt, PollinationsAI, Yqcloud, Copilot, OpenaiChat, Liaobots, Mhystical]) + best_provider = IterListProvider([DDG, Jmuz, ChatGptEs, ChatGptt, PollinationsAI, Yqcloud, Copilot, OpenaiChat, Liaobots, Mhystical]) ) # gpt-4o gpt_4o = VisionModel( name = 'gpt-4o', base_provider = 'OpenAI', - best_provider = IterListProvider([Blackbox, ChatGptt, Jmuz, ChatGptEs, PollinationsAI, DarkAI, Copilot, Liaobots, OpenaiChat]) + best_provider = IterListProvider([ChatGptt, Jmuz, ChatGptEs, PollinationsAI, DarkAI, Copilot, Liaobots, OpenaiChat]) ) gpt_4o_mini = Model( name = 'gpt-4o-mini', base_provider = 'OpenAI', - best_provider = IterListProvider([DDG, ChatGptEs, ChatGptt, Jmuz, PollinationsAI, OIVSCode, Liaobots, OpenaiChat]) + best_provider = IterListProvider([DDG, ChatGptEs, ChatGptt, Jmuz, PollinationsAI, OIVSCode, CablyAI, Liaobots, OpenaiChat]) ) # o1 @@ -158,6 +159,13 @@ o1_mini = Model( best_provider = Liaobots ) +# o3 +o3_mini_low = Model( + name = 'o3-mini-low', + base_provider = 'OpenAI', + best_provider = CablyAI +) + ### GigaChat ### gigachat = Model( name = 'GigaChat:latest', @@ -195,7 +203,7 @@ llama_3_70b = Model( llama_3_1_8b = Model( name = "llama-3.1-8b", base_provider = "Meta Llama", - best_provider = IterListProvider([Blackbox, DeepInfraChat, Glider, Jmuz, PollinationsAI, Cloudflare]) + best_provider = IterListProvider([Blackbox, DeepInfraChat, Glider, Jmuz, PollinationsAI, CablyAI, Cloudflare]) ) llama_3_1_70b = Model( @@ -243,12 +251,6 @@ llama_3_3_70b = Model( ) ### Mistral ### -mixtral_7b = Model( - name = "mixtral-7b", - base_provider = "Mistral", - best_provider = Blackbox -) - mixtral_8x7b = Model( name = "mixtral-8x7b", base_provider = "Mistral", @@ -261,11 +263,17 @@ mistral_nemo = Model( best_provider = IterListProvider([PollinationsAI, HuggingChat, HuggingFace]) ) +mixtral_small_28b = Model( + name = "mixtral-small-28b", + base_provider = "Mistral", + best_provider = IterListProvider([Blackbox, BlackboxAPI]) +) + ### NousResearch ### hermes_2_dpo = Model( name = "hermes-2-dpo", base_provider = "NousResearch", - best_provider = Blackbox + best_provider = IterListProvider([Blackbox, BlackboxAPI]) ) @@ -356,7 +364,7 @@ claude_3_opus = Model( claude_3_5_sonnet = Model( name = 'claude-3.5-sonnet', base_provider = 'Anthropic', - best_provider = IterListProvider([Blackbox, Jmuz, Liaobots]) + best_provider = IterListProvider([Jmuz, Liaobots]) ) ### Reka AI ### @@ -434,7 +442,7 @@ qwen_2_5_1m = Model( qwq_32b = Model( name = 'qwq-32b', base_provider = 'Qwen', - best_provider = IterListProvider([Blackbox, DeepInfraChat, Jmuz, HuggingChat]) + best_provider = IterListProvider([Blackbox, BlackboxAPI, DeepInfraChat, Jmuz, HuggingChat]) ) qvq_72b = VisionModel( name = 'qvq-72b', @@ -453,19 +461,19 @@ pi = Model( deepseek_chat = Model( name = 'deepseek-chat', base_provider = 'DeepSeek', - best_provider = IterListProvider([Blackbox, Jmuz, PollinationsAI]) + best_provider = IterListProvider([Blackbox, BlackboxAPI, Jmuz, PollinationsAI]) ) deepseek_v3 = Model( name = 'deepseek-v3', base_provider = 'DeepSeek', - best_provider = IterListProvider([Blackbox, DeepInfraChat]) + best_provider = IterListProvider([Blackbox, BlackboxAPI, DeepInfraChat, CablyAI]) ) deepseek_r1 = Model( name = 'deepseek-r1', base_provider = 'DeepSeek', - best_provider = IterListProvider([Blackbox, Glider, PollinationsAI, Jmuz, HuggingChat, HuggingFace]) + best_provider = 
IterListProvider([Blackbox, BlackboxAPI, Glider, PollinationsAI, Jmuz, CablyAI, HuggingChat, HuggingFace]) ) ### x.ai ### @@ -505,7 +513,7 @@ nemotron_70b = Model( dbrx_instruct = Model( name = 'dbrx-instruct', base_provider = 'Databricks', - best_provider = Blackbox + best_provider = IterListProvider([Blackbox, BlackboxAPI]) ) ### PollinationsAI ### @@ -657,9 +665,9 @@ class ModelUtils: llama_3_3_70b.name: llama_3_3_70b, ### Mistral ### - mixtral_7b.name: mixtral_7b, mixtral_8x7b.name: mixtral_8x7b, mistral_nemo.name: mistral_nemo, + mixtral_small_28b.name: mixtral_small_28b, ### NousResearch ### hermes_2_dpo.name: hermes_2_dpo, -- cgit v1.2.3 From 569077ac20178e3991c4f92f4c10d06b529cd0d3 Mon Sep 17 00:00:00 2001 From: kqlio67 <> Date: Mon, 3 Feb 2025 14:23:13 +0200 Subject: Update docs/providers-and-models.md g4f/models.py --- docs/providers-and-models.md | 2 -- g4f/models.py | 16 ---------------- 2 files changed, 18 deletions(-) diff --git a/docs/providers-and-models.md b/docs/providers-and-models.md index 7fbb5fae..a494b620 100644 --- a/docs/providers-and-models.md +++ b/docs/providers-and-models.md @@ -188,8 +188,6 @@ This document provides an overview of various AI providers and models, including |sonar-reasoning|Perplexity AI|1+ Providers|[docs.perplexity.ai](https://docs.perplexity.ai/)| |nemotron-70b|Nvidia|3+ Providers|[build.nvidia.com](https://build.nvidia.com/nvidia/llama-3_1-nemotron-70b-instruct)| |dbrx-instruct|Databricks|2+ Providers|[huggingface.co](https://huggingface.co/databricks/dbrx-instruct)| -|p1|PollinationsAI|1+ Providers|[pollinations.ai](https://pollinations.ai/)| -|cably-80b|CablyAI|1+ Providers|[cablyai.com](https://cablyai.com)| |glm-4|THUDM|1+ Providers|[github.com/THUDM](https://github.com/THUDM/GLM-4)| |mini_max|MiniMax|1+ Providers|[hailuo.ai](https://www.hailuo.ai/)| |evil|Evil Mode - Experimental|1+ Providers|| diff --git a/g4f/models.py b/g4f/models.py index a0999b60..4baf37f6 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -516,20 +516,6 @@ dbrx_instruct = Model( best_provider = IterListProvider([Blackbox, BlackboxAPI]) ) -### PollinationsAI ### -p1 = Model( - name = 'p1', - base_provider = 'PollinationsAI', - best_provider = PollinationsAI -) - -### CablyAI ### -cably_80b = Model( - name = 'cably-80b', - base_provider = 'CablyAI', - best_provider = CablyAI -) - ### THUDM ### glm_4 = Model( name = 'glm-4', @@ -743,8 +729,6 @@ class ModelUtils: nemotron_70b.name: nemotron_70b, ### Nvidia ### dbrx_instruct.name: dbrx_instruct, ### Databricks ### - p1.name: p1, ### PollinationsAI ### - cably_80b.name: cably_80b, ### CablyAI ### glm_4.name: glm_4, ### THUDM ### mini_max.name: mini_max, ## MiniMax evil.name: evil, ### Uncensored AI ### -- cgit v1.2.3 From de3a710140b11e29b9790bfd3f4a6ec33dfb4797 Mon Sep 17 00:00:00 2001 From: kqlio67 <> Date: Mon, 3 Feb 2025 15:16:48 +0200 Subject: Update docs/providers-and-models.md g4f/models.py g4f/Provider/Liaobots.py --- docs/providers-and-models.md | 2 +- g4f/Provider/Liaobots.py | 31 +++++++++++++++++++++++++++++++ g4f/models.py | 4 ++-- 3 files changed, 34 insertions(+), 3 deletions(-) diff --git a/docs/providers-and-models.md b/docs/providers-and-models.md index a494b620..a39011b7 100644 --- a/docs/providers-and-models.md +++ b/docs/providers-and-models.md @@ -61,7 +61,7 @@ This document provides an overview of various AI providers and models, including |[editor.imagelabs.net](editor.imagelabs.net)|No auth required|`g4f.Provider.ImageLabs`|❌|`sdxl-turbo`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| 
|[huggingface.co/spaces](https://huggingface.co/spaces)|Optional API key|`g4f.Provider.HuggingSpace`|`qvq-72b, qwen-2-72b, command-r, command-r-plus, command-r7b`|`flux-dev, flux-schnell, sd-3.5`|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
|[jmuz.me](https://jmuz.me)|Optional API key|`g4f.Provider.Jmuz`|`claude-3-haiku, claude-3-opus, claude-3-haiku, claude-3.5-sonnet, deepseek-r1, deepseek-chat, gemini-exp, gemini-1.5-flash, gemini-1.5-pro, gemini-2.0-flash-thinking, gpt-4, gpt-4o, gpt-4o-mini, llama-3-70b, llama-3-8b, llama-3.1-405b, llama-3.1-70b, llama-3.1-8b, llama-3.2-11b, llama-3.2-90b, llama-3.3-70b, mixtral-8x7b, qwen-2.5-72b, qwen-2.5-coder-32b, qwq-32b, wizardlm-2-8x22b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
-|[liaobots.work](https://liaobots.work)|[Automatic cookies](https://liaobots.work)|`g4f.Provider.Liaobots`|`grok-2, gpt-4o-mini, gpt-4o, gpt-4, o1-preview, o1-mini, claude-3-opus, claude-3.5-sonnet, claude-3-sonnet, gemini-1.5-flash, gemini-1.5-pro, gemini-2.0-flash, gemini-2.0-flash-thinking`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
+|[liaobots.work](https://liaobots.work)|[Automatic cookies](https://liaobots.work)|`g4f.Provider.Liaobots`|`grok-2, gpt-4o-mini, gpt-4o, gpt-4, o1-preview, o1-mini, deepseek-r1, deepseek-v3, claude-3-opus, claude-3.5-sonnet, claude-3-sonnet, gemini-1.5-flash, gemini-1.5-pro, gemini-2.0-flash, gemini-2.0-flash-thinking`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
|[mhystical.cc](https://mhystical.cc)|[Optional API key](https://mhystical.cc/dashboard)|`g4f.Provider.Mhystical`|`gpt-4`|❌|❌|✔|![Error](https://img.shields.io/badge/Active-brightgreen)|
|[oi-vscode-server.onrender.com](https://oi-vscode-server.onrender.com)|No auth required|`g4f.Provider.OIVSCode`|`gpt-4o-mini`|❌|`gpt-4o-mini`|✔|![](https://img.shields.io/badge/Active-brightgreen)|
|[labs.perplexity.ai](https://labs.perplexity.ai)|No auth required|`g4f.Provider.PerplexityLabs`|`sonar-online, sonar-chat, llama-3.3-70b, llama-3.1-8b, llama-3.1-70b, lfm-40b`|❌|❌|✔|![Error](https://img.shields.io/badge/Active-brightgreen)|
diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py
index 5e8fbf32..18a5f82d 100644
--- a/g4f/Provider/Liaobots.py
+++ b/g4f/Provider/Liaobots.py
@@ -54,6 +54,33 @@ models = {
 "tokenLimit": 100000,
 "context": "128K",
 },
+ "DeepSeek-R1-Distill-Llama-70b": {
+ "id": "DeepSeek-R1-Distill-Llama-70b",
+ "name": "DeepSeek-R1-70B",
+ "model": "DeepSeek-R1-70B",
+ "provider": "DeepSeek",
+ "maxLength": 400000,
+ "tokenLimit": 100000,
+ "context": "128K",
+ },
+ "DeepSeek-R1": {
+ "id": "DeepSeek-R1",
+ "name": "DeepSeek-R1",
+ "model": "DeepSeek-R1",
+ "provider": "DeepSeek",
+ "maxLength": 400000,
+ "tokenLimit": 100000,
+ "context": "128K",
+ },
+ "DeepSeek-V3": {
+ "id": "DeepSeek-V3",
+ "name": "DeepSeek-V3",
+ "model": "DeepSeek-V3",
+ "provider": "DeepSeek",
+ "maxLength": 400000,
+ "tokenLimit": 100000,
+ "context": "128K",
+ },
 "grok-2": {
 "id": "grok-2",
 "name": "Grok-2",
@@ -172,6 +199,10 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
 "o1-preview": "o1-preview-2024-09-12",
 "o1-mini": "o1-mini-2024-09-12",
+ "deepseek-r1": "DeepSeek-R1",
+ "deepseek-v3": "DeepSeek-V3",
+
 "claude-3-opus": "claude-3-opus-20240229",
 "claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
 "claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
diff --git a/g4f/models.py b/g4f/models.py
index 4baf37f6..e385f0ba 100644
--- a/g4f/models.py
+++
b/g4f/models.py @@ -467,13 +467,13 @@ deepseek_chat = Model( deepseek_v3 = Model( name = 'deepseek-v3', base_provider = 'DeepSeek', - best_provider = IterListProvider([Blackbox, BlackboxAPI, DeepInfraChat, CablyAI]) + best_provider = IterListProvider([Blackbox, BlackboxAPI, DeepInfraChat, CablyAI, Liaobots]) ) deepseek_r1 = Model( name = 'deepseek-r1', base_provider = 'DeepSeek', - best_provider = IterListProvider([Blackbox, BlackboxAPI, Glider, PollinationsAI, Jmuz, CablyAI, HuggingChat, HuggingFace]) + best_provider = IterListProvider([Blackbox, BlackboxAPI, Glider, PollinationsAI, Jmuz, CablyAI, Liaobots, HuggingChat, HuggingFace]) ) ### x.ai ### -- cgit v1.2.3 From c3e4a7c4e566d5614c45d0b3fcbeb29475d98734 Mon Sep 17 00:00:00 2001 From: kqlio67 <> Date: Mon, 3 Feb 2025 16:09:37 +0200 Subject: The DarkAI provider is disabled --- docs/providers-and-models.md | 7 +--- g4f/Provider/DarkAI.py | 76 ----------------------------------- g4f/Provider/__init__.py | 1 - g4f/Provider/not_working/DarkAI.py | 77 ++++++++++++++++++++++++++++++++++++ g4f/Provider/not_working/__init__.py | 1 + g4f/models.py | 19 +-------- 6 files changed, 82 insertions(+), 99 deletions(-) delete mode 100644 g4f/Provider/DarkAI.py create mode 100644 g4f/Provider/not_working/DarkAI.py diff --git a/docs/providers-and-models.md b/docs/providers-and-models.md index a39011b7..5e616ec3 100644 --- a/docs/providers-and-models.md +++ b/docs/providers-and-models.md @@ -48,7 +48,6 @@ This document provides an overview of various AI providers and models, including |[chatgptt.me](https://chatgptt.me)|No auth required|`g4f.Provider.ChatGptt`|`gpt-4, gpt-4o, gpt-4o-mini`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| |[playground.ai.cloudflare.com](https://playground.ai.cloudflare.com)|[Automatic cookies](https://playground.ai.cloudflare.com)|`g4f.Provider.Cloudflare`|`llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-1b, qwen-1.5-7b`|❌|❌|✔|![Error](https://img.shields.io/badge/Active-brightgreen)|❌| |[copilot.microsoft.com](https://copilot.microsoft.com)|Optional API key|`g4f.Provider.Copilot`|`gpt-4, gpt-4o`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| -|[darkai.foundation](https://darkai.foundation)|No auth required|`g4f.Provider.DarkAI`|`gpt-3.5-turbo, gpt-4o, llama-3.1-70b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| |[duckduckgo.com/aichat](https://duckduckgo.com/aichat)|No auth required|`g4f.Provider.DDG`|`gpt-4, gpt-4o-mini, claude-3-haiku, llama-3.1-70b, mixtral-8x7b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| |[deepinfra.com/chat](https://deepinfra.com/chat)|No auth required|`g4f.Provider.DeepInfraChat`|`llama-3.1-8b, llama-3.1-70b, deepseek-chat, qwq-32b, wizardlm-2-8x22b, wizardlm-2-7b, qwen-2.5-72b, qwen-2.5-coder-32b, nemotron-70b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| |[chat10.free2gpt.xyz](https://chat10.free2gpt.xyz)|No auth required|`g4f.Provider.Free2GPT`|`mistral-7b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| @@ -125,10 +124,8 @@ This document provides an overview of various AI providers and models, including ### Text Models | Model | Base Provider | Providers | Website | |-------|---------------|-----------|---------| -|gpt-3|OpenAI|1+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)| -|gpt-3.5-turbo|OpenAI|1+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-3-5-turbo)| |gpt-4|OpenAI|10+ 
Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4)| -|gpt-4o|OpenAI|8+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)| +|gpt-4o|OpenAI|7+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o)| |gpt-4o-mini|OpenAI|9+ Providers|[platform.openai.com](https://platform.openai.com/docs/models/gpt-4o-mini)| |o1|OpenAI|1+ Providers|[openai.com](https://openai.com/index/introducing-openai-o1-preview/)| |o1-preview|OpenAI|1+ Providers|[openai.com](https://openai.com/index/introducing-openai-o1-preview/)| @@ -140,7 +137,7 @@ This document provides an overview of various AI providers and models, including |llama-3-8b|Meta Llama|2+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3/)| |llama-3-70b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Meta-Llama-3-70B)| |llama-3.1-8b|Meta Llama|7+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)| -|llama-3.1-70b|Meta Llama|6+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)| +|llama-3.1-70b|Meta Llama|5+ Providers|[ai.meta.com](https://ai.meta.com/blog/meta-llama-3-1/)| |llama-3.1-405b|Meta Llama|2+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.1-405B)| |llama-3.2-1b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-1B)| |llama-3.2-3b|Meta Llama|1+ Providers|[huggingface.co](https://huggingface.co/meta-llama/Llama-3.2-3B)| diff --git a/g4f/Provider/DarkAI.py b/g4f/Provider/DarkAI.py deleted file mode 100644 index 1562703a..00000000 --- a/g4f/Provider/DarkAI.py +++ /dev/null @@ -1,76 +0,0 @@ -from __future__ import annotations - -import json -from aiohttp import ClientSession, ClientTimeout, StreamReader - -from ..typing import AsyncResult, Messages -from ..requests.raise_for_status import raise_for_status -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt - -class DarkAI(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://darkai.foundation/chat" - api_endpoint = "https://darkai.foundation/chat" - working = True - supports_stream = True - - default_model = 'llama-3-70b' - models = [ - 'gpt-4o', - 'gpt-3.5-turbo', - default_model, - ] - model_aliases = { - "llama-3.1-70b": "llama-3-70b", - } - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - - headers = { - "accept": "text/event-stream", - "content-type": "application/json", - "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36" - } - - timeout = ClientTimeout(total=600) # Increase timeout to 10 minutes - - async with ClientSession(headers=headers, timeout=timeout) as session: - prompt = format_prompt(messages) - data = { - "query": prompt, - "model": model, - } - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - await raise_for_status(response) - reader: StreamReader = response.content - buffer = b"" - while True: - chunk = await reader.read(1024) # Read in smaller chunks - if not chunk: - break - buffer += chunk - while b"\n" in buffer: - line, buffer = buffer.split(b"\n", 1) - line = line.strip() - if line: - try: - line_str = line.decode() - if line_str.startswith('data: '): - chunk_data = json.loads(line_str[6:]) - if chunk_data['event'] == 'text-chunk': - chunk = 
chunk_data['data']['text'] - yield chunk - elif chunk_data['event'] == 'stream-end': - return - except json.JSONDecodeError: - pass - except Exception: - pass diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index d79078be..05788e86 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -23,7 +23,6 @@ from .ChatGptEs import ChatGptEs from .ChatGptt import ChatGptt from .Cloudflare import Cloudflare from .Copilot import Copilot -from .DarkAI import DarkAI from .DDG import DDG from .DeepInfraChat import DeepInfraChat from .Free2GPT import Free2GPT diff --git a/g4f/Provider/not_working/DarkAI.py b/g4f/Provider/not_working/DarkAI.py new file mode 100644 index 00000000..cb19c6bd --- /dev/null +++ b/g4f/Provider/not_working/DarkAI.py @@ -0,0 +1,77 @@ +from __future__ import annotations + +import json +from aiohttp import ClientSession, ClientTimeout, StreamReader + +from ...typing import AsyncResult, Messages +from ...requests.raise_for_status import raise_for_status +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt + +class DarkAI(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://darkai.foundation/chat" + api_endpoint = "https://darkai.foundation/chat" + + working = False + supports_stream = True + + default_model = 'llama-3-70b' + models = [ + 'gpt-4o', + 'gpt-3.5-turbo', + default_model, + ] + model_aliases = { + "llama-3.1-70b": "llama-3-70b", + } + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + + headers = { + "accept": "text/event-stream", + "content-type": "application/json", + "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36" + } + + timeout = ClientTimeout(total=600) # Increase timeout to 10 minutes + + async with ClientSession(headers=headers, timeout=timeout) as session: + prompt = format_prompt(messages) + data = { + "query": prompt, + "model": model, + } + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + await raise_for_status(response) + reader: StreamReader = response.content + buffer = b"" + while True: + chunk = await reader.read(1024) # Read in smaller chunks + if not chunk: + break + buffer += chunk + while b"\n" in buffer: + line, buffer = buffer.split(b"\n", 1) + line = line.strip() + if line: + try: + line_str = line.decode() + if line_str.startswith('data: '): + chunk_data = json.loads(line_str[6:]) + if chunk_data['event'] == 'text-chunk': + chunk = chunk_data['data']['text'] + yield chunk + elif chunk_data['event'] == 'stream-end': + return + except json.JSONDecodeError: + pass + except Exception: + pass diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py index 6dfe69f7..fa0b5dbb 100644 --- a/g4f/Provider/not_working/__init__.py +++ b/g4f/Provider/not_working/__init__.py @@ -10,6 +10,7 @@ from .Aura import Aura from .Chatgpt4o import Chatgpt4o from .Chatgpt4Online import Chatgpt4Online from .ChatgptFree import ChatgptFree +from .DarkAI import DarkAI from .FlowGpt import FlowGpt from .FreeNetfly import FreeNetfly from .Koala import Koala diff --git a/g4f/models.py b/g4f/models.py index e385f0ba..c745e586 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -13,7 +13,6 @@ from .Provider import ( ChatGptt, Cloudflare, Copilot, - DarkAI, DDG, DeepInfraChat, HuggingSpace, @@ -87,7 +86,6 @@ default = 
Model( Jmuz, CablyAI, OIVSCode, - DarkAI, OpenaiChat, Cloudflare, ]) @@ -113,13 +111,6 @@ default_vision = Model( ################### ### OpenAI ### -# gpt-3.5 -gpt_35_turbo = Model( - name = 'gpt-3.5-turbo', - base_provider = 'OpenAI', - best_provider = DarkAI -) - # gpt-4 gpt_4 = Model( name = 'gpt-4', @@ -131,7 +122,7 @@ gpt_4 = Model( gpt_4o = VisionModel( name = 'gpt-4o', base_provider = 'OpenAI', - best_provider = IterListProvider([ChatGptt, Jmuz, ChatGptEs, PollinationsAI, DarkAI, Copilot, Liaobots, OpenaiChat]) + best_provider = IterListProvider([ChatGptt, Jmuz, ChatGptEs, PollinationsAI, Copilot, Liaobots, OpenaiChat]) ) gpt_4o_mini = Model( @@ -209,7 +200,7 @@ llama_3_1_8b = Model( llama_3_1_70b = Model( name = "llama-3.1-70b", base_provider = "Meta Llama", - best_provider = IterListProvider([DDG, Blackbox, Glider, Jmuz, TeachAnything, DarkAI]) + best_provider = IterListProvider([DDG, Blackbox, Glider, Jmuz, TeachAnything]) ) llama_3_1_405b = Model( @@ -608,12 +599,6 @@ class ModelUtils: ############ ### OpenAI ### - # gpt-3 - 'gpt-3': gpt_35_turbo, - - # gpt-3.5 - gpt_35_turbo.name: gpt_35_turbo, - # gpt-4 gpt_4.name: gpt_4, -- cgit v1.2.3 From 197f491d43ef3b34fbb5837337bb2bb2998e96ea Mon Sep 17 00:00:00 2001 From: kqlio67 <> Date: Mon, 3 Feb 2025 16:17:41 +0200 Subject: Disabled provider 'GPROChat' --- docs/providers-and-models.md | 2 +- g4f/Provider/GPROChat.py | 69 ------------------------------------ g4f/Provider/__init__.py | 1 - g4f/Provider/not_working/GPROChat.py | 60 +++++++++++++++++++++++++++++++ g4f/Provider/not_working/__init__.py | 1 + g4f/models.py | 3 +- 6 files changed, 63 insertions(+), 73 deletions(-) delete mode 100644 g4f/Provider/GPROChat.py create mode 100644 g4f/Provider/not_working/GPROChat.py diff --git a/docs/providers-and-models.md b/docs/providers-and-models.md index 5e616ec3..5979da34 100644 --- a/docs/providers-and-models.md +++ b/docs/providers-and-models.md @@ -154,7 +154,7 @@ This document provides an overview of various AI providers and models, including |gemini|Google DeepMind|1+|[deepmind.google](http://deepmind.google/technologies/gemini/)| |gemini-exp|Google DeepMind|1+ Providers|[blog.google](https://blog.google/feed/gemini-exp-1206/)| |gemini-1.5-flash|Google DeepMind|5+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)| -|gemini-1.5-pro|Google DeepMind|6+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)| +|gemini-1.5-pro|Google DeepMind|5+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)| |gemini-2.0-flash|Google DeepMind|2+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)| |gemini-2.0-flash-thinking|Google DeepMind|1+ Providers|[ai.google.dev](https://ai.google.dev/gemini-api/docs/thinking-mode)| |claude-3-haiku|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)| diff --git a/g4f/Provider/GPROChat.py b/g4f/Provider/GPROChat.py deleted file mode 100644 index 71465ecf..00000000 --- a/g4f/Provider/GPROChat.py +++ /dev/null @@ -1,69 +0,0 @@ -from __future__ import annotations - -import time -import hashlib -from aiohttp import ClientSession - -from ..typing import AsyncResult, Messages -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import format_prompt - -class GPROChat(AsyncGeneratorProvider, ProviderModelMixin): - url = "https://gprochat.com" - api_endpoint = "https://gprochat.com/api/generate" - - working = True - supports_stream = True - 
supports_message_history = True - default_model = 'gemini-1.5-pro' - - @staticmethod - def generate_signature(timestamp: int, message: str) -> str: - secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8" - hash_input = f"{timestamp}:{message}:{secret_key}" - signature = hashlib.sha256(hash_input.encode('utf-8')).hexdigest() - return signature - - @classmethod - def get_model(cls, model: str) -> str: - if model in cls.models: - return model - elif model in cls.model_aliases: - return cls.model_aliases[model] - else: - return cls.default_model - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - **kwargs - ) -> AsyncResult: - model = cls.get_model(model) - timestamp = int(time.time() * 1000) - prompt = format_prompt(messages) - sign = cls.generate_signature(timestamp, prompt) - - headers = { - "accept": "*/*", - "origin": cls.url, - "referer": f"{cls.url}/", - "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", - "content-type": "text/plain;charset=UTF-8" - } - - data = { - "messages": [{"role": "user", "parts": [{"text": prompt}]}], - "time": timestamp, - "pass": None, - "sign": sign - } - - async with ClientSession(headers=headers) as session: - async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: - response.raise_for_status() - async for chunk in response.content.iter_any(): - if chunk: - yield chunk.decode() diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index 05788e86..a0f52fe4 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -29,7 +29,6 @@ from .Free2GPT import Free2GPT from .FreeGpt import FreeGpt from .GizAI import GizAI from .Glider import Glider -from .GPROChat import GPROChat from .ImageLabs import ImageLabs from .Jmuz import Jmuz from .Liaobots import Liaobots diff --git a/g4f/Provider/not_working/GPROChat.py b/g4f/Provider/not_working/GPROChat.py new file mode 100644 index 00000000..5274157c --- /dev/null +++ b/g4f/Provider/not_working/GPROChat.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +import time +import hashlib +from aiohttp import ClientSession + +from ...typing import AsyncResult, Messages +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import format_prompt + +class GPROChat(AsyncGeneratorProvider, ProviderModelMixin): + url = "https://gprochat.com" + api_endpoint = "https://gprochat.com/api/generate" + + working = False + supports_stream = True + supports_message_history = True + default_model = 'gemini-1.5-pro' + + @staticmethod + def generate_signature(timestamp: int, message: str) -> str: + secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8" + hash_input = f"{timestamp}:{message}:{secret_key}" + signature = hashlib.sha256(hash_input.encode('utf-8')).hexdigest() + return signature + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + **kwargs + ) -> AsyncResult: + model = cls.get_model(model) + timestamp = int(time.time() * 1000) + prompt = format_prompt(messages) + sign = cls.generate_signature(timestamp, prompt) + + headers = { + "accept": "*/*", + "origin": cls.url, + "referer": f"{cls.url}/", + "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36", + "content-type": "text/plain;charset=UTF-8" + } + + data = { + "messages": [{"role": "user", "parts": [{"text": prompt}]}], + "time": timestamp, 
+ "pass": None, + "sign": sign + } + + async with ClientSession(headers=headers) as session: + async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response: + response.raise_for_status() + async for chunk in response.content.iter_any(): + if chunk: + yield chunk.decode() diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py index fa0b5dbb..978cd839 100644 --- a/g4f/Provider/not_working/__init__.py +++ b/g4f/Provider/not_working/__init__.py @@ -13,6 +13,7 @@ from .ChatgptFree import ChatgptFree from .DarkAI import DarkAI from .FlowGpt import FlowGpt from .FreeNetfly import FreeNetfly +from .GPROChat import GPROChat from .Koala import Koala from .MagickPen import MagickPen from .MyShell import MyShell diff --git a/g4f/models.py b/g4f/models.py index c745e586..195e02a2 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -17,7 +17,6 @@ from .Provider import ( DeepInfraChat, HuggingSpace, Glider, - GPROChat, ImageLabs, Jmuz, Liaobots, @@ -314,7 +313,7 @@ gemini_1_5_flash = Model( gemini_1_5_pro = Model( name = 'gemini-1.5-pro', base_provider = 'Google DeepMind', - best_provider = IterListProvider([Blackbox, Jmuz, GPROChat, Gemini, GeminiPro, Liaobots]) + best_provider = IterListProvider([Blackbox, Jmuz, Gemini, GeminiPro, Liaobots]) ) # gemini-2.0 -- cgit v1.2.3 From 95821b5b8b7ab5232a8ff03b41abd9d5bd369953 Mon Sep 17 00:00:00 2001 From: kqlio67 <> Date: Mon, 3 Feb 2025 17:57:27 +0200 Subject: Update g4f/Provider/DeepInfraChat.py --- docs/providers-and-models.md | 4 ++-- g4f/Provider/DeepInfraChat.py | 28 ++++++++++++++++------------ g4f/models.py | 22 +++++++++++----------- 3 files changed, 29 insertions(+), 25 deletions(-) diff --git a/docs/providers-and-models.md b/docs/providers-and-models.md index 5979da34..8d2a1773 100644 --- a/docs/providers-and-models.md +++ b/docs/providers-and-models.md @@ -49,7 +49,7 @@ This document provides an overview of various AI providers and models, including |[playground.ai.cloudflare.com](https://playground.ai.cloudflare.com)|[Automatic cookies](https://playground.ai.cloudflare.com)|`g4f.Provider.Cloudflare`|`llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-1b, qwen-1.5-7b`|❌|❌|✔|![Error](https://img.shields.io/badge/Active-brightgreen)|❌| |[copilot.microsoft.com](https://copilot.microsoft.com)|Optional API key|`g4f.Provider.Copilot`|`gpt-4, gpt-4o`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| |[duckduckgo.com/aichat](https://duckduckgo.com/aichat)|No auth required|`g4f.Provider.DDG`|`gpt-4, gpt-4o-mini, claude-3-haiku, llama-3.1-70b, mixtral-8x7b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| -|[deepinfra.com/chat](https://deepinfra.com/chat)|No auth required|`g4f.Provider.DeepInfraChat`|`llama-3.1-8b, llama-3.1-70b, deepseek-chat, qwq-32b, wizardlm-2-8x22b, wizardlm-2-7b, qwen-2.5-72b, qwen-2.5-coder-32b, nemotron-70b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| +|[deepinfra.com/chat](https://deepinfra.com/chat)|No auth required|`g4f.Provider.DeepInfraChat`|`llama-3.1-8b, llama-3.2-90b, llama-3.3-70b, deepseek-v3, mixtral-small-28b, deepseek-r1, phi-4, wizardlm-2-8x22b, qwen-2.5-72b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| |[chat10.free2gpt.xyz](https://chat10.free2gpt.xyz)|No auth required|`g4f.Provider.Free2GPT`|`mistral-7b`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)| |[freegptsnav.aifree.site](https://freegptsnav.aifree.site)|No auth 
 |[app.giz.ai/assistant](https://app.giz.ai/assistant)|No auth required|`g4f.Provider.GizAI`|`gemini-1.5-flash`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
@@ -149,7 +149,7 @@ This document provides an overview of various AI providers and models, including
 |mixtral-small-28b|Mistral|2+ Providers|[mistral.ai](https://mistral.ai/news/mixtral-small-28b/)|
 |hermes-2-dpo|NousResearch|2+ Providers|[huggingface.co](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)|
 |phi-3.5-mini|Microsoft|1+ Providers|[huggingface.co](https://huggingface.co/microsoft/Phi-3.5-mini-instruct)|
-|wizardlm-2-7b|Microsoft|1+ Providers|[wizardlm.github.io](https://wizardlm.github.io/WizardLM2/)|
+|phi-4|Microsoft|1+ Providers|[techcommunity.microsoft.com](https://techcommunity.microsoft.com/blog/aiplatformblog/introducing-phi-4-microsoft%E2%80%99s-newest-small-language-model-specializing-in-comple/4357090)|
 |wizardlm-2-8x22b|Microsoft|2+ Providers|[wizardlm.github.io](https://wizardlm.github.io/WizardLM2/)|
 |gemini|Google DeepMind|1+|[deepmind.google](http://deepmind.google/technologies/gemini/)|
 |gemini-exp|Google DeepMind|1+ Providers|[blog.google](https://blog.google/feed/gemini-exp-1206/)|
diff --git a/g4f/Provider/DeepInfraChat.py b/g4f/Provider/DeepInfraChat.py
index 8c3b8af5..9f22ba03 100644
--- a/g4f/Provider/DeepInfraChat.py
+++ b/g4f/Provider/DeepInfraChat.py
@@ -10,30 +10,30 @@ class DeepInfraChat(OpenaiTemplate):
     default_model = 'meta-llama/Llama-3.3-70B-Instruct-Turbo'
     models = [
-        'meta-llama/Llama-3.3-70B-Instruct',
         'meta-llama/Meta-Llama-3.1-8B-Instruct',
+        'meta-llama/Llama-3.2-90B-Vision-Instruct',
         default_model,
-        'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',
         'deepseek-ai/DeepSeek-V3',
-        'Qwen/QwQ-32B-Preview',
+        'mistralai/Mistral-Small-24B-Instruct-2501',
+        'deepseek-ai/DeepSeek-R1',
+        'deepseek-ai/DeepSeek-R1-Distill-Llama-70B',
+        'deepseek-ai/DeepSeek-R1-Distill-Qwen-32B',
+        'microsoft/phi-4',
         'microsoft/WizardLM-2-8x22B',
-        'microsoft/WizardLM-2-7B',
         'Qwen/Qwen2.5-72B-Instruct',
-        'Qwen/Qwen2.5-Coder-32B-Instruct',
-        'nvidia/Llama-3.1-Nemotron-70B-Instruct',
     ]
 
     model_aliases = {
-        "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct",
         "llama-3.1-8b": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+        "llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct",
         "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-        "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
         "deepseek-v3": "deepseek-ai/DeepSeek-V3",
-        "qwq-32b": "Qwen/QwQ-32B-Preview",
+        "mixtral-small-28b": "mistralai/Mistral-Small-24B-Instruct-2501",
+        "deepseek-r1": "deepseek-ai/DeepSeek-R1",
+        "deepseek-r1-distill-llama-70b": "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
+        "deepseek-r1-distill-qwen-32b": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+        "phi-4": "microsoft/phi-4",
         "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
-        "wizardlm-2-7b": "microsoft/WizardLM-2-7B",
         "qwen-2.5-72b": "Qwen/Qwen2.5-72B-Instruct",
-        "qwen-2.5-coder-32b": "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct",
     }
 
     @classmethod
@@ -41,6 +41,10 @@ class DeepInfraChat(OpenaiTemplate):
         cls,
         model: str,
         messages: Messages,
+        stream: bool = True,
+        top_p: float = 0.9,
+        temperature: float = 0.7,
+        max_tokens: int = None,
         headers: dict = {},
         **kwargs
     ) -> AsyncResult:
diff --git a/g4f/models.py b/g4f/models.py
index 195e02a2..b3c1678a 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -230,7 +230,7 @@ llama_3_2_11b = VisionModel(
 llama_3_2_90b = Model(
     name = "llama-3.2-90b",
     base_provider = "Meta Llama",
-    best_provider = Jmuz
+    best_provider = IterListProvider([DeepInfraChat, Jmuz])
 )
 
 # llama 3.3
@@ -256,7 +256,7 @@ mistral_nemo = Model(
 mixtral_small_28b = Model(
     name = "mixtral-small-28b",
     base_provider = "Mistral",
-    best_provider = IterListProvider([Blackbox, BlackboxAPI])
+    best_provider = IterListProvider([Blackbox, BlackboxAPI, DeepInfraChat])
 )
 
 ### NousResearch ###
@@ -275,13 +275,13 @@ phi_3_5_mini = Model(
     best_provider = HuggingChat
 )
 
-# wizardlm
-wizardlm_2_7b = Model(
-    name = 'wizardlm-2-7b',
-    base_provider = 'Microsoft',
+phi_4 = Model(
+    name = "phi-4",
+    base_provider = "Microsoft",
     best_provider = DeepInfraChat
 )
 
+# wizardlm
 wizardlm_2_8x22b = Model(
     name = 'wizardlm-2-8x22b',
     base_provider = 'Microsoft',
@@ -420,7 +420,7 @@ qwen_2_5_72b = Model(
 qwen_2_5_coder_32b = Model(
     name = 'qwen-2.5-coder-32b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([DeepInfraChat, PollinationsAI, Jmuz, HuggingChat])
+    best_provider = IterListProvider([PollinationsAI, Jmuz, HuggingChat])
 )
 qwen_2_5_1m = Model(
     name = 'qwen-2.5-1m-demo',
@@ -432,7 +432,7 @@ qwen_2_5_1m = Model(
 qwq_32b = Model(
     name = 'qwq-32b',
     base_provider = 'Qwen',
-    best_provider = IterListProvider([Blackbox, BlackboxAPI, DeepInfraChat, Jmuz, HuggingChat])
+    best_provider = IterListProvider([Blackbox, BlackboxAPI, Jmuz, HuggingChat])
 )
 qvq_72b = VisionModel(
     name = 'qvq-72b',
@@ -463,7 +463,7 @@ deepseek_v3 = Model(
 deepseek_r1 = Model(
     name = 'deepseek-r1',
     base_provider = 'DeepSeek',
-    best_provider = IterListProvider([Blackbox, BlackboxAPI, Glider, PollinationsAI, Jmuz, CablyAI, Liaobots, HuggingChat, HuggingFace])
+    best_provider = IterListProvider([Blackbox, BlackboxAPI, DeepInfraChat, Glider, PollinationsAI, Jmuz, CablyAI, Liaobots, HuggingChat, HuggingFace])
 )
 
 ### x.ai ###
@@ -496,7 +496,7 @@ sonar_reasoning = Model(
 nemotron_70b = Model(
     name = 'nemotron-70b',
     base_provider = 'Nvidia',
-    best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace])
+    best_provider = IterListProvider([HuggingChat, HuggingFace])
 )
 
 ### Databricks ###
@@ -645,9 +645,9 @@ class ModelUtils:
         ### Microsoft ###
         # phi
         phi_3_5_mini.name: phi_3_5_mini,
+        phi_4.name: phi_4,
 
         # wizardlm
-        wizardlm_2_7b.name: wizardlm_2_7b,
         wizardlm_2_8x22b.name: wizardlm_2_8x22b,
 
         ### Google ###
-- 
cgit v1.2.3

From 6a4a9b774cb63f74516b136d2b3c8956d892448c Mon Sep 17 00:00:00 2001
From: H Lohaus
Date: Mon, 3 Feb 2025 17:20:28 +0100
Subject: Update README.md

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 19a68fba..3128e960 100644
--- a/README.md
+++ b/README.md
@@ -93,12 +93,12 @@ Is your site on this repository and you want to take it down? Send an email to t
 
 ### 🐳 Using Docker
 1. **Install Docker:** [Download and install Docker](https://docs.docker.com/get-docker/).
-2. **Set Up Directories:** Before running the container, make sure the necessary data directories exist or can be created. For example, you can create and set ownership on these directories by running: (Only x64)
+2. **Set Up Directories:** Before running the container, make sure the necessary data directories exist or can be created. For example, you can create and set ownership on these directories by running:
 ```bash
 mkdir -p ${PWD}/har_and_cookies ${PWD}/generated_images
 sudo chown -R 1200:1201 ${PWD}/har_and_cookies ${PWD}/generated_images
 ```
-3. **Run the Docker Container:** Use the following commands to pull the latest image and start the container:
+3. **Run the Docker Container:** Use the following commands to pull the latest image and start the container (Only x64):
 ```bash
 docker pull hlohaus789/g4f
 docker run -p 8080:8080 -p 7900:7900 \
-- 
cgit v1.2.3

From eaf8619cae756c2b8c1d927f0269dd69f247e410 Mon Sep 17 00:00:00 2001
From: kqlio67 <>
Date: Mon, 3 Feb 2025 19:16:36 +0200
Subject: Update g4f/Provider/Glider.py

---
 g4f/Provider/Glider.py | 29 ++---------------------------
 1 file changed, 2 insertions(+), 27 deletions(-)

diff --git a/g4f/Provider/Glider.py b/g4f/Provider/Glider.py
index dc85b3ba..f06feb65 100644
--- a/g4f/Provider/Glider.py
+++ b/g4f/Provider/Glider.py
@@ -20,12 +20,12 @@ class Glider(AsyncGeneratorProvider, ProviderModelMixin):
     supports_message_history = True
 
     default_model = 'chat-llama-3-1-70b'
-    reasoning_models = ['deepseek-ai/DeepSeek-R1']
     models = [
         'chat-llama-3-1-70b',
         'chat-llama-3-1-8b',
         'chat-llama-3-2-3b',
-    ] + reasoning_models
+        'deepseek-ai/DeepSeek-R1'
+    ]
 
     model_aliases = {
         "llama-3.1-70b": "chat-llama-3-1-70b",
@@ -69,9 +69,6 @@
             async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                 await raise_for_status(response)
-                is_reasoning = False
-                current_reasoning = ""
-
                 async for chunk in response.content:
                     if not chunk:
                         continue
 
@@ -82,34 +79,12 @@
                         continue
 
                     if "[DONE]" in text:
-                        if is_reasoning and current_reasoning:
-                            yield Reasoning(status=current_reasoning.strip())
                         yield FinishReason("stop")
                         return
 
                     try:
                         json_data = json.loads(text[6:])
                         content = json_data["choices"][0].get("delta", {}).get("content", "")
-
-                        if model in cls.reasoning_models:
-                            if "<think>" in content:
-                                content = content.replace("<think>", "")
-                                is_reasoning = True
-                                current_reasoning = content
-                                continue
-
-                            if "</think>" in content:
-                                content = content.replace("</think>", "")
-                                is_reasoning = False
-                                current_reasoning += content
-                                yield Reasoning(status=current_reasoning.strip())
-                                current_reasoning = ""
-                                continue
-
-                            if is_reasoning:
-                                current_reasoning += content
-                                continue
-
                         if content:
                             yield content
 
-- 
cgit v1.2.3
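
Editor's note: the patches above reshuffle provider model lists and add streaming parameters, so a quick smoke test after applying them is useful. The sketch below is illustrative only and is not part of any patch: it assumes the post-patch tree, that the documented `g4f.client.Client` streaming interface (`chunk.choices[0].delta.content`) is unchanged, and that extra keywords such as `top_p` are forwarded to the provider. The `stream`, `top_p`, `temperature`, and `max_tokens` names mirror the parameters added to `DeepInfraChat.create_async_generator` in the DeepInfraChat patch.

```python
# Illustrative smoke test for the updated DeepInfraChat provider; not part of
# the patch series above.
from g4f.client import Client
from g4f.Provider import DeepInfraChat

client = Client(provider=DeepInfraChat)

# "deepseek-r1" resolves through DeepInfraChat.model_aliases to
# "deepseek-ai/DeepSeek-R1"; the sampling keywords are passed through to
# create_async_generator via **kwargs.
response = client.chat.completions.create(
    model="deepseek-r1",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    stream=True,
    top_p=0.9,
    temperature=0.7,
    max_tokens=128,
)

# Print the streamed deltas as they arrive.
for chunk in response:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
print()
```

The same pattern, with `model="qwq-32b"` or `model="phi-4"`, exercises the other aliases the patches touch; a model that raises `ModelNotFoundError` indicates the alias table and `models` list are out of sync.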