From 59c65a096f7e0400c1f09315c43c9ecaf9024191 Mon Sep 17 00:00:00 2001
From: kqlio67 <>
Date: Fri, 7 Feb 2025 19:50:21 +0200
Subject: feat(g4f/Provider/Blackbox.py): Add Gemini-Flash-2.0 model and improve premium handling

---
 docs/providers-and-models.md | 4 ++--
 g4f/Provider/Blackbox.py     | 8 +++++---
 g4f/models.py                | 2 +-
 3 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/docs/providers-and-models.md b/docs/providers-and-models.md
index a19c7815..3e89f50a 100644
--- a/docs/providers-and-models.md
+++ b/docs/providers-and-models.md
@@ -38,7 +38,7 @@ This document provides an overview of various AI providers and models, including
 | Website | API Credentials | Provider | Text Models | Image Models | Vision (Image Upload) | Stream | Status |
 |----------|-------------|--------------|---------------|--------|--------|------|------|
 |[aichatfree.info](https://aichatfree.info)|No auth required|`g4f.Provider.AIChatFree`|`gemini-1.5-pro` _**(1+)**_|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
-|[blackbox.ai](https://www.blackbox.ai)|No auth required|`g4f.Provider.Blackbox`|`blackboxai, gpt-4, gpt-4o, o3-mini, claude-3.5-sonnet, gemini-1.5-flash, gemini-1.5-pro, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3-1-405b, llama-3.3-70b, mixtral-small-28b, deepseek-chat, dbrx-instruct, qwq-32b, hermes-2-dpo, deepseek-r1` _**(+34)**_|`flux`|`blackboxai, gpt-4o, o3-mini, gemini-1.5-pro, gemini-1.5-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b`|✔|![](https://img.shields.io/badge/Active-brightgreen)|
+|[blackbox.ai](https://www.blackbox.ai)|No auth required|`g4f.Provider.Blackbox`|`blackboxai, gpt-4, gpt-4o, o3-mini, claude-3.5-sonnet, gemini-1.5-flash, gemini-1.5-pro, blackboxai-pro, llama-3.1-8b, llama-3.1-70b, llama-3-1-405b, llama-3.3-70b, mixtral-small-28b, deepseek-chat, dbrx-instruct, qwq-32b, hermes-2-dpo, deepseek-r1, gemini-2.0-flash` _**(+34)**_|`flux`|`blackboxai, gpt-4o, o3-mini, gemini-1.5-pro, gemini-1.5-flash, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gemini-2.0-flash`|✔|![](https://img.shields.io/badge/Active-brightgreen)|
 |[api.blackbox.ai](https://api.blackbox.ai)|No auth required|`g4f.Provider.BlackboxAPI`|`deepseek-v3, deepseek-r1, deepseek-chat, mixtral-small-28b, dbrx-instruct, qwq-32b, hermes-2-dpo`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
 |[cablyai.com](https://cablyai.com)|Optional API key|`g4f.Provider.CablyAI`|`gpt-4o-mini, llama-3.1-8b, deepseek-v3, deepseek-r1, hermes-3, o3-mini-low, o3-mini, sonar-reasoning` _**(2+)**_|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
 |[chatglm.cn](https://chatglm.cn)|No auth required|`g4f.Provider.ChatGLM`|`glm-4`|❌|❌|✔|![](https://img.shields.io/badge/Active-brightgreen)|
@@ -157,7 +157,7 @@ This document provides an overview of various AI providers and models, including
 |gemini-exp|Google DeepMind|1+ Providers|[blog.google](https://blog.google/feed/gemini-exp-1206/)|
 |gemini-1.5-flash|Google DeepMind|5+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
 |gemini-1.5-pro|Google DeepMind|5+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/pro/)|
-|gemini-2.0-flash|Google DeepMind|3+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
+|gemini-2.0-flash|Google DeepMind|4+ Providers|[deepmind.google](https://deepmind.google/technologies/gemini/flash/)|
 |gemini-2.0-flash-thinking|Google DeepMind|1+ Providers|[ai.google.dev](https://ai.google.dev/gemini-api/docs/thinking-mode)|
 |claude-3-haiku|Anthropic|2+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-haiku)|
 |claude-3-sonnet|Anthropic|1+ Providers|[anthropic.com](https://www.anthropic.com/news/claude-3-family)|

diff --git a/g4f/Provider/Blackbox.py b/g4f/Provider/Blackbox.py
index 5ae48301..81f7c232 100644
--- a/g4f/Provider/Blackbox.py
+++ b/g4f/Provider/Blackbox.py
@@ -39,9 +39,9 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     default_vision_model = default_model
     default_image_model = 'ImageGeneration'
     image_models = [default_image_model]
-    vision_models = [default_vision_model, 'gpt-4o', 'o3-mini', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b']
+    vision_models = [default_vision_model, 'gpt-4o', 'o3-mini', 'gemini-pro', 'gemini-1.5-flash', 'llama-3.1-8b', 'llama-3.1-70b', 'llama-3.1-405b', 'Gemini-Flash-2.0']

-    userSelectedModel = ['gpt-4o', 'o3-mini', 'gemini-pro', 'claude-sonnet-3.5', 'DeepSeek-V3', 'DeepSeek-R1', 'blackboxai-pro', 'Meta-Llama-3.3-70B-Instruct-Turbo', 'Mistral-Small-24B-Instruct-2501', 'DeepSeek-LLM-Chat-(67B)', 'DBRX-Instruct', 'Qwen-QwQ-32B-Preview', 'Nous-Hermes-2-Mixtral-8x7B-DPO']
+    userSelectedModel = ['gpt-4o', 'o3-mini', 'gemini-pro', 'claude-sonnet-3.5', 'DeepSeek-V3', 'DeepSeek-R1', 'blackboxai-pro', 'Meta-Llama-3.3-70B-Instruct-Turbo', 'Mistral-Small-24B-Instruct-2501', 'DeepSeek-LLM-Chat-(67B)', 'DBRX-Instruct', 'Qwen-QwQ-32B-Preview', 'Nous-Hermes-2-Mixtral-8x7B-DPO', 'Gemini-Flash-2.0']

     agentMode = {
         'DeepSeek-V3': {'mode': True, 'id': "deepseek-chat", 'name': "DeepSeek-V3"},
@@ -52,6 +52,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         'DBRX-Instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "DBRX-Instruct"},
         'Qwen-QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
         'Nous-Hermes-2-Mixtral-8x7B-DPO': {'mode': True, 'id': "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 'name': "Nous-Hermes-2-Mixtral-8x7B-DPO"},
+        'Gemini-Flash-2.0': {'mode': True, 'id': "Gemini/Gemini-Flash-2.0", 'name': "Gemini-Flash-2.0"},
     }

     trendingAgentMode = {
@@ -109,6 +110,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         "dbrx-instruct": "DBRX-Instruct",
         "qwq-32b": "Qwen-QwQ-32B-Preview",
         "hermes-2-dpo": "Nous-Hermes-2-Mixtral-8x7B-DPO",
+        "gemini-2.0-flash": "Gemini-Flash-2.0",
         "flux": "ImageGeneration",
     }

@@ -284,7 +286,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
             "vscodeClient": False,
             "codeInterpreterMode": False,
             "customProfile": {"name": "", "occupation": "", "traits": [], "additionalInfo": "", "enableNewChats": False},
-            "session": {"user":{"name":"John Doe","email":"john.doe@gmail.com","image":"https://lh3.googleusercontent.com/a/ACg8ocK9X7mNpQ2vR4jH3tY8wL5nB1xM6fDS9JW2kLpTn4Vy3hR2xN4m=s96-c"},"expires":datetime.now(timezone.utc).isoformat(timespec='milliseconds').replace('+00:00', 'Z'), "status": "PREMIUM"},
+            "session": {"user":{"name":"John Doe","email":"john.doe@gmail.com","image":"https://lh3.googleusercontent.com/a/ACg8ocK9X7mNpQ2vR4jH3tY8wL5nB1xM6fDS9JW2kLpTn4Vy3hR2xN4m=s96-c","subscriptionStatus":"PREMIUM"},"expires":datetime.now(timezone.utc).isoformat(timespec='milliseconds').replace('+00:00', 'Z')},
             "webSearchMode": False
         }

diff --git a/g4f/models.py b/g4f/models.py
index fa307bcc..8772b9bd 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -344,7 +344,7 @@ gemini_1_5_pro = Model(
 gemini_2_0_flash = Model(
     name = 'gemini-2.0-flash',
     base_provider = 'Google DeepMind',
-    best_provider = IterListProvider([PollinationsAI, GeminiPro, Liaobots])
+    best_provider = IterListProvider([Blackbox, PollinationsAI, GeminiPro, Liaobots])
 )

 gemini_2_0_flash_thinking = Model(
--
cgit v1.2.3
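
A minimal usage sketch for the new alias, assuming g4f's standard client interface (g4f.client.Client with chat.completions.create); the provider pin and prompt below are illustrative only, not part of the patch:

# Sketch: exercise the new Blackbox gemini-2.0-flash alias (assumes the standard g4f client API).
from g4f.client import Client
from g4f.Provider import Blackbox

# Pin the request to the Blackbox provider so the alias added in this patch is used.
client = Client(provider=Blackbox)

# "gemini-2.0-flash" resolves to "Gemini-Flash-2.0" via the model_aliases entry above.
response = client.chat.completions.create(
    model="gemini-2.0-flash",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response.choices[0].message.content)

Without the provider pin, g4f would fall back to the IterListProvider order set in g4f/models.py, which this patch extends to try Blackbox first for gemini-2.0-flash.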