From d57f77c2e73b99199797b92e0fd6747019facf5b Mon Sep 17 00:00:00 2001 From: kqlio67 <166700875+kqlio67@users.noreply.github.com> Date: Sun, 28 Jul 2024 13:34:28 +0300 Subject: Fixed a bug in the Liaobots provider --- g4f/Provider/Liaobots.py | 118 +++++----------------------------- g4f/models.py | 163 ++++++++++++++++++++++++----------------------- 2 files changed, 97 insertions(+), 184 deletions(-) diff --git a/g4f/Provider/Liaobots.py b/g4f/Provider/Liaobots.py index 0cb5edff..af90860d 100644 --- a/g4f/Provider/Liaobots.py +++ b/g4f/Provider/Liaobots.py @@ -57,115 +57,26 @@ models = { }, "gpt-4-0613": { "id": "gpt-4-0613", - "name": "GPT-4-0613", + "name": "GPT-4", "model": "ChatGPT", "provider": "OpenAI", - "maxLength": 32000, - "tokenLimit": 7600, - "context": "8K", - }, - "claude-3-opus-20240229": { - "id": "claude-3-opus-20240229", - "name": "Claude-3-Opus", - "model": "Claude", - "provider": "Anthropic", - "maxLength": 800000, - "tokenLimit": 200000, - "context": "200K", - }, - "claude-3-opus-20240229-aws": { - "id": "claude-3-opus-20240229-aws", - "name": "Claude-3-Opus-Aws", - "model": "Claude", - "provider": "Anthropic", - "maxLength": 800000, - "tokenLimit": 200000, - "context": "200K", - }, - "claude-3-opus-100k-poe": { - "id": "claude-3-opus-100k-poe", - "name": "Claude-3-Opus-100k-Poe", - "model": "Claude", - "provider": "Anthropic", - "maxLength": 400000, - "tokenLimit": 99000, - "context": "100K", - }, - "claude-3-sonnet-20240229": { - "id": "claude-3-sonnet-20240229", - "name": "Claude-3-Sonnet", - "model": "Claude", - "provider": "Anthropic", - "maxLength": 800000, - "tokenLimit": 200000, - "context": "200K", - }, - "claude-3-5-sonnet-20240620": { - "id": "claude-3-5-sonnet-20240620", - "name": "Claude-3.5-Sonnet", - "model": "Claude", - "provider": "Anthropic", - "maxLength": 800000, - "tokenLimit": 200000, - "context": "200K", - }, - "claude-3-haiku-20240307": { - "id": "claude-3-haiku-20240307", - "name": "Claude-3-Haiku", - "model": "Claude", 
- "provider": "Anthropic", - "maxLength": 800000, - "tokenLimit": 200000, - "context": "200K", - }, - "claude-2.1": { - "id": "claude-2.1", - "name": "Claude-2.1-200k", - "model": "Claude", - "provider": "Anthropic", - "maxLength": 800000, - "tokenLimit": 200000, - "context": "200K", - }, - "claude-2.0": { - "id": "claude-2.0", - "name": "Claude-2.0-100k", - "model": "Claude", - "provider": "Anthropic", - "maxLength": 400000, - "tokenLimit": 100000, - "context": "100K", - }, - "gemini-1.0-pro-latest": { - "id": "gemini-1.0-pro-latest", - "name": "Gemini-Pro", - "model": "Gemini", - "provider": "Google", - "maxLength": 120000, - "tokenLimit": 30000, - "context": "32K", + "maxLength": 260000, + "tokenLimit": 126000, + "context": "128K", }, - "gemini-1.5-flash-latest": { - "id": "gemini-1.5-flash-latest", - "name": "Gemini-1.5-Flash-1M", - "model": "Gemini", - "provider": "Google", - "maxLength": 4000000, - "tokenLimit": 1000000, - "context": "1024K", + "gpt-4-turbo": { + "id": "gpt-4-turbo", + "name": "GPT-4-Turbo", + "model": "ChatGPT", + "provider": "OpenAI", + "maxLength": 260000, + "tokenLimit": 126000, + "context": "128K", }, - "gemini-1.5-pro-latest": { - "id": "gemini-1.5-pro-latest", - "name": "Gemini-1.5-Pro-1M", - "model": "Gemini", - "provider": "Google", - "maxLength": 4000000, - "tokenLimit": 1000000, - "context": "1024K", - } } + class Liaobots(AsyncGeneratorProvider, ProviderModelMixin): url = "https://liaobots.site" working = True @@ -178,13 +89,14 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin): model_aliases = { "gpt-4o-mini": "gpt-4o-mini-free", "gpt-4o": "gpt-4o-free", + "gpt-4-turbo": "gpt-4-turbo-2024-04-09", + "gpt-4": "gpt-4-0613", "claude-3-opus": "claude-3-opus-20240229", "claude-3-opus": "claude-3-opus-20240229-aws", "claude-3-opus": "claude-3-opus-20240229-gcp", "claude-3-sonnet": "claude-3-sonnet-20240229", "claude-3-5-sonnet": "claude-3-5-sonnet-20240620", "claude-3-haiku": "claude-3-haiku-20240307", - "gpt-4-turbo": 
"gpt-4-turbo-2024-04-09", "gemini-pro": "gemini-1.5-pro-latest", "gemini-pro": "gemini-1.0-pro-latest", "gemini-flash": "gemini-1.5-flash-latest", diff --git a/g4f/models.py b/g4f/models.py index 21079507..f837223a 100644 --- a/g4f/models.py +++ b/g4f/models.py @@ -161,9 +161,10 @@ gpt_4_32k_0613 = Model( gpt_4_turbo = Model( name = 'gpt-4-turbo', base_provider = 'openai', - best_provider = Bing + best_provider = IterListProvider([ + Bing, Liaobots + ]) ) - gpt_4o = Model( name = 'gpt-4o', base_provider = 'openai', @@ -497,50 +498,50 @@ class ModelUtils: """ convert: dict[str, Model] = { - ############ - ### Text ### - ############ +############ +### Text ### +############ ### OpenAI ### ### GPT-3.5 / GPT-4 ### - # gpt-3.5 - 'gpt-3.5-turbo' : gpt_35_turbo, - 'gpt-3.5-turbo-0613' : gpt_35_turbo_0613, - 'gpt-3.5-turbo-16k' : gpt_35_turbo_16k, - 'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613, - 'gpt-3.5-long': gpt_35_long, - - # gpt-4 - 'gpt-4o' : gpt_4o, - 'gpt-4o-mini' : gpt_4o_mini, - 'gpt-4' : gpt_4, - 'gpt-4-0613' : gpt_4_0613, - 'gpt-4-32k' : gpt_4_32k, - 'gpt-4-32k-0613' : gpt_4_32k_0613, - 'gpt-4-turbo' : gpt_4_turbo, + # gpt-3.5 + 'gpt-3.5-turbo' : gpt_35_turbo, + 'gpt-3.5-turbo-0613' : gpt_35_turbo_0613, + 'gpt-3.5-turbo-16k' : gpt_35_turbo_16k, + 'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613, + 'gpt-3.5-long': gpt_35_long, + + # gpt-4 + 'gpt-4o' : gpt_4o, + 'gpt-4o-mini' : gpt_4o_mini, + 'gpt-4' : gpt_4, + 'gpt-4-0613' : gpt_4_0613, + 'gpt-4-32k' : gpt_4_32k, + 'gpt-4-32k-0613' : gpt_4_32k_0613, + 'gpt-4-turbo' : gpt_4_turbo, ### Meta ### - "meta-ai": meta, + "meta-ai": meta, - 'llama3-8b': llama3_8b_instruct, # alias - 'llama3-70b': llama3_70b_instruct, # alias - 'llama3-8b-instruct' : llama3_8b_instruct, - 'llama3-70b-instruct': llama3_70b_instruct, - 'llama-3.1-70b-Instruct': llama_3_1_70b_Instruct, - 'llama-3.1-405B-Instruct-FP8': llama_3_1_405b_Instruct_FP8, + 'llama3-8b': llama3_8b_instruct, # alias + 'llama3-70b': llama3_70b_instruct, # alias + 
'llama3-8b-instruct' : llama3_8b_instruct, + 'llama3-70b-instruct': llama3_70b_instruct, + 'llama-3.1-70b-Instruct': llama_3_1_70b_Instruct, + 'llama-3.1-405B-Instruct-FP8': llama_3_1_405b_Instruct_FP8, - 'codellama-34b-instruct': codellama_34b_instruct, - 'codellama-70b-instruct': codellama_70b_instruct, + 'codellama-34b-instruct': codellama_34b_instruct, + 'codellama-70b-instruct': codellama_70b_instruct, - ### Mistral (Opensource) ### - 'mixtral-8x7b': mixtral_8x7b, - 'mistral-7b-v02': mistral_7b_v02, + ### Mistral (Opensource) ### + 'mixtral-8x7b': mixtral_8x7b, + 'mistral-7b-v02': mistral_7b_v02, - ### NousResearch ### + ### NousResearch ### 'Nous-Hermes-2-Mixtral-8x7B-DPO': Nous_Hermes_2_Mixtral_8x7B_DPO, @@ -552,95 +553,95 @@ class ModelUtils: 'Phi-3-mini-4k-instruct': Phi_3_mini_4k_instruct, - ### Google ### - # gemini - 'gemini': gemini, - 'gemini-pro': gemini_pro, - 'gemini-flash': gemini_flash, - - # gemma - 'gemma-2b-it': gemma_2b_it, - 'gemma-2-9b-it': gemma_2_9b_it, - 'gemma-2-27b-it': gemma_2_27b_it, + ### Google ### + # gemini + 'gemini': gemini, + 'gemini-pro': gemini_pro, + 'gemini-flash': gemini_flash, + + # gemma + 'gemma-2b-it': gemma_2b_it, + 'gemma-2-9b-it': gemma_2_9b_it, + 'gemma-2-27b-it': gemma_2_27b_it, - ### Anthropic ### - 'claude-2': claude_2, - 'claude-2.0': claude_2_0, - 'claude-2.1': claude_2_1, - 'claude-3-opus': claude_3_opus, - 'claude-3-sonnet': claude_3_sonnet, - 'claude-3-5-sonnet': claude_3_5_sonnet, - 'claude-3-haiku': claude_3_haiku, + ### Anthropic ### + 'claude-2': claude_2, + 'claude-2.0': claude_2_0, + 'claude-2.1': claude_2_1, + 'claude-3-opus': claude_3_opus, + 'claude-3-sonnet': claude_3_sonnet, + 'claude-3-5-sonnet': claude_3_5_sonnet, + 'claude-3-haiku': claude_3_haiku, - ### Reka AI ### - 'reka': reka_core, + ### Reka AI ### + 'reka': reka_core, - ### NVIDIA ### - 'nemotron-4-340b-instruct': nemotron_4_340b_instruct, - + ### NVIDIA ### + 'nemotron-4-340b-instruct': nemotron_4_340b_instruct, + ### Blackbox ### 
'blackbox': blackbox, ### CohereForAI ### - 'command-r+': command_r_plus, + 'command-r+': command_r_plus, - ### Databricks ### - 'dbrx-instruct': dbrx_instruct, + ### Databricks ### + 'dbrx-instruct': dbrx_instruct, ### GigaChat ### - 'gigachat': gigachat, + 'gigachat': gigachat, ### iFlytek ### - 'SparkDesk-v1.1': SparkDesk_v1_1, + 'SparkDesk-v1.1': SparkDesk_v1_1, ### DeepSeek ### - 'deepseek-coder': deepseek_coder, - 'deepseek-chat': deepseek_chat, - + 'deepseek-coder': deepseek_coder, + 'deepseek-chat': deepseek_chat, - ### ### Qwen ### ### - 'Qwen2-7B-Instruct': Qwen2_7B_Instruct, + ### ### Qwen ### ### + 'Qwen2-7B-Instruct': Qwen2_7B_Instruct, - ### Zhipu AI ### - 'glm4-9B-chat': glm4_9B_chat, - 'chatglm3-6B': chatglm3_6B, + ### Zhipu AI ### + 'glm4-9B-chat': glm4_9B_chat, + 'chatglm3-6B': chatglm3_6B, - ### 01-ai ### - 'Yi-1.5-9B-Chat': Yi_1_5_9B_Chat, + ### 01-ai ### + 'Yi-1.5-9B-Chat': Yi_1_5_9B_Chat, - # Other - 'pi': pi, + # Other + 'pi': pi, + - ############# - ### Image ### - ############# +############# +### Image ### +############# ### Stability AI ### - 'sdxl': sdxl, - 'stable-diffusion-3': stable_diffusion_3, + 'sdxl': sdxl, + 'stable-diffusion-3': stable_diffusion_3, ### ByteDance ### - 'sdxl-lightning-4step': sdxl_lightning_4step, + 'sdxl-lightning-4step': sdxl_lightning_4step, ### ByteDance ### - 'sdxl-lightning-4step': sdxl_lightning_4step, + 'sdxl-lightning-4step': sdxl_lightning_4step, ### Playground ### - 'playground-v2.5-1024px-aesthetic': playground_v2_5_1024px_aesthetic, + 'playground-v2.5-1024px-aesthetic': playground_v2_5_1024px_aesthetic, } -- cgit v1.2.3