author    kqlio67 <kqlio67@users.noreply.github.com>  2024-09-04 01:09:29 +0200
committer kqlio67 <kqlio67@users.noreply.github.com>  2024-09-04 01:09:29 +0200
commit    7483a7c310d581c6012ed51607b6b57b3cab8018
tree      5a7607911a4a7ad64f3e8d543faf3229443beea8
parent    New TwitterBio provider with support for gpt-3.5-turbo and mixtral-8x7b models
Diffstat:
 g4f/Provider/AiChats.py  |  66 +
 g4f/Provider/Nexra.py    |  93 +
 g4f/Provider/Snova.py    | 133 +
 g4f/Provider/__init__.py |   3 +
 g4f/models.py            |  76 +-
 5 files changed, 362 insertions(+), 9 deletions(-)
diff --git a/g4f/Provider/AiChats.py b/g4f/Provider/AiChats.py
new file mode 100644
index 00000000..f3eb3fc0
--- /dev/null
+++ b/g4f/Provider/AiChats.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider
+from .helper import format_prompt
+
+
+class AiChats(AsyncGeneratorProvider):
+    url = "https://ai-chats.org"
+    api_endpoint = "https://ai-chats.org/chat/send2/"
+    working = True
+    supports_gpt_4 = True
+    supports_message_history = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+
+        headers = {
+            "accept": "application/json, text/event-stream",
+            "accept-language": "en-US,en;q=0.9",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "origin": cls.url,
+            "pragma": "no-cache",
+            "referer": f"{cls.url}/chat/",
+            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+        }
+
+        async with ClientSession(headers=headers) as session:
+            prompt = format_prompt(messages)
+            data = {
+                "type": "chat",
+                "messagesHistory": [
+                    {
+                        "from": "you",
+                        "content": prompt
+                    }
+                ]
+            }
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                full_response = await response.text()
+
+                # The endpoint answers in SSE format; keep only the data payloads.
+                message = ""
+                for line in full_response.split('\n'):
+                    if line.startswith('data: ') and line != 'data: ':
+                        message += line[6:]
+
+                message = message.strip()
+
+                yield message
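Note: a minimal smoke test for the new provider might look like the following. This is a sketch, not part of the commit; it assumes this checkout of g4f is importable and uses the package's public ChatCompletion entry point.

import g4f
from g4f.Provider import AiChats

# Route a single prompt through AiChats; the provider yields the whole
# reply in one chunk, so the returned string is the complete message.
response = g4f.ChatCompletion.create(
    model="gpt-4",
    provider=AiChats,
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response)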
diff --git a/g4f/Provider/Nexra.py b/g4f/Provider/Nexra.py
new file mode 100644
index 00000000..4914b930
--- /dev/null
+++ b/g4f/Provider/Nexra.py
@@ -0,0 +1,93 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Nexra(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://nexra.aryahcr.cc"
+    api_endpoint = "https://nexra.aryahcr.cc/api/chat/gpt"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+
+    default_model = 'gpt-3.5-turbo'
+    models = [
+        # Working with text
+        'gpt-4',
+        'gpt-4-0613',
+        'gpt-4-32k',
+        'gpt-4-0314',
+        'gpt-4-32k-0314',
+
+        'gpt-3.5-turbo',
+        'gpt-3.5-turbo-16k',
+        'gpt-3.5-turbo-0613',
+        'gpt-3.5-turbo-16k-0613',
+        'gpt-3.5-turbo-0301',
+
+        'gpt-3',
+        'text-davinci-003',
+        'text-davinci-002',
+        'code-davinci-002',
+        'text-curie-001',
+        'text-babbage-001',
+        'text-ada-001',
+        'davinci',
+        'curie',
+        'babbage',
+        'ada',
+        'babbage-002',
+        'davinci-002',
+    ]
+
+    # A dict literal cannot repeat keys (later duplicates silently win),
+    # so each alias maps to exactly one canonical model.
+    model_aliases = {
+        "gpt-4": "gpt-4-0613",
+        "gpt-3.5-turbo": "gpt-3.5-turbo-0613",
+        "gpt-3": "text-davinci-003",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+
+        headers = {
+            "Content-Type": "application/json",
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "messages": messages,
+                "prompt": format_prompt(messages),
+                "model": model,
+                "markdown": False,
+                "stream": False,
+            }
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                result = await response.text()
+                json_result = json.loads(result)
+                yield json_result["gpt"]  # the completion text is returned under the "gpt" key
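Note: get_model above resolves names in three steps: an exact match in models, then an alias lookup, then the default. A quick illustration (hypothetical REPL session):

from g4f.Provider import Nexra

print(Nexra.get_model("gpt-4"))          # listed in models, returned unchanged
print(Nexra.get_model("gpt-4-0613"))     # also listed, returned unchanged
print(Nexra.get_model("no-such-model"))  # unknown names fall back to 'gpt-3.5-turbo'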
diff --git a/g4f/Provider/Snova.py b/g4f/Provider/Snova.py
new file mode 100644
index 00000000..76dfac40
--- /dev/null
+++ b/g4f/Provider/Snova.py
@@ -0,0 +1,133 @@
+from __future__ import annotations
+
+import json
+from typing import AsyncGenerator
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class Snova(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://fast.snova.ai"
+    api_endpoint = "https://fast.snova.ai/api/completion"
+    working = True
+    supports_stream = True
+    supports_system_message = True
+    supports_message_history = True
+
+    default_model = 'Meta-Llama-3.1-8B-Instruct'
+    models = [
+        'Meta-Llama-3.1-8B-Instruct',
+        'Meta-Llama-3.1-70B-Instruct',
+        'Meta-Llama-3.1-405B-Instruct',
+        'Samba-CoE',
+        'ignos/Mistral-T5-7B-v1',
+        'v1olet/v1olet_merged_dpo_7B',
+        'macadeliccc/WestLake-7B-v2-laser-truthy-dpo',
+        'cookinai/DonutLM-v1',
+    ]
+
+    model_aliases = {
+        "llama-3.1-8b": "Meta-Llama-3.1-8B-Instruct",
+        "llama-3.1-70b": "Meta-Llama-3.1-70B-Instruct",
+        "llama-3.1-405b": "Meta-Llama-3.1-405B-Instruct",
+
+        "mistral-7b": "ignos/Mistral-T5-7B-v1",
+
+        "samba-coe-v0.1": "Samba-CoE",
+        "v1olet-merged-7b": "v1olet/v1olet_merged_dpo_7B",
+        "westlake-7b-v2": "macadeliccc/WestLake-7B-v2-laser-truthy-dpo",
+        "donutlm-v1": "cookinai/DonutLM-v1",
+    }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        else:
+            return cls.default_model
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncGenerator[str, None]:
+        model = cls.get_model(model)
+
+        headers = {
+            "accept": "text/event-stream",
+            "accept-language": "en-US,en;q=0.9",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "origin": cls.url,
+            "pragma": "no-cache",
+            "priority": "u=1, i",
+            "referer": f"{cls.url}/",
+            "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Linux"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-origin",
+            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+        }
+        async with ClientSession(headers=headers) as session:
+            data = {
+                "body": {
+                    "messages": [
+                        {
+                            "role": "system",
+                            "content": "You are a helpful assistant."
+                        },
+                        {
+                            "role": "user",
+                            "content": format_prompt(messages),
+                            "id": "1-id",
+                            "ref": "1-ref",
+                            "revision": 1,
+                            "draft": False,
+                            "status": "done",
+                            "enableRealTimeChat": False,
+                            "meta": None
+                        }
+                    ],
+                    "max_tokens": 1000,
+                    "stop": ["<|eot_id|>"],
+                    "stream": True,
+                    "stream_options": {"include_usage": True},
+                    "model": model
+                },
+                "env_type": "tp16"
+            }
+            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                full_response = ""
+                async for line in response.content:
+                    line = line.decode().strip()
+                    if line.startswith("data: "):
+                        chunk = line[6:]  # payload after the "data: " prefix
+                        if chunk == "[DONE]":
+                            break
+                        try:
+                            json_data = json.loads(chunk)
+                            choices = json_data.get("choices", [])
+                            if choices:
+                                delta = choices[0].get("delta", {})
+                                content = delta.get("content", "")
+                                full_response += content
+                        except json.JSONDecodeError:
+                            continue
+                        except Exception as e:
+                            print(f"Error processing chunk: {e}")
+                            print(f"Problematic chunk: {chunk}")
+                            continue
+
+                yield full_response.strip()
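Note: the loop above consumes OpenAI-style server-sent events and accumulates the deltas before yielding once at the end. For reference, this is roughly the shape of the stream it parses (payloads invented for illustration; the live service may include more fields):

import json

sample_stream = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "lo!"}}]}',
    'data: [DONE]',
]

full_response = ""
for line in sample_stream:
    chunk = line[6:]           # strip the "data: " prefix
    if chunk == "[DONE]":      # sentinel that ends the stream
        break
    delta = json.loads(chunk)["choices"][0]["delta"]
    full_response += delta.get("content", "")

print(full_response)  # Hello!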
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index 83d3e6e1..10499fdf 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -12,6 +12,7 @@ from .needs_auth import *
 from .AI365VIP import AI365VIP
 from .Allyfy import Allyfy
 from .AiChatOnline import AiChatOnline
+from .AiChats import AiChats
 from .Aura import Aura
 from .Bing import Bing
 from .BingCreateImages import BingCreateImages
@@ -42,11 +43,13 @@ from .MagickPenAsk import MagickPenAsk
 from .MagickPenChat import MagickPenChat
 from .MetaAI import MetaAI
 from .MetaAIAccount import MetaAIAccount
+from .Nexra import Nexra
 from .Ollama import Ollama
 from .PerplexityLabs import PerplexityLabs
 from .Pi import Pi
 from .Pizzagpt import Pizzagpt
 from .Reka import Reka
 from .Replicate import Replicate
 from .ReplicateHome import ReplicateHome
 from .Rocks import Rocks
+from .Snova import Snova
diff --git a/g4f/models.py b/g4f/models.py
index a6c259e3..d0cb9f49 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -29,6 +29,7 @@ from .Provider import (
     MagickPenAsk,
     MagickPenChat,
     MetaAI,
+    Nexra,
     OpenaiChat,
     PerplexityLabs,
     Pi,
@@ -36,6 +37,7 @@ from .Provider import (
     Reka,
     Replicate,
     ReplicateHome,
+    Snova,
     TeachAnything,
     TwitterBio,
     Upstage,
@@ -86,20 +88,28 @@ default = Model(
 ############
 
 ### OpenAI ###
-### GPT-3.5 / GPT-4 ###
+# gpt-3
+gpt_3 = Model(
+    name = 'gpt-3',
+    base_provider = 'OpenAI',
+    best_provider = IterListProvider([
+        Nexra,
+    ])
+)
+
 # gpt-3.5
 gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
-    base_provider = 'openai',
+    base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        Allyfy, TwitterBio,
+        Allyfy, TwitterBio, Nexra,
     ])
 )
 
 # gpt-4
 gpt_4o = Model(
     name = 'gpt-4o',
-    base_provider = 'openai',
+    base_provider = 'OpenAI',
     best_provider = IterListProvider([
         Liaobots, Chatgpt4o, OpenaiChat,
     ])
@@ -107,7 +117,7 @@ gpt_4o = Model(
 
 gpt_4o_mini = Model(
     name = 'gpt-4o-mini',
-    base_provider = 'openai',
+    base_provider = 'OpenAI',
     best_provider = IterListProvider([
         DDG, Liaobots, You, FreeNetfly, MagickPenAsk, MagickPenChat, Pizzagpt, ChatgptFree, AiChatOnline, OpenaiChat, Koala,
     ])
@@ -115,17 +125,17 @@ gpt_4o_mini = Model(
 
 gpt_4_turbo = Model(
     name = 'gpt-4-turbo',
-    base_provider = 'openai',
+    base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        Liaobots, Bing
+        Nexra, Liaobots, Bing
     ])
 )
 
 gpt_4 = Model(
     name = 'gpt-4',
-    base_provider = 'openai',
+    base_provider = 'OpenAI',
     best_provider = IterListProvider([
-        Chatgpt4Online, Bing,
+        Chatgpt4Online, Nexra, Bing,
         gpt_4_turbo.best_provider, gpt_4o.best_provider, gpt_4o_mini.best_provider
     ])
 )
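Note: gpt-4-turbo and gpt-4 now include Nexra in the pool that IterListProvider tries until one provider succeeds. A simplified sketch of that fallback idea (not g4f's actual implementation, which also handles provider shuffling and streaming):

class SimpleIterProvider:
    def __init__(self, providers):
        self.providers = providers

    def create_completion(self, model, messages):
        last_error = None
        for provider in self.providers:
            try:
                # the first provider that answers without raising wins
                return provider.create_completion(model, messages)
            except Exception as error:
                last_error = error  # remember the failure, try the next one
        raise last_error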
@@ -358,6 +368,35 @@ pi = Model(
     best_provider = Pi
 )
 
+### SambaNova ###
+samba_coe_v0_1 = Model(
+    name = 'samba-coe-v0.1',
+    base_provider = 'SambaNova',
+    best_provider = Snova
+)
+
+### Trong-Hieu Nguyen-Mau ###
+v1olet_merged_7b = Model(
+    name = 'v1olet-merged-7b',
+    base_provider = 'Trong-Hieu Nguyen-Mau',
+    best_provider = Snova
+)
+
+### Macadeliccc ###
+westlake_7b_v2 = Model(
+    name = 'westlake-7b-v2',
+    base_provider = 'Macadeliccc',
+    best_provider = Snova
+)
+
+### CookinAI ###
+donutlm_v1 = Model(
+    name = 'donutlm-v1',
+    base_provider = 'CookinAI',
+    best_provider = Snova
+)
+
+
 
 #############
 ### Image ###
@@ -436,6 +475,9 @@ class ModelUtils:
         ############
 
         ### OpenAI ###
+        # gpt-3
+        'gpt-3': gpt_3,
+
         # gpt-3.5
         'gpt-3.5-turbo': gpt_35_turbo,
 
@@ -536,6 +578,22 @@ class ModelUtils:
         ### Pi ###
         'pi': pi,
+
+
+        ### SambaNova ###
+        'samba-coe-v0.1': samba_coe_v0_1,
+
+
+        ### Trong-Hieu Nguyen-Mau ###
+        'v1olet-merged-7b': v1olet_merged_7b,
+
+
+        ### Macadeliccc ###
+        'westlake-7b-v2': westlake_7b_v2,
+
+
+        ### CookinAI ###
+        'donutlm-v1': donutlm_v1,
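Note: with these registrations in place, the new names resolve through ModelUtils like any other model. A short sketch (assumes this models.py is importable as g4f.models):

from g4f.models import ModelUtils

model = ModelUtils.convert['samba-coe-v0.1']
print(model.name)           # samba-coe-v0.1
print(model.base_provider)  # SambaNova
print(model.best_provider)  # the Snova provider class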