-rw-r--r--  g4f/Provider/TwitterBio.py  113
-rw-r--r--  g4f/Provider/__init__.py      1
-rw-r--r--  g4f/models.py                21
3 files changed, 125 insertions, 10 deletions
diff --git a/g4f/Provider/TwitterBio.py b/g4f/Provider/TwitterBio.py
new file mode 100644
index 00000000..c143e4ff
--- /dev/null
+++ b/g4f/Provider/TwitterBio.py
@@ -0,0 +1,113 @@
+from __future__ import annotations
+
+import json
+import re
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+class TwitterBio(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://www.twitterbio.io"
+ api_endpoint_mistral = "https://www.twitterbio.io/api/mistral"
+ api_endpoint_openai = "https://www.twitterbio.io/api/openai"
+ working = True
+ supports_gpt_35_turbo = True
+
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+ 'gpt-3.5-turbo',
+ ]
+
+ model_aliases = {
+ "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+ }
+
+    @classmethod
+    def get_model(cls, model: str) -> str:
+        # Resolve aliases such as "mixtral-8x7b" before falling back to the default.
+        if model in cls.models:
+            return model
+        elif model in cls.model_aliases:
+            return cls.model_aliases[model]
+        return cls.default_model
+
+ @staticmethod
+ def format_text(text: str) -> str:
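+        # Collapse runs of whitespace and drop stray spaces left before
+        # punctuation; the buffer is stitched together from many streamed chunks.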
+ text = re.sub(r'\s+', ' ', text.strip())
+ text = re.sub(r'\s+([,.!?])', r'\1', text)
+ return text
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": cls.url,
+ "pragma": "no-cache",
+ "priority": "u=1, i",
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"
+ }
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "prompt": f'{prompt}.'
+ }
+
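+            # Pick the per-model API route exposed by the site.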
+ if model == 'mistralai/Mixtral-8x7B-Instruct-v0.1':
+ api_endpoint = cls.api_endpoint_mistral
+ elif model == 'gpt-3.5-turbo':
+ api_endpoint = cls.api_endpoint_openai
+ else:
+ raise ValueError(f"Unsupported model: {model}")
+
+            async with session.post(api_endpoint, json=data, proxy=proxy) as response:
+                response.raise_for_status()
+                buffer = ""
+                # The endpoint streams Server-Sent Events; collect the text
+                # chunks and emit one cleaned-up string at the end.
+                async for line in response.content:
+                    line = line.decode('utf-8').strip()
+                    # Check the terminator first: it also starts with 'data: ',
+                    # so an elif placed after the prefix match can never fire.
+                    if line == 'data: [DONE]':
+                        break
+                    if line.startswith('data: '):
+                        try:
+                            json_data = json.loads(line[6:])
+                            if model == 'mistralai/Mixtral-8x7B-Instruct-v0.1':
+                                if 'choices' in json_data and len(json_data['choices']) > 0:
+                                    text = json_data['choices'][0].get('text', '')
+                                    if text:
+                                        buffer += text
+                            elif model == 'gpt-3.5-turbo':
+                                text = json_data.get('text', '')
+                                if text:
+                                    buffer += text
+                        except json.JSONDecodeError:
+                            continue
+
+ if buffer:
+ yield cls.format_text(buffer)
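A quick way to exercise the new provider end to end is to select it explicitly. The following is a minimal sketch, not part of the patch, assuming g4f's ChatCompletion.create_async entry point and its provider keyword; the "mixtral-8x7b" alias is resolved by get_model via model_aliases:

    import asyncio

    import g4f
    from g4f.Provider import TwitterBio

    async def main() -> None:
        # Pin the provider explicitly; the alias resolves to the Mixtral endpoint.
        answer = await g4f.ChatCompletion.create_async(
            model="mixtral-8x7b",
            provider=TwitterBio,
            messages=[{"role": "user", "content": "Write a one-line Twitter bio for a barista."}],
        )
        print(answer)

    asyncio.run(main())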
diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py
index e5234685..83d3e6e1 100644
--- a/g4f/Provider/__init__.py
+++ b/g4f/Provider/__init__.py
@@ -51,6 +51,7 @@ from .Replicate import Replicate
 from .ReplicateHome import ReplicateHome
 from .Rocks import Rocks
 from .TeachAnything import TeachAnything
+from .TwitterBio import TwitterBio
 from .Upstage import Upstage
 from .Vercel import Vercel
 from .WhiteRabbitNeo import WhiteRabbitNeo
diff --git a/g4f/models.py b/g4f/models.py
index 4176f0a2..a6c259e3 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -37,6 +37,7 @@ from .Provider import (
     Replicate,
     ReplicateHome,
     TeachAnything,
+    TwitterBio,
     Upstage,
     You,
 )
@@ -91,7 +92,7 @@ gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'openai',
     best_provider = IterListProvider([
-        Allyfy,
+        Allyfy, TwitterBio,
     ])
 )
 
@@ -140,50 +141,50 @@ gigachat = Model(
 ### Meta ###
 meta = Model(
     name = "meta-ai",
-    base_provider = "meta",
+    base_provider = "Meta",
     best_provider = MetaAI
 )
 
 llama_3_8b = Model(
     name = "llama-3-8b",
-    base_provider = "meta",
+    base_provider = "Meta",
     best_provider = IterListProvider([DeepInfra, Replicate])
 )
 
 llama_3_70b = Model(
     name = "llama-3-70b",
-    base_provider = "meta",
+    base_provider = "Meta",
     best_provider = IterListProvider([ReplicateHome, DeepInfra, PerplexityLabs, Replicate])
 )
 
 llama_3_1_8b = Model(
     name = "llama-3.1-8b",
-    base_provider = "meta",
+    base_provider = "Meta",
     best_provider = IterListProvider([Blackbox])
 )
 
 llama_3_1_70b = Model(
     name = "llama-3.1-70b",
-    base_provider = "meta",
+    base_provider = "Meta",
     best_provider = IterListProvider([DDG, HuggingChat, FreeGpt, Blackbox, TeachAnything, HuggingFace])
 )
 
 llama_3_1_405b = Model(
     name = "llama-3.1-405b",
-    base_provider = "meta",
+    base_provider = "Meta",
     best_provider = IterListProvider([HuggingChat, Blackbox, HuggingFace])
 )
 
 
 ### Mistral ###
 mixtral_8x7b = Model(
     name = "mixtral-8x7b",
-    base_provider = "huggingface",
-    best_provider = IterListProvider([HuggingChat, DDG, ReplicateHome, DeepInfra, HuggingFace,])
+    base_provider = "Mistral",
+    best_provider = IterListProvider([HuggingChat, DDG, ReplicateHome, TwitterBio, DeepInfra, HuggingFace,])
 )
 
 mistral_7b = Model(
     name = "mistral-7b",
-    base_provider = "huggingface",
+    base_provider = "Mistral",
     best_provider = IterListProvider([HuggingChat, HuggingFace, DeepInfra])
 )