From a358b28f4762b11d0d398cab134770787ea36e18 Mon Sep 17 00:00:00 2001
From: kqlio67 <166700875+kqlio67@users.noreply.github.com>
Date: Sun, 8 Dec 2024 03:43:51 +0000
Subject: Major Provider Updates and Model Support Enhancements (#2467)

* refactor(g4f/Provider/Airforce.py): improve model handling and filtering

  - Add hidden_models set to exclude specific models
  - Add evil alias for uncensored model handling
  - Extend filtering for model-specific response tokens
  - Add response buffering for streamed content
  - Update model fetching with error handling

* refactor(g4f/Provider/Blackbox.py): improve caching and model handling

  - Add caching system for validated values with file-based storage
  - Rename 'flux' model to 'ImageGeneration' and update references
  - Add temperature, top_p and max_tokens parameters to generator
  - Simplify HTTP headers and remove redundant options
  - Add model alias mapping for ImageGeneration
  - Add file system utilities for cache management

* feat(g4f/Provider/RobocodersAPI.py): add caching and error handling

  - Add file-based caching system for access tokens and sessions
  - Add robust error handling with specific error messages
  - Add automatic dialog continuation on resource limits
  - Add HTML parsing with BeautifulSoup for token extraction
  - Add debug logging for error tracking
  - Add timeout configuration for API requests

* refactor(g4f/Provider/DarkAI.py): update DarkAI default model and aliases

  - Change default model from llama-3-405b to llama-3-70b
  - Remove llama-3-405b from supported models list
  - Remove llama-3.1-405b from model aliases

* feat(g4f/Provider/Blackbox2.py): add image generation support

  - Add image model 'flux' with dedicated API endpoint
  - Refactor generator to support both text and image outputs
  - Extract headers into reusable static method
  - Add type hints for AsyncGenerator return type
  - Split generation logic into _generate_text and _generate_image methods
  - Add ImageResponse handling for image generation results

  BREAKING CHANGE: create_async_generator now returns AsyncGenerator instead of AsyncResult

* refactor(g4f/Provider/ChatGptEs.py): update ChatGptEs model configuration

  - Update models list to include gpt-3.5-turbo
  - Remove chatgpt-4o-latest from supported models
  - Remove model_aliases mapping for gpt-4o

* feat(g4f/Provider/DeepInfraChat.py): add Accept-Language header support

  - Add Accept-Language header for internationalization
  - Maintain existing header configuration
  - Improve request compatibility with language preferences

* refactor(g4f/Provider/needs_auth/Gemini.py): add ProviderModelMixin inheritance

  - Add ProviderModelMixin to class inheritance
  - Import ProviderModelMixin from base_provider
  - Move BaseConversation import to base_provider imports

* refactor(g4f/Provider/Liaobots.py): update model details and aliases

  - Add version suffix to o1 model IDs
  - Update model aliases for o1-preview and o1-mini
  - Standardize version format across model definitions

* refactor(g4f/Provider/PollinationsAI.py): enhance model support and generation

  - Split generation logic into dedicated image/text methods
  - Add additional text models including sur and claude
  - Add width/height parameters for image generation
  - Add model existence validation
  - Add hasattr checks for model lists initialization

* chore(gitignore): add provider cache directory

  - Add g4f/Provider/.cache to gitignore patterns

* refactor(g4f/Provider/ReplicateHome.py): update model configuration

  - Update default model to gemma-2b-it
  - Add default_image_model configuration
  - Remove llava-13b from supported models
  - Simplify request headers
* feat(g4f/models.py): expand provider and model support

  - Add new providers DarkAI and PollinationsAI
  - Add new models for Mistral, Flux and image generation
  - Update provider lists for existing models
  - Add P1 and Evil models with experimental providers

  BREAKING CHANGE: Remove llava-13b model support

* refactor(g4f/Provider/Airforce.py): Update type hint for split_message return

  - Change return type of 'split_message' from 'list[str]' to 'List[str]' for consistency with import.
  - Maintain overall functionality and structure of the 'Airforce' class.
  - Ensure compatibility with type hinting standards in Python.

* feat(g4f/Provider/RobocodersAPI.py): Add support for optional BeautifulSoup dependency

  - Introduce a check for the BeautifulSoup library and handle its absence gracefully.
  - Raise an error if BeautifulSoup is not installed, prompting the user to install it.
  - Remove direct import of BeautifulSoup to avoid import errors when the library is missing.

---------

Co-authored-by: kqlio67 <>
---
 g4f/Provider/PollinationsAI.py | 107 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 107 insertions(+)
 create mode 100644 g4f/Provider/PollinationsAI.py

(limited to 'g4f/Provider/PollinationsAI.py')

diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
new file mode 100644
index 00000000..18349490
--- /dev/null
+++ b/g4f/Provider/PollinationsAI.py
@@ -0,0 +1,107 @@
+from __future__ import annotations
+
+from urllib.parse import quote
+import random
+import requests
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from ..image import ImageResponse
+from ..requests.raise_for_status import raise_for_status
+from ..requests.aiohttp import get_connector
+from .needs_auth.OpenaiAPI import OpenaiAPI
+from .helper import format_prompt
+
+class PollinationsAI(OpenaiAPI):
+    label = "Pollinations.AI"
+    url = "https://pollinations.ai"
+
+    working = True
+    needs_auth = False
+    supports_stream = True
+
+    default_model = "openai"
+
+    additional_models_image = ["unity", "midijourney", "rtist"]
+    additional_models_text = ["sur", "sur-mistral", "claude"]
+
+    model_aliases = {
+        "gpt-4o": "openai",
+        "mistral-nemo": "mistral",
+        "llama-3.1-70b": "llama",
+        "gpt-3.5-turbo": "claude",
+        "gpt-4": "claude",
+        "qwen-2.5-coder-32b": "qwen-coder",
+        "claude-3.5-sonnet": "sur",
+    }
+
+    @classmethod
+    def get_models(cls):
+        # Lazily fetch the model lists once and cache them on the class.
+        if not hasattr(cls, 'image_models'):
+            cls.image_models = []
+        if not cls.image_models:
+            url = "https://image.pollinations.ai/models"
+            response = requests.get(url)
+            raise_for_status(response)
+            cls.image_models = response.json()
+            cls.image_models.extend(cls.additional_models_image)
+        if not hasattr(cls, 'models'):
+            cls.models = []
+        if not cls.models:
+            url = "https://text.pollinations.ai/models"
+            response = requests.get(url)
+            raise_for_status(response)
+            cls.models = [model.get("name") for model in response.json()]
+            cls.models.extend(cls.image_models)
+            cls.models.extend(cls.additional_models_text)
+        return cls.models
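+
+    # Dispatch overview: create_async_generator resolves aliases via
+    # get_model, routes image models to _generate_image (which yields an
+    # ImageResponse URL) and text models to _generate_text (which streams
+    # text); any other model name raises ValueError.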
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        prompt: str = None,
+        api_base: str = "https://text.pollinations.ai/openai",
+        api_key: str = None,
+        proxy: str = None,
+        seed: int = None,
+        width: int = 1024,
+        height: int = 1024,
+        **kwargs
+    ) -> AsyncResult:
+        model = cls.get_model(model)
+        if model in cls.image_models:
+            async for response in cls._generate_image(model, messages, prompt, seed, width, height):
+                yield response
+        elif model in cls.models:
+            async for response in cls._generate_text(model, messages, api_base, api_key, proxy, **kwargs):
+                yield response
+        else:
+            raise ValueError(f"Unknown model: {model}")
+
+    @classmethod
+    async def _generate_image(cls, model: str, messages: Messages, prompt: str = None, seed: int = None, width: int = 1024, height: int = 1024):
+        if prompt is None:
+            prompt = messages[-1]["content"]
+        if seed is None:
+            seed = random.randint(0, 100000)
+        # The image is rendered server-side; yield the constructed URL.
+        image = f"https://image.pollinations.ai/prompt/{quote(prompt)}?width={width}&height={height}&seed={int(seed)}&nofeed=true&nologo=true&model={quote(model)}"
+        yield ImageResponse(image, prompt)
+
+    @classmethod
+    async def _generate_text(cls, model: str, messages: Messages, api_base: str, api_key: str = None, proxy: str = None, **kwargs):
+        if api_key is None:
+            # Without an API key, stream plain text from the public endpoint.
+            async with ClientSession(connector=get_connector(proxy=proxy)) as session:
+                prompt = format_prompt(messages)
+                async with session.get(f"https://text.pollinations.ai/{quote(prompt)}?model={quote(model)}") as response:
+                    await raise_for_status(response)
+                    async for line in response.content.iter_any():
+                        yield line.decode(errors="ignore")
+        else:
+            # With an API key, delegate to the OpenAI-compatible base class.
+            async for chunk in super().create_async_generator(
+                model, messages, api_base=api_base, api_key=api_key, proxy=proxy, **kwargs
+            ):
+                yield chunk
--
cgit v1.2.3
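
Usage note: a minimal sketch of how the new provider could be driven directly,
assuming network access to the Pollinations endpoints; get_models() is called
first so that the cached cls.models / cls.image_models lists exist before the
generator checks them:

    import asyncio
    from g4f.Provider.PollinationsAI import PollinationsAI

    async def main():
        PollinationsAI.get_models()  # populate the cached model lists
        async for chunk in PollinationsAI.create_async_generator(
            model="openai",  # the provider's default text model
            messages=[{"role": "user", "content": "Say hello."}],
        ):
            print(chunk, end="")

    asyncio.run(main())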