From 989f02fc31fa9960f8b2b141297d345ea8843971 Mon Sep 17 00:00:00 2001 From: hlohaus <983577+hlohaus@users.noreply.github.com> Date: Sat, 1 Mar 2025 01:46:04 +0100 Subject: Add ToolSupportProvider --- g4f/Provider/PollinationsAI.py | 14 +++--- g4f/Provider/hf/HuggingFaceAPI.py | 4 +- g4f/Provider/hf/models.py | 1 + g4f/Provider/template/OpenaiTemplate.py | 1 - g4f/client/__init__.py | 10 +++-- g4f/debug.py | 8 ++-- g4f/gui/client/demo.html | 10 +++-- g4f/integration/__init__.py | 0 g4f/providers/tool_support.py | 77 +++++++++++++++++++++++++++++++++ g4f/tools/pydantic_ai.py | 1 - 10 files changed, 105 insertions(+), 21 deletions(-) create mode 100644 g4f/integration/__init__.py create mode 100644 g4f/providers/tool_support.py delete mode 100644 g4f/tools/pydantic_ai.py (limited to 'g4f') diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py index f71b2275..c5bd6ccb 100644 --- a/g4f/Provider/PollinationsAI.py +++ b/g4f/Provider/PollinationsAI.py @@ -1,6 +1,5 @@ from __future__ import annotations -import json import random import requests from urllib.parse import quote_plus @@ -15,6 +14,7 @@ from ..errors import ModelNotFoundError from ..requests.raise_for_status import raise_for_status from ..requests.aiohttp import get_connector from ..providers.response import ImageResponse, ImagePreview, FinishReason, Usage +from .. 
import debug DEFAULT_HEADERS = { 'Accept': '*/*', @@ -74,9 +74,11 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin): try: # Update of image models image_response = requests.get("https://image.pollinations.ai/models") - image_response.raise_for_status() - new_image_models = image_response.json() - + if image_response.ok: + new_image_models = image_response.json() + else: + new_image_models = [] + # Combine models without duplicates all_image_models = ( cls.image_models + # Already contains the default @@ -112,8 +114,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin): cls.text_models = [cls.default_model] if not cls.image_models: cls.image_models = [cls.default_image_model] - raise RuntimeError(f"Failed to fetch models: {e}") from e - + debug.error(f"Failed to fetch models: {e}") + return cls.text_models + cls.image_models @classmethod diff --git a/g4f/Provider/hf/HuggingFaceAPI.py b/g4f/Provider/hf/HuggingFaceAPI.py index e775a7ae..665d2294 100644 --- a/g4f/Provider/hf/HuggingFaceAPI.py +++ b/g4f/Provider/hf/HuggingFaceAPI.py @@ -61,10 +61,10 @@ class HuggingFaceAPI(OpenaiTemplate): images: ImagesType = None, **kwargs ): - if model in cls.model_aliases: - model = cls.model_aliases[model] if model == llama_models["name"]: model = llama_models["text"] if images is None else llama_models["vision"] + if model in cls.model_aliases: + model = cls.model_aliases[model] api_base = f"https://api-inference.huggingface.co/models/{model}/v1" pipeline_tag = await cls.get_pipline_tag(model, api_key) if pipeline_tag not in ("text-generation", "image-text-to-text"): diff --git a/g4f/Provider/hf/models.py b/g4f/Provider/hf/models.py index 53c33a21..def7c05c 100644 --- a/g4f/Provider/hf/models.py +++ b/g4f/Provider/hf/models.py @@ -20,6 +20,7 @@ fallback_models = text_models + image_models model_aliases = { ### Chat ### "qwen-2.5-72b": "Qwen/Qwen2.5-Coder-32B-Instruct", + "llama-3": "meta-llama/Llama-3.3-70B-Instruct", "llama-3.3-70b": 
"meta-llama/Llama-3.3-70B-Instruct", "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024", "deepseek-r1": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", diff --git a/g4f/Provider/template/OpenaiTemplate.py b/g4f/Provider/template/OpenaiTemplate.py index d8427727..359fdfd7 100644 --- a/g4f/Provider/template/OpenaiTemplate.py +++ b/g4f/Provider/template/OpenaiTemplate.py @@ -145,7 +145,6 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin elif content_type.startswith("text/event-stream"): await raise_for_status(response) first = True - is_thinking = 0 async for line in response.iter_lines(): if line.startswith(b"data: "): chunk = line[6:] diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py index 8bd6d0dc..8aceaaf2 100644 --- a/g4f/client/__init__.py +++ b/g4f/client/__init__.py @@ -275,6 +275,7 @@ class Completions: def create( self, + *, messages: Messages, model: str, provider: Optional[ProviderType] = None, @@ -306,8 +307,8 @@ class Completions: response = iter_run_tools( provider.get_create_function(), - model, - messages, + model=model, + messages=messages, stream=stream, **filter_none( proxy=self.client.proxy if proxy is None else proxy, @@ -561,6 +562,7 @@ class AsyncCompletions: def create( self, + *, messages: Messages, model: str, provider: Optional[ProviderType] = None, @@ -592,8 +594,8 @@ class AsyncCompletions: response = async_iter_run_tools( provider, - model, - messages, + model=model, + messages=messages, stream=stream, **filter_none( proxy=self.client.proxy if proxy is None else proxy, diff --git a/g4f/debug.py b/g4f/debug.py index 10cd37f6..36abb3a9 100644 --- a/g4f/debug.py +++ b/g4f/debug.py @@ -1,10 +1,7 @@ import sys -from .providers.types import ProviderType logging: bool = False version_check: bool = True -last_provider: ProviderType = None -last_model: str = None version: str = None log_handler: callable = print logs: list = [] @@ -14,4 +11,7 @@ def log(text, file = None): log_handler(text, 
file=file) def error(error, name: str = None): - log(error if isinstance(error, str) else f"{type(error).__name__ if name is None else name}: {error}", file=sys.stderr) \ No newline at end of file + log( + error if isinstance(error, str) else f"{type(error).__name__ if name is None else name}: {error}", + file=sys.stderr + ) \ No newline at end of file diff --git a/g4f/gui/client/demo.html b/g4f/gui/client/demo.html index 94ff5c39..7464c783 100644 --- a/g4f/gui/client/demo.html +++ b/g4f/gui/client/demo.html @@ -201,7 +201,7 @@ - Image Feed +
@@ -336,14 +336,15 @@ const images = [] eventSource.onmessage = (event) => { const data = JSON.parse(event.data); - if (data.nsfw || !data.nologo || data.width < 1024 || !data.imageURL) { + if (data.nsfw || !data.nologo || data.width < 1024 || !data.imageURL || data.isChild) { return; } const lower = data.prompt.toLowerCase(); - const tags = ["logo", "infographic", "warts","prostitute", "curvy", "breasts", "written", "bodies", "naked", "classroom", "malone", "dirty", "shoes", "shower", "banner", "fat", "nipples", "couple", "sexual", "sandal", "supplier", "overlord", "succubus", "platinum", "cracy", "crazy", "lamic", "ropes", "cables", "wires", "dirty", "messy", "cluttered", "chaotic", "disorganized", "disorderly", "untidy", "unorganized", "unorderly", "unsystematic", "disarranged", "disarrayed", "disheveled", "disordered", "jumbled", "muddled", "scattered", "shambolic", "sloppy", "unkept", "unruly"]; + const tags = ["nsfw", "timeline", "soap", "orally", "heel", "latex", "bathroom", "boobs", "charts", " text ", "gel", "logo", "infographic", "warts", " bra ", "prostitute", "curvy", "breasts", "written", "bodies", "naked", "classroom", "malone", "dirty", "shoes", "shower", "banner", "fat", "nipples", "couple", "sexual", "sandal", "supplier", "overlord", "succubus", "platinum", "cracy", "crazy", "lamic", "ropes", "cables", "wires", "dirty", "messy", "cluttered", "chaotic", "disorganized", "disorderly", "untidy", "unorganized", "unorderly", "unsystematic", "disarranged", "disarrayed", "disheveled", "disordered", "jumbled", "muddled", "scattered", "shambolic", "sloppy", "unkept", "unruly"]; for (i in tags) { if (lower.indexOf(tags[i]) != -1) { console.log("Skipping image with tag: " + tags[i]); + console.debug("Skipping image:", data.imageURL); return; } } @@ -354,7 +355,10 @@ } setInterval(() => { if (images.length > 0) { + imageFeed.classList.remove("hidden"); imageFeed.src = images.shift(); + } else if(imageFeed) { + imageFeed.remove(); } }, 7000); })(); diff --git 
a/g4f/integration/__init__.py b/g4f/integration/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/g4f/providers/tool_support.py b/g4f/providers/tool_support.py new file mode 100644 index 00000000..c4360ab8 --- /dev/null +++ b/g4f/providers/tool_support.py @@ -0,0 +1,77 @@ +from __future__ import annotations + +import json + +from ..typing import AsyncResult, Messages, ImagesType +from ..providers.asyncio import to_async_iterator +from ..client.service import get_model_and_provider +from ..client.helper import filter_json +from .base_provider import AsyncGeneratorProvider +from .response import ToolCalls, FinishReason + +class ToolSupportProvider(AsyncGeneratorProvider): + working = True + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + stream: bool = True, + images: ImagesType = None, + tools: list[str] = None, + response_format: dict = None, + **kwargs + ) -> AsyncResult: + provider = None + if ":" in model: + provider, model = model.split(":", 1) + model, provider = get_model_and_provider( + model, provider, + stream, logging=False, + has_images=images is not None + ) + if response_format is None: + response_format = {"type": "json"} + + if tools is not None: + if len(tools) > 1: + raise ValueError("Only one tool is supported.") + tools = tools.pop() + lines = ["Response in JSON format."] + properties = tools["function"]["parameters"]["properties"] + properties = {key: value["type"] for key, value in properties.items()} + lines.append(f"Response format: {json.dumps(properties, indent=2)}") + messages = [{"role": "user", "content": "\n".join(lines)}] + messages + + finish = None + chunks = [] + async for chunk in provider.get_async_create_function()( + model, + messages, + stream=stream, + images=images, + response_format=response_format, + **kwargs + ): + if isinstance(chunk, FinishReason): + finish = chunk + break + elif isinstance(chunk, str): + chunks.append(chunk) + else: + yield chunk + 
chunks = "".join(chunks) + if tools is not None: + yield ToolCalls([{ + "id": "", + "type": "function", + "function": { + "name": tools["function"]["name"], + "arguments": filter_json(chunks) + } + }]) + yield chunks + if finish is not None: + yield finish \ No newline at end of file diff --git a/g4f/tools/pydantic_ai.py b/g4f/tools/pydantic_ai.py deleted file mode 100644 index 7572c987..00000000 --- a/g4f/tools/pydantic_ai.py +++ /dev/null @@ -1 +0,0 @@ -from ..integration.pydantic_ai import AIModel, patch_infer_model \ No newline at end of file -- cgit v1.2.3 From d11aabcd56827e67e79e142dda52614a0e9f97c3 Mon Sep 17 00:00:00 2001 From: hlohaus <983577+hlohaus@users.noreply.github.com> Date: Sat, 1 Mar 2025 09:38:57 +0100 Subject: Add ToolSupportProvider --- g4f/providers/tool_support.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'g4f') diff --git a/g4f/providers/tool_support.py b/g4f/providers/tool_support.py index c4360ab8..2aa83cef 100644 --- a/g4f/providers/tool_support.py +++ b/g4f/providers/tool_support.py @@ -31,12 +31,11 @@ class ToolSupportProvider(AsyncGeneratorProvider): stream, logging=False, has_images=images is not None ) - if response_format is None: - response_format = {"type": "json"} - if tools is not None: if len(tools) > 1: raise ValueError("Only one tool is supported.") + if response_format is None: + response_format = {"type": "json"} tools = tools.pop() lines = ["Response in JSON format."] properties = tools["function"]["parameters"]["properties"] -- cgit v1.2.3 From fa086e23af39f91236e5e8209ac0e886efd83428 Mon Sep 17 00:00:00 2001 From: hlohaus <983577+hlohaus@users.noreply.github.com> Date: Sat, 1 Mar 2025 09:45:52 +0100 Subject: Fix unit tests --- g4f/client/__init__.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'g4f') diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py index 8aceaaf2..22dde6c1 100644 --- a/g4f/client/__init__.py +++ b/g4f/client/__init__.py @@ -275,7 +275,6 @@ class Completions: 
def create( self, - *, messages: Messages, model: str, provider: Optional[ProviderType] = None, @@ -562,7 +561,6 @@ class AsyncCompletions: def create( self, - *, messages: Messages, model: str, provider: Optional[ProviderType] = None, -- cgit v1.2.3