From 2df2d6b0cf63fa90b0fe5160a02dade7d161f667 Mon Sep 17 00:00:00 2001
From: Heiner Lohaus
Date: Tue, 14 Jan 2025 17:07:39 +0100
Subject: Read FinishReason and Usage from Gemini API

Add "Custom Provider": set the API URL in the settings
Remove the Discord link from results; move it to the Jmuz url attribute
Fix bug: file content is added to the prompt
Change the response format of the /v1/models API
Disable the Pizzagpt provider
---
 g4f/Provider/Jmuz.py                 | 24 +++++++++++++-----------
 g4f/Provider/Pizzagpt.py             |  4 ++--
 g4f/Provider/needs_auth/Custom.py    |  7 ++++---
 g4f/Provider/needs_auth/GeminiPro.py | 17 ++++++++++++++++-
 g4f/Provider/needs_auth/OpenaiAPI.py |  8 ++++++--
 g4f/api/__init__.py                  | 16 ++++++++++------
 g4f/gui/client/index.html            | 13 +++++++++----
 g4f/gui/client/static/js/chat.v1.js  | 35 +++++++++++++----------------------
 g4f/gui/server/api.py                |  7 +++++--
 g4f/gui/server/backend_api.py        |  3 ++-
 g4f/models.py                        |  4 +---
 11 files changed, 81 insertions(+), 57 deletions(-)

diff --git a/g4f/Provider/Jmuz.py b/g4f/Provider/Jmuz.py
index c713398b..a5084fc0 100644
--- a/g4f/Provider/Jmuz.py
+++ b/g4f/Provider/Jmuz.py
@@ -5,7 +5,7 @@ from .needs_auth.OpenaiAPI import OpenaiAPI
 
 class Jmuz(OpenaiAPI):
     label = "Jmuz"
-    url = "https://jmuz.me"
+    url = "https://discord.gg/qXfu24JmsB"
     login_url = None
     api_base = "https://jmuz.me/gpt/api/v2"
     api_key = "prod"
@@ -15,7 +15,7 @@ class Jmuz(OpenaiAPI):
     supports_stream = True
     supports_system_message = False
 
-    default_model = 'gpt-4o'
+    default_model = "gpt-4o"
     model_aliases = {
         "gemini": "gemini-exp",
         "deepseek-chat": "deepseek-2.5",
@@ -29,13 +29,7 @@ class Jmuz(OpenaiAPI):
         return cls.models
 
     @classmethod
-    def get_model(cls, model: str, **kwargs) -> str:
-        if model in cls.get_models():
-            return model
-        return cls.default_model
-
-    @classmethod
-    def create_async_generator(
+    async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
@@ -52,7 +46,8 @@ class Jmuz(OpenaiAPI):
             "cache-control": "no-cache",
             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
         }
-        return super().create_async_generator(
+        started = False
+        async for chunk in super().create_async_generator(
             model=model,
             messages=messages,
             api_base=cls.api_base,
@@ -60,4 +55,11 @@ class Jmuz(OpenaiAPI):
             stream=cls.supports_stream,
             headers=headers,
             **kwargs
-        )
+        ):
+            if isinstance(chunk, str) and cls.url in chunk:
+                continue
+            if isinstance(chunk, str) and not started:
+                chunk = chunk.lstrip()
+                if chunk:
+                    started = True
+            yield chunk
diff --git a/g4f/Provider/Pizzagpt.py b/g4f/Provider/Pizzagpt.py
index 9829e59d..65fffd1e 100644
--- a/g4f/Provider/Pizzagpt.py
+++ b/g4f/Provider/Pizzagpt.py
@@ -10,7 +10,7 @@ from .helper import format_prompt
 class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.pizzagpt.it"
     api_endpoint = "/api/chatx-completion"
-    working = True
+    working = False
 
     default_model = 'gpt-4o-mini'
 
@@ -46,6 +46,6 @@ class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin):
             response_json = await response.json()
             content = response_json.get("answer", response_json).get("content")
             if content:
-                if "misuse detected. please get in touch" in content:
+                if "Misuse detected. please get in touch" in content:
                     raise ValueError(content)
                 yield content
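A quick illustration of the new Jmuz chunk filter above (not part of the patch): the Discord URL is dropped from the stream, and leading whitespace is trimmed until the first non-empty text chunk arrives. A self-contained Python sketch of the same logic:

def filter_chunks(chunks, url="https://discord.gg/qXfu24JmsB"):
    # Mirrors the Jmuz handling above for plain string chunks.
    started = False
    for chunk in chunks:
        if url in chunk:
            continue                # drop the injected Discord link
        if not started:
            chunk = chunk.lstrip()  # trim leading whitespace
            if chunk:
                started = True      # first real text has arrived
        yield chunk

print(list(filter_chunks(["\n", "  Hello", " world", "https://discord.gg/qXfu24JmsB"])))
# -> ['', 'Hello', ' world']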
diff --git a/g4f/Provider/needs_auth/Custom.py b/g4f/Provider/needs_auth/Custom.py
index d78e5e28..8332394b 100644
--- a/g4f/Provider/needs_auth/Custom.py
+++ b/g4f/Provider/needs_auth/Custom.py
@@ -3,9 +3,10 @@ from __future__ import annotations
 from .OpenaiAPI import OpenaiAPI
 
 class Custom(OpenaiAPI):
-    label = "Custom"
+    label = "Custom Provider"
     url = None
-    login_url = "http://localhost:8080"
+    login_url = None
     working = True
     api_base = "http://localhost:8080/v1"
-    needs_auth = False
\ No newline at end of file
+    needs_auth = False
+    sort_models = False
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/GeminiPro.py b/g4f/Provider/needs_auth/GeminiPro.py
index 502fcb5d..89dbf52e 100644
--- a/g4f/Provider/needs_auth/GeminiPro.py
+++ b/g4f/Provider/needs_auth/GeminiPro.py
@@ -3,12 +3,14 @@ from __future__ import annotations
 import base64
 import json
 import requests
+from typing import Optional
 from aiohttp import ClientSession, BaseConnector
 
 from ...typing import AsyncResult, Messages, ImagesType
 from ...image import to_bytes, is_accepted_format
 from ...errors import MissingAuthError
 from ...requests.raise_for_status import raise_for_status
+from ...providers.response import Usage, FinishReason
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..helper import get_connector
 from ... import debug
@@ -62,6 +64,7 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
         api_base: str = api_base,
         use_auth_header: bool = False,
         images: ImagesType = None,
+        tools: Optional[list] = None,
         connector: BaseConnector = None,
         **kwargs
     ) -> AsyncResult:
@@ -104,7 +107,10 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
                 "maxOutputTokens": kwargs.get("max_tokens"),
                 "topP": kwargs.get("top_p"),
                 "topK": kwargs.get("top_k"),
-            }
+            },
+            "tools": [{
+                "functionDeclarations": tools
+            }] if tools else None
         }
         system_prompt = "\n".join(
             message["content"]
@@ -128,6 +134,15 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
                             data = b"".join(lines)
                             data = json.loads(data)
                             yield data["candidates"][0]["content"]["parts"][0]["text"]
+                            if "finishReason" in data["candidates"][0]:
+                                yield FinishReason(data["candidates"][0]["finishReason"].lower())
+                            usage = data.get("usageMetadata")
+                            if usage:
+                                yield Usage(
+                                    prompt_tokens=usage.get("promptTokenCount"),
+                                    completion_tokens=usage.get("candidatesTokenCount"),
+                                    total_tokens=usage.get("totalTokenCount")
+                                )
                         except:
                             data = data.decode(errors="ignore") if isinstance(data, bytes) else data
                             raise RuntimeError(f"Read chunk failed: {data}")
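For context, a minimal consumer sketch for the new GeminiPro response objects (not part of the patch). It assumes a valid Gemini API key; the model name is only an example, and FinishReason and Usage are the response types imported above from g4f.providers.response:

import asyncio

from g4f.Provider.needs_auth.GeminiPro import GeminiPro
from g4f.providers.response import FinishReason, Usage

async def main():
    async for chunk in GeminiPro.create_async_generator(
        model="gemini-1.5-pro",                        # example model name
        messages=[{"role": "user", "content": "Hi"}],
        api_key="YOUR_GEMINI_API_KEY",                 # assumption: a real key
    ):
        if isinstance(chunk, FinishReason):
            print("\nfinish:", chunk.reason)           # e.g. "stop"
        elif isinstance(chunk, Usage):
            print("usage:", chunk.get_dict())          # token counts
        else:
            print(chunk, end="")                       # plain text delta

asyncio.run(main())

The same call accepts the new tools parameter, which the patch forwards verbatim as Gemini functionDeclarations.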
+                cls.image_models = [model.get("id") for model in data if model.get("image")]
+                cls.models = [model.get("id") for model in data]
+                if cls.sort_models:
+                    cls.models.sort()
             except Exception as e:
                 debug.log(e)
                 cls.models = cls.fallback_models
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index d4a3bd76..374d7c64 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -215,12 +215,16 @@ class Api:
             HTTP_200_OK: {"model": List[ModelResponseModel]},
         })
         async def models():
-            return [{
-                'id': model_id,
-                'object': 'model',
-                'created': 0,
-                'owned_by': model.base_provider
-            } for model_id, model in g4f.models.ModelUtils.convert.items()]
+            return {
+                "object": "list",
+                "data": [{
+                    "id": model_id,
+                    "object": "model",
+                    "created": 0,
+                    "owned_by": model.base_provider,
+                    "image": isinstance(model, g4f.models.ImageModel),
+                } for model_id, model in g4f.models.ModelUtils.convert.items()]
+            }
 
         @self.app.get("/v1/models/{model_name}", responses={
             HTTP_200_OK: {"model": ModelResponseModel},
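As a quick check of the reshaped /v1/models response (not part of the patch; assumes a g4f API server running on its default local port 1337):

import requests

resp = requests.get("http://localhost:1337/v1/models")
resp.raise_for_status()
payload = resp.json()            # now {"object": "list", "data": [...]}
for model in payload["data"]:
    if model["image"]:           # new flag marking image models
        print("image model:", model["id"])

The added image flag is what the new OpenaiAPI.get_models code reads to populate cls.image_models when a provider is backed by an OpenAI-style /models endpoint.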
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
index bbf41314..5517eca2 100644
--- a/g4f/gui/client/index.html
+++ b/g4f/gui/client/index.html
@@ -143,7 +143,7 @@
 [1 markup line changed; the HTML content is not recoverable]
@@ -157,6 +157,14 @@ document.getElementById('recognition-language').placeholder = navigator.language;
 [8 markup lines added; the HTML content is not recoverable]
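Finally, a usage sketch for the renamed Custom Provider (not part of the patch). It assumes an OpenAI-compatible server listening on http://localhost:8080/v1, the URL that the new settings field overrides, and that extra keyword arguments are passed through to the provider by the g4f client interface:

from g4f.client import Client
from g4f.Provider.needs_auth.Custom import Custom

client = Client(provider=Custom)
response = client.chat.completions.create(
    model="",                                # empty: let the backend pick its default
    messages=[{"role": "user", "content": "Hello"}],
    api_base="http://localhost:8080/v1",     # assumption: your local server URL
)
print(response.choices[0].message.content)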