summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--g4f/Provider/Copilot.py5
-rw-r--r--g4f/Provider/needs_auth/Gemini.py5
-rw-r--r--g4f/Provider/needs_auth/GithubCopilot.py93
-rw-r--r--g4f/Provider/needs_auth/OpenaiChat.py26
-rw-r--r--g4f/Provider/needs_auth/__init__.py1
-rw-r--r--g4f/api/__init__.py103
-rw-r--r--g4f/image.py10
-rw-r--r--requirements-slim.txt3
-rw-r--r--requirements.txt3
-rw-r--r--setup.py17
10 files changed, 225 insertions, 41 deletions
diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py
index e10a55e8..2f37b1eb 100644
--- a/g4f/Provider/Copilot.py
+++ b/g4f/Provider/Copilot.py
@@ -73,10 +73,9 @@ class Copilot(AbstractProvider):
else:
access_token = conversation.access_token
debug.log(f"Copilot: Access token: {access_token[:7]}...{access_token[-5:]}")
- debug.log(f"Copilot: Cookies: {';'.join([*cookies])}")
websocket_url = f"{websocket_url}&accessToken={quote(access_token)}"
- headers = {"authorization": f"Bearer {access_token}", "cookie": format_cookies(cookies)}
-
+ headers = {"authorization": f"Bearer {access_token}"}
+
with Session(
timeout=timeout,
proxy=proxy,
diff --git a/g4f/Provider/needs_auth/Gemini.py b/g4f/Provider/needs_auth/Gemini.py
index 1e89ab05..89f6f802 100644
--- a/g4f/Provider/needs_auth/Gemini.py
+++ b/g4f/Provider/needs_auth/Gemini.py
@@ -206,6 +206,8 @@ class Gemini(AsyncGeneratorProvider):
@classmethod
async def synthesize(cls, params: dict, proxy: str = None) -> AsyncIterator[bytes]:
+ if "text" not in params:
+ raise ValueError("Missing parameter text")
async with ClientSession(
cookies=cls._cookies,
headers=REQUEST_HEADERS,
@@ -213,9 +215,6 @@ class Gemini(AsyncGeneratorProvider):
) as session:
if not cls._snlm0e:
await cls.fetch_snlm0e(session, cls._cookies) if cls._cookies else None
- if not cls._snlm0e:
- async for chunk in cls.nodriver_login(proxy):
- debug.log(chunk)
inner_data = json.dumps([None, params["text"], "de-DE", None, 2])
async with session.post(
"https://gemini.google.com/_/BardChatUi/data/batchexecute",
diff --git a/g4f/Provider/needs_auth/GithubCopilot.py b/g4f/Provider/needs_auth/GithubCopilot.py
new file mode 100644
index 00000000..0c12dfd0
--- /dev/null
+++ b/g4f/Provider/needs_auth/GithubCopilot.py
@@ -0,0 +1,93 @@
+from __future__ import annotations
+
+import json
+
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation
+from ...typing import AsyncResult, Messages, Cookies
+from ...requests.raise_for_status import raise_for_status
+from ...requests import StreamSession
+from ...providers.helper import format_prompt
+from ...cookies import get_cookies
+
+class Conversation(BaseConversation):
+ conversation_id: str
+
+ def __init__(self, conversation_id: str):
+ self.conversation_id = conversation_id
+
+class GithubCopilot(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://copilot.microsoft.com"
+ working = True
+ needs_auth = True
+ supports_stream = True
+ default_model = "gpt-4o"
+ models = [default_model, "o1-mini", "o1-preview", "claude-3.5-sonnet"]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = False,
+ api_key: str = "X2eRgXPamxGK_TXS6seGGYy541mQuVJdH1CYljrvSPuc38je5J4KK4Aw0y5X2oVRFMjA4B1fo9sdsr4VJcl-VBae7H0Mr4U9GIkFnGx3hSs=",
+ proxy: str = None,
+ cookies: Cookies = None,
+ conversation_id: str = None,
+ conversation: Conversation = None,
+ return_conversation: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ if not model:
+ model = cls.default_model
+ if cookies is None:
+ cookies = get_cookies(".github.com")
+ async with StreamSession(
+ proxy=proxy,
+ impersonate="chrome",
+ cookies=cookies,
+ headers={
+ "GitHub-Verified-Fetch": "true",
+ }
+ ) as session:
+ headers = {}
+ if api_key is None:
+ async with session.post("https://github.com/github-copilot/chat/token") as response:
+ await raise_for_status(response, "Get token")
+ api_key = (await response.json()).get("token")
+ headers = {
+ "Authorization": f"GitHub-Bearer {api_key}",
+ }
+ if conversation is not None:
+ conversation_id = conversation.conversation_id
+ if conversation_id is None:
+ print(headers)
+ async with session.post("https://api.individual.githubcopilot.com/github/chat/threads", headers=headers) as response:
+ await raise_for_status(response)
+ conversation_id = (await response.json()).get("thread_id")
+ if return_conversation:
+ yield Conversation(conversation_id)
+ content = messages[-1]["content"]
+ else:
+ content = format_prompt(messages)
+ json_data = {
+ "content": content,
+ "intent": "conversation",
+ "references":[],
+ "context": [],
+ "currentURL": f"https://github.com/copilot/c/{conversation_id}",
+ "streaming": True,
+ "confirmations": [],
+ "customInstructions": [],
+ "model": model,
+ "mode": "immersive"
+ }
+ async with session.post(
+ f"https://api.individual.githubcopilot.com/github/chat/threads/{conversation_id}/messages",
+ json=json_data,
+ headers=headers
+ ) as response:
+ async for line in response.iter_lines():
+ if line.startswith(b"data: "):
+ data = json.loads(line[6:])
+ if data.get("type") == "content":
+ yield data.get("body") \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 074c9161..37bdf074 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -111,7 +111,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
# Post the image data to the service and get the image data
async with session.post(f"{cls.url}/backend-api/files", json=data, headers=headers) as response:
cls._update_request_args(session)
- await raise_for_status(response)
+ await raise_for_status(response, "Create file failed")
image_data = {
**data,
**await response.json(),
@@ -129,7 +129,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"x-ms-blob-type": "BlockBlob"
}
) as response:
- await raise_for_status(response)
+ await raise_for_status(response, "Send file failed")
# Post the file ID to the service and get the download URL
async with session.post(
f"{cls.url}/backend-api/files/{image_data['file_id']}/uploaded",
@@ -137,12 +137,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
headers=headers
) as response:
cls._update_request_args(session)
- await raise_for_status(response)
+ await raise_for_status(response, "Get download url failed")
image_data["download_url"] = (await response.json())["download_url"]
return ImageRequest(image_data)
@classmethod
- def create_messages(cls, messages: Messages, image_request: ImageRequest = None):
+ def create_messages(cls, messages: Messages, image_request: ImageRequest = None, system_hints: list = None):
"""
Create a list of messages for the user input
@@ -160,7 +160,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"id": str(uuid.uuid4()),
"create_time": int(time.time()),
"id": str(uuid.uuid4()),
- "metadata": {"serialization_metadata": {"custom_symbol_offsets": []}}
+ "metadata": {"serialization_metadata": {"custom_symbol_offsets": []}, "system_hints": system_hints},
} for message in messages]
# Check if there is an image response
@@ -189,7 +189,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
return messages
@classmethod
- async def get_generated_image(cls, session: StreamSession, headers: dict, element: dict) -> ImageResponse:
+ async def get_generated_image(cls, session: StreamSession, headers: dict, element: dict, prompt: str = None) -> ImageResponse:
"""
Retrieves the image response based on the message content.
@@ -211,6 +211,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
try:
prompt = element["metadata"]["dalle"]["prompt"]
file_id = element["asset_pointer"].split("file-service://", 1)[1]
+ except TypeError:
+ return
except Exception as e:
raise RuntimeError(f"No Image: {e.__class__.__name__}: {e}")
try:
@@ -240,6 +242,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
image_name: str = None,
return_conversation: bool = False,
max_retries: int = 3,
+ web_search: bool = False,
**kwargs
) -> AsyncResult:
"""
@@ -331,14 +334,15 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"conversation_mode": {"kind":"primary_assistant"},
"websocket_request_id": str(uuid.uuid4()),
"supported_encodings": ["v1"],
- "supports_buffering": True
+ "supports_buffering": True,
+ "system_hints": ["search"] if web_search else None
}
if conversation.conversation_id is not None:
data["conversation_id"] = conversation.conversation_id
debug.log(f"OpenaiChat: Use conversation: {conversation.conversation_id}")
if action != "continue":
messages = messages if conversation_id is None else [messages[-1]]
- data["messages"] = cls.create_messages(messages, image_request)
+ data["messages"] = cls.create_messages(messages, image_request, ["search"] if web_search else None)
headers = {
**cls._headers,
"accept": "text/event-stream",
@@ -419,9 +423,9 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
generated_images = []
for element in c.get("parts"):
if isinstance(element, dict) and element.get("content_type") == "image_asset_pointer":
- generated_images.append(
- cls.get_generated_image(session, cls._headers, element)
- )
+ image = cls.get_generated_image(session, cls._headers, element)
+ if image is not None:
+ generated_images.append(image)
for image_response in await asyncio.gather(*generated_images):
yield image_response
if m.get("author", {}).get("role") == "assistant":
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
index 1c7fe7c5..f3391706 100644
--- a/g4f/Provider/needs_auth/__init__.py
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -7,6 +7,7 @@ from .DeepInfra import DeepInfra
from .DeepInfraImage import DeepInfraImage
from .Gemini import Gemini
from .GeminiPro import GeminiPro
+from .GithubCopilot import GithubCopilot
from .Groq import Groq
from .HuggingFace import HuggingFace
from .HuggingFace2 import HuggingFace2
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index 292164fa..628d7512 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -5,8 +5,10 @@ import json
import uvicorn
import secrets
import os
+import shutil
-from fastapi import FastAPI, Response, Request
+import os.path
+from fastapi import FastAPI, Response, Request, UploadFile
from fastapi.responses import StreamingResponse, RedirectResponse, HTMLResponse, JSONResponse
from fastapi.exceptions import RequestValidationError
from fastapi.security import APIKeyHeader
@@ -16,16 +18,17 @@ from fastapi.encoders import jsonable_encoder
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import FileResponse
from pydantic import BaseModel
-from typing import Union, Optional
+from typing import Union, Optional, List
import g4f
import g4f.debug
-from g4f.client import AsyncClient, ChatCompletion
+from g4f.client import AsyncClient, ChatCompletion, convert_to_provider
from g4f.providers.response import BaseConversation
from g4f.client.helper import filter_none
from g4f.image import is_accepted_format, images_dir
from g4f.typing import Messages
-from g4f.cookies import read_cookie_files
+from g4f.errors import ProviderNotFoundError
+from g4f.cookies import read_cookie_files, get_cookies_dir
from g4f.Provider import ProviderType, ProviderUtils, __providers__
logger = logging.getLogger(__name__)
@@ -78,6 +81,18 @@ class ImageGenerationConfig(BaseModel):
api_key: Optional[str] = None
proxy: Optional[str] = None
+class ProviderResponseModel(BaseModel):
+ id: str
+ object: str = "provider"
+ created: int
+ owned_by: Optional[str]
+
+class ModelResponseModel(BaseModel):
+ id: str
+ object: str = "model"
+ created: int
+ owned_by: Optional[str]
+
class AppConfig:
ignored_providers: Optional[list[str]] = None
g4f_api_key: Optional[str] = None
@@ -109,7 +124,7 @@ class Api:
def register_authorization(self):
@self.app.middleware("http")
async def authorization(request: Request, call_next):
- if self.g4f_api_key and request.url.path in ["/v1/chat/completions", "/v1/completions", "/v1/images/generate"]:
+ if self.g4f_api_key and request.url.path not in ("/", "/v1"):
try:
user_g4f_api_key = await self.get_g4f_api_key(request)
except HTTPException as e:
@@ -123,9 +138,7 @@ class Api:
status_code=HTTP_403_FORBIDDEN,
content=jsonable_encoder({"detail": "Invalid G4F API key"}),
)
-
- response = await call_next(request)
- return response
+ return await call_next(request)
def register_validation_exception_handler(self):
@self.app.exception_handler(RequestValidationError)
@@ -158,22 +171,21 @@ class Api:
'<a href="/docs">/docs</a>')
@self.app.get("/v1/models")
- async def models():
+ async def models() -> list[ModelResponseModel]:
model_list = dict(
(model, g4f.models.ModelUtils.convert[model])
for model in g4f.Model.__all__()
)
- model_list = [{
+ return [{
'id': model_id,
'object': 'model',
'created': 0,
'owned_by': model.base_provider
} for model_id, model in model_list.items()]
- return JSONResponse(model_list)
@self.app.get("/v1/models/{model_name}")
async def model_info(model_name: str):
- try:
+ if model_name in g4f.models.ModelUtils.convert:
model_info = g4f.models.ModelUtils.convert[model_name]
return JSONResponse({
'id': model_name,
@@ -181,8 +193,7 @@ class Api:
'created': 0,
'owned_by': model_info.base_provider
})
- except:
- return JSONResponse({"error": "The model does not exist."})
+ return JSONResponse({"error": "The model does not exist."}, 404)
@self.app.post("/v1/chat/completions")
async def chat_completions(config: ChatCompletionsConfig, request: Request = None, provider: str = None):
@@ -277,12 +288,68 @@ class Api:
logger.exception(e)
return Response(content=format_exception(e, config, True), status_code=500, media_type="application/json")
- @self.app.post("/v1/completions")
- async def completions():
- return Response(content=json.dumps({'info': 'Not working yet.'}, indent=4), media_type="application/json")
+ @self.app.get("/v1/providers")
+ async def providers() -> list[ProviderResponseModel]:
+ return [{
+ 'id': provider.__name__,
+ 'object': 'provider',
+ 'created': 0,
+ 'url': provider.url,
+ 'label': getattr(provider, "label", None),
+ } for provider in __providers__ if provider.working]
+
+ @self.app.get("/v1/providers/{provider}")
+ async def providers_info(provider: str) -> ProviderResponseModel:
+ if provider not in ProviderUtils.convert:
+ return JSONResponse({"error": "The provider does not exist."}, 404)
+ provider: ProviderType = ProviderUtils.convert[provider]
+ def safe_get_models(provider: ProviderType) -> list[str]:
+ try:
+ return provider.get_models() if hasattr(provider, "get_models") else []
+ except:
+ return []
+ return {
+ 'id': provider.__name__,
+ 'object': 'provider',
+ 'created': 0,
+ 'url': provider.url,
+ 'label': getattr(provider, "label", None),
+ 'models': safe_get_models(provider),
+ 'image_models': getattr(provider, "image_models", []) or [],
+ 'vision_models': [model for model in [getattr(provider, "default_vision_model", None)] if model],
+ 'params': [*provider.get_parameters()] if hasattr(provider, "get_parameters") else []
+ }
+
+ @self.app.post("/v1/upload_cookies")
+ def upload_cookies(files: List[UploadFile]):
+ response_data = []
+ for file in files:
+ try:
+ if file and file.filename.endswith(".json") or file.filename.endswith(".har"):
+ filename = os.path.basename(file.filename)
+ with open(os.path.join(get_cookies_dir(), filename), 'wb') as f:
+ shutil.copyfileobj(file.file, f)
+ response_data.append({"filename": filename})
+ finally:
+ file.file.close()
+ return response_data
+
+ @self.app.get("/v1/synthesize/{provider}")
+ async def synthesize(request: Request, provider: str):
+ try:
+ provider_handler = convert_to_provider(provider)
+ except ProviderNotFoundError:
+ return Response("Provider not found", 404)
+ if not hasattr(provider_handler, "synthesize"):
+ return Response("Provider doesn't support synthesize", 500)
+ if len(request.query_params) == 0:
+ return Response("Missing query params", 500)
+ response_data = provider_handler.synthesize({**request.query_params})
+ content_type = getattr(provider_handler, "synthesize_content_type", "application/octet-stream")
+ return StreamingResponse(response_data, media_type=content_type)
@self.app.get("/images/{filename}")
- async def get_image(filename):
+ async def get_image(filename) -> FileResponse:
target = os.path.join(images_dir, filename)
if not os.path.isfile(target):
diff --git a/g4f/image.py b/g4f/image.py
index 114dcc13..e9abcb6e 100644
--- a/g4f/image.py
+++ b/g4f/image.py
@@ -33,10 +33,14 @@ EXTENSIONS_MAP: dict[str, str] = {
# Define the directory for generated images
images_dir = "./generated_images"
-def fix_url(url:str) -> str:
+def fix_url(url: str) -> str:
""" replace ' ' by '+' (to be markdown compliant)"""
return url.replace(" ","+")
+def fix_title(title: str) -> str:
+ if title:
+ return title.replace("\n", "").replace('"', '')
+
def to_image(image: ImageType, is_svg: bool = False) -> Image:
"""
Converts the input image to a PIL Image object.
@@ -226,12 +230,12 @@ def format_images_markdown(images: Union[str, list], alt: str, preview: Union[st
str: The formatted markdown string.
"""
if isinstance(images, str):
- result = f"[![{alt}]({fix_url(preview.replace('{image}', images) if preview else images)})]({fix_url(images)})"
+ result = f"[![{fix_title(alt)}]({fix_url(preview.replace('{image}', images) if preview else images)})]({fix_url(images)})"
else:
if not isinstance(preview, list):
preview = [preview.replace('{image}', image) if preview else image for image in images]
result = "\n".join(
- f"[![#{idx+1} {alt}]({fix_url(preview[idx])})]({fix_url(image)})"
+ f"[![#{idx+1} {fix_title(alt)}]({fix_url(preview[idx])})]({fix_url(image)})"
for idx, image in enumerate(images)
)
start_flag = "<!-- generated images start -->\n"
diff --git a/requirements-slim.txt b/requirements-slim.txt
index b9cbceba..2377faa3 100644
--- a/requirements-slim.txt
+++ b/requirements-slim.txt
@@ -13,4 +13,5 @@ flask
brotli
beautifulsoup4
aiohttp_socks
-cryptography \ No newline at end of file
+cryptography
+python-multipart \ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 83130838..11c34d59 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -18,4 +18,5 @@ aiohttp_socks
pywebview
plyer
cryptography
-nodriver \ No newline at end of file
+nodriver
+python-multipart \ No newline at end of file
diff --git a/setup.py b/setup.py
index 12581be9..31a1b683 100644
--- a/setup.py
+++ b/setup.py
@@ -31,6 +31,20 @@ EXTRA_REQUIRE = {
"fastapi", # api
"uvicorn", # api
"nodriver",
+ "python-multipart",
+ ],
+ 'slim': [
+ "curl_cffi>=0.6.2",
+ "certifi",
+ "duckduckgo-search>=5.0" ,# internet.search
+ "beautifulsoup4", # internet.search and bing.create_images
+ "aiohttp_socks", # proxy
+ "pillow", # image
+ "cairosvg", # svg image
+ "werkzeug", "flask", # gui
+ "fastapi", # api
+ "uvicorn", # api
+ "python-multipart",
],
'slim': [
"curl_cffi>=0.6.2",
@@ -69,7 +83,8 @@ EXTRA_REQUIRE = {
"werkzeug", "flask",
"beautifulsoup4", "pillow",
"duckduckgo-search>=5.0",
- "browser_cookie3"
+ "browser_cookie3",
+ "python-multipart",
],
"search": [
"beautifulsoup4", "pillow",