From 6b48af17572e14ccac382a7010edfaaf379c7e35 Mon Sep 17 00:00:00 2001 From: Heiner Lohaus Date: Mon, 25 Nov 2024 20:34:50 +0100 Subject: Add armv7 docker image, Remove Bing provider, add update banner --- .github/workflows/publish-workflow.yaml | 16 +- docker/Dockerfile-armv7 | 67 ++++ docker/Dockerfile-slim | 4 +- docs/async_client.md | 3 +- g4f/Provider/Bing.py | 523 -------------------------------- g4f/Provider/__init__.py | 1 - g4f/Provider/deprecated/Bing.py | 523 ++++++++++++++++++++++++++++++++ g4f/Provider/deprecated/__init__.py | 1 + g4f/gui/client/static/css/style.css | 19 ++ g4f/gui/client/static/js/chat.v1.js | 10 +- requirements-slim.txt | 1 - 11 files changed, 636 insertions(+), 532 deletions(-) create mode 100644 docker/Dockerfile-armv7 delete mode 100644 g4f/Provider/Bing.py create mode 100644 g4f/Provider/deprecated/Bing.py diff --git a/.github/workflows/publish-workflow.yaml b/.github/workflows/publish-workflow.yaml index ebddb1f3..6b8112f1 100644 --- a/.github/workflows/publish-workflow.yaml +++ b/.github/workflows/publish-workflow.yaml @@ -57,12 +57,26 @@ jobs: username: ${{ github.repository_owner }} password: ${{ secrets.GHCR_PAT }} + - name: Build and push armv7 image + uses: docker/build-push-action@v5 + with: + context: . + file: docker/Dockerfile-armv7 + platforms: linux/arm/v7 + push: true + tags: | + hlohaus789/g4f:latest-armv7 + hlohaus789/g4f:${{ github.ref_name }}-armv7 + labels: ${{ steps.metadata.outputs.labels }} + build-args: | + G4F_VERSION=${{ github.ref_name }} + - name: Build and push small images uses: docker/build-push-action@v5 with: context: . 
file: docker/Dockerfile-slim - platforms: linux/amd64,linux/arm64,linux/arm/v7 + platforms: linux/amd64,linux/arm64 push: true tags: | hlohaus789/g4f:latest-slim diff --git a/docker/Dockerfile-armv7 b/docker/Dockerfile-armv7 new file mode 100644 index 00000000..866ac63b --- /dev/null +++ b/docker/Dockerfile-armv7 @@ -0,0 +1,67 @@ +FROM python:slim-bookworm + +ARG G4F_VERSION +ARG G4F_USER=g4f +ARG G4F_USER_ID=1000 +ARG PYDANTIC_VERSION=1.8.1 + +ENV G4F_VERSION $G4F_VERSION +ENV G4F_USER $G4F_USER +ENV G4F_USER_ID $G4F_USER_ID +ENV G4F_DIR /app + +RUN apt-get update && apt-get upgrade -y \ + && apt-get install -y git \ + && apt-get install --quiet --yes --no-install-recommends \ + build-essential \ +# Add user and user group + && groupadd -g $G4F_USER_ID $G4F_USER \ + && useradd -rm -G sudo -u $G4F_USER_ID -g $G4F_USER_ID $G4F_USER \ + && mkdir -p /var/log/supervisor \ + && chown "${G4F_USER_ID}:${G4F_USER_ID}" /var/log/supervisor \ + && echo "${G4F_USER}:${G4F_USER}" | chpasswd \ + && python -m pip install --upgrade pip + +USER $G4F_USER_ID +WORKDIR $G4F_DIR + +ENV HOME /home/$G4F_USER +ENV PATH "${HOME}/.local/bin:${PATH}" + +# Create app dir and copy the project's requirements file into it +RUN mkdir -p $G4F_DIR +COPY requirements-min.txt $G4F_DIR +COPY requirements-slim.txt $G4F_DIR + +# Upgrade pip for the latest features and install the project's Python dependencies. 
+RUN pip install --no-cache-dir -r requirements-min.txt \ + && pip install --no-cache-dir --no-binary setuptools \ + Cython==0.29.22 \ + setuptools \ + # Install PyDantic + && pip install \ + -vvv \ + --no-cache-dir \ + --no-binary :all: \ + --global-option=build_ext \ + --global-option=-j8 \ + pydantic==${PYDANTIC_VERSION} +RUN cat requirements-slim.txt | xargs -n 1 pip install --no-cache-dir || true + +# Remove build packages +RUN pip uninstall --yes \ + Cython \ + setuptools + +USER root + +# Clean up build deps +RUN apt-get purge --auto-remove --yes \ + build-essential \ + && apt-get clean \ + && rm --recursive --force /var/lib/apt/lists/* /tmp/* /var/tmp/* + +USER $G4F_USER_ID + +# Copy the entire package into the container. +ADD --chown=$G4F_USER:$G4F_USER g4f $G4F_DIR/g4f \ No newline at end of file diff --git a/docker/Dockerfile-slim b/docker/Dockerfile-slim index 04238144..dfc3344d 100644 --- a/docker/Dockerfile-slim +++ b/docker/Dockerfile-slim @@ -29,11 +29,11 @@ ENV PATH "${HOME}/.local/bin:${PATH}" # Create app dir and copy the project's requirements file into it RUN mkdir -p $G4F_DIR -COPY requirements-min.txt $G4F_DIR COPY requirements-slim.txt $G4F_DIR # Upgrade pip for the latest features and install the project's Python dependencies. -RUN cat requirements-slim.txt | xargs -n 1 pip install --no-cache-dir || true +RUN pip install --no-cache-dir -r requirements-slim.txt \ + && pip install --no-cache-dir duckduckgo-search>=5.0 # Copy the entire package into the container. 
ADD --chown=$G4F_USER:$G4F_USER g4f $G4F_DIR/g4f \ No newline at end of file diff --git a/docs/async_client.md b/docs/async_client.md index e501aefa..cc4c5806 100644 --- a/docs/async_client.md +++ b/docs/async_client.md @@ -280,10 +280,9 @@ The G4F AsyncClient supports a wide range of AI models and providers, allowing y - OpenAI - Google (for Gemini) - Anthropic - - Bing + - Microsoft Copilot - Custom providers - **To use a specific model or provider, specify it when creating the client or in the API call:** ```python diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py deleted file mode 100644 index cdc2b9d9..00000000 --- a/g4f/Provider/Bing.py +++ /dev/null @@ -1,523 +0,0 @@ -from __future__ import annotations - -import random -import json -import uuid -import time -import asyncio -from urllib import parse -from datetime import datetime, date - -from ..typing import AsyncResult, Messages, ImageType, Cookies -from ..image import ImageRequest -from ..errors import ResponseError, ResponseStatusError, RateLimitError -from ..requests import DEFAULT_HEADERS -from ..requests.aiohttp import StreamSession -from .base_provider import AsyncGeneratorProvider, ProviderModelMixin -from .helper import get_random_hex -from .bing.upload_image import upload_image -from .bing.conversation import Conversation, create_conversation, delete_conversation -from .needs_auth.BingCreateImages import BingCreateImages -from .. import debug - -class Tones: - """ - Defines the different tone options for the Bing provider. - """ - creative = "Creative" - balanced = "Balanced" - precise = "Precise" - copilot = "Copilot" - -class Bing(AsyncGeneratorProvider, ProviderModelMixin): - """ - Bing provider for generating responses using the Bing API. 
- """ - label = "Microsoft Copilot in Bing" - url = "https://bing.com/chat" - working = True - supports_message_history = True - default_model = "Balanced" - default_vision_model = "gpt-4-vision" - models = [getattr(Tones, key) for key in Tones.__dict__ if not key.startswith("__")] - - @classmethod - def create_async_generator( - cls, - model: str, - messages: Messages, - proxy: str = None, - timeout: int = 900, - api_key: str = None, - cookies: Cookies = None, - tone: str = None, - image: ImageType = None, - web_search: bool = False, - context: str = None, - **kwargs - ) -> AsyncResult: - """ - Creates an asynchronous generator for producing responses from Bing. - - :param model: The model to use. - :param messages: Messages to process. - :param proxy: Proxy to use for requests. - :param timeout: Timeout for requests. - :param cookies: Cookies for the session. - :param tone: The tone of the response. - :param image: The image type to be used. - :param web_search: Flag to enable or disable web search. - :return: An asynchronous result object. - """ - prompt = messages[-1]["content"] - if context is None: - context = create_context(messages[:-1]) if len(messages) > 1 else None - if tone is None: - tone = tone if model.startswith("gpt-4") else model - tone = cls.get_model("" if tone is None else tone) - gpt4_turbo = True if model.startswith("gpt-4-turbo") else False - - return stream_generate( - prompt, tone, image, context, cookies, api_key, - proxy, web_search, gpt4_turbo, timeout, - **kwargs - ) - -def create_context(messages: Messages) -> str: - """ - Creates a context string from a list of messages. - - :param messages: A list of message dictionaries. - :return: A string representing the context created from the messages. 
- """ - return "".join( - f"[{message['role']}]" + ("(#message)" - if message['role'] != "system" - else "(#additional_instructions)") + f"\n{message['content']}" - for message in messages - ) + "\n\n" - -def get_ip_address() -> str: - return f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}" - -def get_default_cookies(): - #muid = get_random_hex().upper() - sid = get_random_hex().upper() - guid = get_random_hex().upper() - isodate = date.today().isoformat() - timestamp = int(time.time()) - zdate = "0001-01-01T00:00:00.0000000" - return { - "_C_Auth": "", - #"MUID": muid, - #"MUIDB": muid, - "_EDGE_S": f"F=1&SID={sid}", - "_EDGE_V": "1", - "SRCHD": "AF=hpcodx", - "SRCHUID": f"V=2&GUID={guid}&dmnchg=1", - "_RwBf": ( - f"r=0&ilt=1&ihpd=0&ispd=0&rc=3&rb=0&gb=0&rg=200&pc=0&mtu=0&rbb=0&g=0&cid=" - f"&clo=0&v=1&l={isodate}&lft={zdate}&aof=0&ard={zdate}" - f"&rwdbt={zdate}&rwflt={zdate}&o=2&p=&c=&t=0&s={zdate}" - f"&ts={isodate}&rwred=0&wls=&wlb=" - "&wle=&ccp=&cpt=&lka=0&lkt=0&aad=0&TH=" - ), - '_Rwho': f'u=d&ts={isodate}', - "_SS": f"SID={sid}&R=3&RB=0&GB=0&RG=200&RP=0", - "SRCHUSR": f"DOB={date.today().strftime('%Y%m%d')}&T={timestamp}", - "SRCHHPGUSR": f"HV={int(time.time())}", - "BCP": "AD=1&AL=1&SM=1", - "ipv6": f"hit={timestamp}", - '_C_ETH' : '1', - } - -async def create_headers(cookies: Cookies = None, api_key: str = None) -> dict: - if cookies is None: - # import nodriver as uc - # browser = await uc.start(headless=False) - # page = await browser.get(Defaults.home) - # await asyncio.sleep(10) - # cookies = {} - # for c in await page.browser.cookies.get_all(): - # if c.domain.endswith(".bing.com"): - # cookies[c.name] = c.value - # user_agent = await page.evaluate("window.navigator.userAgent") - # await page.close() - cookies = get_default_cookies() - if api_key is not None: - cookies["_U"] = api_key - headers = Defaults.headers.copy() - headers["cookie"] = "; ".join(f"{k}={v}" for k, v in cookies.items()) - return headers - -class 
Defaults: - """ - Default settings and configurations for the Bing provider. - """ - delimiter = "\x1e" - - # List of allowed message types for Bing responses - allowedMessageTypes = [ - "ActionRequest","Chat", - "ConfirmationCard", "Context", - "InternalSearchQuery", #"InternalSearchResult", - #"Disengaged", "InternalLoaderMessage", - "Progress", "RenderCardRequest", - "RenderContentRequest", "AdsQuery", - "SemanticSerp", "GenerateContentQuery", - "SearchQuery", "GeneratedCode", - "InternalTasksMessage" - ] - - sliceIds = { - "balanced": [ - "supllmnfe","archnewtf", - "stpstream", "stpsig", "vnextvoicecf", "scmcbase", "cmcpupsalltf", "sydtransctrl", - "thdnsrch", "220dcl1s0", "0215wcrwips0", "0305hrthrots0", "0130gpt4t", - "bingfc", "0225unsticky1", "0228scss0", - "defquerycf", "defcontrol", "3022tphpv" - ], - "creative": [ - "bgstream", "fltltst2c", - "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl", - "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t", - "bingfccf", "0225unsticky1", "0228scss0", - "3022tpvs0" - ], - "precise": [ - "bgstream", "fltltst2c", - "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl", - "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t", - "bingfccf", "0225unsticky1", "0228scss0", - "defquerycf", "3022tpvs0" - ], - "copilot": [] - } - - optionsSets = { - "balanced": { - "default": [ - "nlu_direct_response_filter", "deepleo", - "disable_emoji_spoken_text", "responsible_ai_policy_235", - "enablemm", "dv3sugg", "autosave", - "iyxapbing", "iycapbing", - "galileo", "saharagenconv5", "gldcl1p", - "gpt4tmncnp" - ], - "nosearch": [ - "nlu_direct_response_filter", "deepleo", - "disable_emoji_spoken_text", "responsible_ai_policy_235", - "enablemm", "dv3sugg", "autosave", - "iyxapbing", "iycapbing", - "galileo", "sunoupsell", "base64filter", "uprv4p1upd", - "hourthrot", "noctprf", "gndlogcf", "nosearchall" - ] - }, - "creative": { - "default": [ - 
"nlu_direct_response_filter", "deepleo", - "disable_emoji_spoken_text", "responsible_ai_policy_235", - "enablemm", "dv3sugg", - "iyxapbing", "iycapbing", - "h3imaginative", "techinstgnd", "hourthrot", "clgalileo", "gencontentv3", - "gpt4tmncnp" - ], - "nosearch": [ - "nlu_direct_response_filter", "deepleo", - "disable_emoji_spoken_text", "responsible_ai_policy_235", - "enablemm", "dv3sugg", "autosave", - "iyxapbing", "iycapbing", - "h3imaginative", "sunoupsell", "base64filter", "uprv4p1upd", - "hourthrot", "noctprf", "gndlogcf", "nosearchall", - "clgalileo", "nocache", "up4rp14bstcst" - ] - }, - "precise": { - "default": [ - "nlu_direct_response_filter", "deepleo", - "disable_emoji_spoken_text", "responsible_ai_policy_235", - "enablemm", "dv3sugg", - "iyxapbing", "iycapbing", - "h3precise", "techinstgnd", "hourthrot", "techinstgnd", "hourthrot", - "clgalileo", "gencontentv3" - ], - "nosearch": [ - "nlu_direct_response_filter", "deepleo", - "disable_emoji_spoken_text", "responsible_ai_policy_235", - "enablemm", "dv3sugg", "autosave", - "iyxapbing", "iycapbing", - "h3precise", "sunoupsell", "base64filter", "uprv4p1upd", - "hourthrot", "noctprf", "gndlogcf", "nosearchall", - "clgalileo", "nocache", "up4rp14bstcst" - ] - }, - "copilot": [ - "nlu_direct_response_filter", "deepleo", - "disable_emoji_spoken_text", "responsible_ai_policy_235", - "enablemm", "dv3sugg", - "iyxapbing", "iycapbing", - "h3precise", "clgalileo", "gencontentv3", "prjupy" - ], - } - - # Default location settings - location = { - "locale": "en-US", "market": "en-US", "region": "US", - "location":"lat:34.0536909;long:-118.242766;re=1000m;", - "locationHints": [{ - "country": "United States", "state": "California", "city": "Los Angeles", - "timezoneoffset": 8, "countryConfidence": 8, - "Center": {"Latitude": 34.0536909, "Longitude": -118.242766}, - "RegionType": 2, "SourceType": 1 - }], - } - - # Default headers for requests - home = "https://www.bing.com/chat?q=Microsoft+Copilot&FORM=hpcodx" - 
headers = { - **DEFAULT_HEADERS, - "accept": "application/json", - "referer": home, - "x-ms-client-request-id": str(uuid.uuid4()), - "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.15.1 OS/Windows", - } - -def format_message(msg: dict) -> str: - """ - Formats a message dictionary into a JSON string with a delimiter. - - :param msg: The message dictionary to format. - :return: A formatted string representation of the message. - """ - return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter - -def create_message( - conversation: Conversation, - prompt: str, - tone: str, - context: str = None, - image_request: ImageRequest = None, - web_search: bool = False, - gpt4_turbo: bool = False, - new_conversation: bool = True -) -> str: - """ - Creates a message for the Bing API with specified parameters. - - :param conversation: The current conversation object. - :param prompt: The user's input prompt. - :param tone: The desired tone for the response. - :param context: Additional context for the prompt. - :param image_request: The image request with the url. - :param web_search: Flag to enable web search. - :param gpt4_turbo: Flag to enable GPT-4 Turbo. - :return: A formatted string message for the Bing API. 
- """ - - options_sets = Defaults.optionsSets[tone.lower()] - if not web_search and "nosearch" in options_sets: - options_sets = options_sets["nosearch"] - elif "default" in options_sets: - options_sets = options_sets["default"] - options_sets = options_sets.copy() - if gpt4_turbo: - options_sets.append("dlgpt4t") - - request_id = str(uuid.uuid4()) - struct = { - "arguments":[{ - "source": "cib", - "optionsSets": options_sets, - "allowedMessageTypes": Defaults.allowedMessageTypes, - "sliceIds": Defaults.sliceIds[tone.lower()], - "verbosity": "verbose", - "scenario": "CopilotMicrosoftCom" if tone == Tones.copilot else "SERP", - "plugins": [{"id": "c310c353-b9f0-4d76-ab0d-1dd5e979cf68", "category": 1}] if web_search else [], - "traceId": get_random_hex(40), - "conversationHistoryOptionsSets": ["autosave","savemem","uprofupd","uprofgen"], - "gptId": "copilot", - "isStartOfSession": new_conversation, - "requestId": request_id, - "message":{ - **Defaults.location, - "userIpAddress": get_ip_address(), - "timestamp": datetime.now().isoformat(), - "author": "user", - "inputMethod": "Keyboard", - "text": prompt, - "messageType": "Chat", - "requestId": request_id, - "messageId": request_id - }, - "tone": "Balanced" if tone == Tones.copilot else tone, - "spokenTextMode": "None", - "conversationId": conversation.conversationId, - "participant": {"id": conversation.clientId} - }], - "invocationId": "0", - "target": "chat", - "type": 4 - } - - if image_request and image_request.get('imageUrl') and image_request.get('originalImageUrl'): - struct['arguments'][0]['message']['originalImageUrl'] = image_request.get('originalImageUrl') - struct['arguments'][0]['message']['imageUrl'] = image_request.get('imageUrl') - struct['arguments'][0]['experienceType'] = None - struct['arguments'][0]['attachedFileInfo'] = {"fileName": None, "fileType": None} - - if context: - struct['arguments'][0]['previousMessages'] = [{ - "author": "user", - "description": context, - "contextType": "ClientApp", 
- "messageType": "Context", - "messageId": "discover-web--page-ping-mriduna-----" - }] - - return format_message(struct) - -async def stream_generate( - prompt: str, - tone: str, - image: ImageType = None, - context: str = None, - cookies: dict = None, - api_key: str = None, - proxy: str = None, - web_search: bool = False, - gpt4_turbo: bool = False, - timeout: int = 900, - conversation: Conversation = None, - return_conversation: bool = False, - raise_apology: bool = False, - max_retries: int = None, - sleep_retry: int = 15, - **kwargs -): - """ - Asynchronously streams generated responses from the Bing API. - - :param prompt: The user's input prompt. - :param tone: The desired tone for the response. - :param image: The image type involved in the response. - :param context: Additional context for the prompt. - :param cookies: Cookies for the session. - :param web_search: Flag to enable web search. - :param gpt4_turbo: Flag to enable GPT-4 Turbo. - :param timeout: Timeout for the request. - :return: An asynchronous generator yielding responses. 
- """ - headers = await create_headers(cookies, api_key) - new_conversation = conversation is None - max_retries = (5 if new_conversation else 0) if max_retries is None else max_retries - first = True - while first or conversation is None: - async with StreamSession(timeout=timeout, proxy=proxy) as session: - first = False - do_read = True - try: - if conversation is None: - conversation = await create_conversation(session, headers, tone) - if return_conversation: - yield conversation - except (ResponseStatusError, RateLimitError) as e: - max_retries -= 1 - if max_retries < 1: - raise e - if debug.logging: - print(f"Bing: Retry: {e}") - headers = await create_headers() - await asyncio.sleep(sleep_retry) - continue - - image_request = await upload_image( - session, - image, - "Balanced" if tone == Tones.copilot else tone, - headers - ) if image else None - async with session.ws_connect( - 'wss://s.copilot.microsoft.com/sydney/ChatHub' - if tone == "Copilot" else - 'wss://sydney.bing.com/sydney/ChatHub', - autoping=False, - params={'sec_access_token': conversation.conversationSignature}, - headers=headers - ) as wss: - await wss.send_str(format_message({'protocol': 'json', 'version': 1})) - await wss.send_str(format_message({"type": 6})) - await wss.receive_str() - await wss.send_str(create_message( - conversation, prompt, tone, - context if new_conversation else None, - image_request, web_search, gpt4_turbo, - new_conversation - )) - response_txt = '' - returned_text = '' - message_id = None - while do_read: - try: - msg = await wss.receive_str() - except TypeError: - continue - objects = msg.split(Defaults.delimiter) - for obj in objects: - if not obj: - continue - try: - response = json.loads(obj) - except ValueError: - continue - if response and response.get('type') == 1 and response['arguments'][0].get('messages'): - message = response['arguments'][0]['messages'][0] - if message_id is not None and message_id != message["messageId"]: - returned_text = '' - 
message_id = message["messageId"] - image_response = None - if (raise_apology and message['contentOrigin'] == 'Apology'): - raise ResponseError("Apology Response Error") - if 'adaptiveCards' in message: - card = message['adaptiveCards'][0]['body'][0] - if "text" in card: - response_txt = card.get('text') - if message.get('messageType') and "inlines" in card: - inline_txt = card['inlines'][0].get('text') - response_txt += f"{inline_txt}\n" - elif message.get('contentType') == "IMAGE": - prompt = message.get('text') - try: - image_client = BingCreateImages(cookies, proxy, api_key) - image_response = await image_client.create_async(prompt) - except Exception as e: - if debug.logging: - print(f"Bing: Failed to create images: {e}") - image_response = f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}" - if response_txt.startswith(returned_text): - new = response_txt[len(returned_text):] - if new not in ("", "\n"): - yield new - returned_text = response_txt - if image_response is not None: - yield image_response - elif response.get('type') == 2: - result = response['item']['result'] - do_read = False - if result.get('error'): - max_retries -= 1 - if max_retries < 1: - if result["value"] == "CaptchaChallenge": - raise RateLimitError(f"{result['value']}: Use other cookies or/and ip address") - else: - raise RuntimeError(f"{result['value']}: {result['message']}") - if debug.logging: - print(f"Bing: Retry: {result['value']}: {result['message']}") - headers = await create_headers() - conversation = None - await asyncio.sleep(sleep_retry) - break - elif response.get('type') == 3: - do_read = False - break - if conversation is not None: - await delete_conversation(session, conversation, headers) diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index 378f09c8..e23f5f60 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -14,7 +14,6 @@ from .local import * from .AIUncensored import AIUncensored from .Airforce import Airforce from 
.AmigoChat import AmigoChat -from .Bing import Bing from .Blackbox import Blackbox from .ChatGpt import ChatGpt from .ChatGptEs import ChatGptEs diff --git a/g4f/Provider/deprecated/Bing.py b/g4f/Provider/deprecated/Bing.py new file mode 100644 index 00000000..49bce146 --- /dev/null +++ b/g4f/Provider/deprecated/Bing.py @@ -0,0 +1,523 @@ +from __future__ import annotations + +import random +import json +import uuid +import time +import asyncio +from urllib import parse +from datetime import datetime, date + +from ...typing import AsyncResult, Messages, ImageType, Cookies +from ...image import ImageRequest +from ...errors import ResponseError, ResponseStatusError, RateLimitError +from ...requests import DEFAULT_HEADERS +from ...requests.aiohttp import StreamSession +from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin +from ..helper import get_random_hex +from ..bing.upload_image import upload_image +from ..bing.conversation import Conversation, create_conversation, delete_conversation +from ..needs_auth.BingCreateImages import BingCreateImages +from ... import debug + +class Tones: + """ + Defines the different tone options for the Bing provider. + """ + creative = "Creative" + balanced = "Balanced" + precise = "Precise" + copilot = "Copilot" + +class Bing(AsyncGeneratorProvider, ProviderModelMixin): + """ + Bing provider for generating responses using the Bing API. 
+ """ + label = "Microsoft Copilot in Bing" + url = "https://bing.com/chat" + working = False + supports_message_history = True + default_model = "Balanced" + default_vision_model = "gpt-4-vision" + models = [getattr(Tones, key) for key in Tones.__dict__ if not key.startswith("__")] + + @classmethod + def create_async_generator( + cls, + model: str, + messages: Messages, + proxy: str = None, + timeout: int = 900, + api_key: str = None, + cookies: Cookies = None, + tone: str = None, + image: ImageType = None, + web_search: bool = False, + context: str = None, + **kwargs + ) -> AsyncResult: + """ + Creates an asynchronous generator for producing responses from Bing. + + :param model: The model to use. + :param messages: Messages to process. + :param proxy: Proxy to use for requests. + :param timeout: Timeout for requests. + :param cookies: Cookies for the session. + :param tone: The tone of the response. + :param image: The image type to be used. + :param web_search: Flag to enable or disable web search. + :return: An asynchronous result object. + """ + prompt = messages[-1]["content"] + if context is None: + context = create_context(messages[:-1]) if len(messages) > 1 else None + if tone is None: + tone = tone if model.startswith("gpt-4") else model + tone = cls.get_model("" if tone is None else tone) + gpt4_turbo = True if model.startswith("gpt-4-turbo") else False + + return stream_generate( + prompt, tone, image, context, cookies, api_key, + proxy, web_search, gpt4_turbo, timeout, + **kwargs + ) + +def create_context(messages: Messages) -> str: + """ + Creates a context string from a list of messages. + + :param messages: A list of message dictionaries. + :return: A string representing the context created from the messages. 
+ """ + return "".join( + f"[{message['role']}]" + ("(#message)" + if message['role'] != "system" + else "(#additional_instructions)") + f"\n{message['content']}" + for message in messages + ) + "\n\n" + +def get_ip_address() -> str: + return f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}" + +def get_default_cookies(): + #muid = get_random_hex().upper() + sid = get_random_hex().upper() + guid = get_random_hex().upper() + isodate = date.today().isoformat() + timestamp = int(time.time()) + zdate = "0001-01-01T00:00:00.0000000" + return { + "_C_Auth": "", + #"MUID": muid, + #"MUIDB": muid, + "_EDGE_S": f"F=1&SID={sid}", + "_EDGE_V": "1", + "SRCHD": "AF=hpcodx", + "SRCHUID": f"V=2&GUID={guid}&dmnchg=1", + "_RwBf": ( + f"r=0&ilt=1&ihpd=0&ispd=0&rc=3&rb=0&gb=0&rg=200&pc=0&mtu=0&rbb=0&g=0&cid=" + f"&clo=0&v=1&l={isodate}&lft={zdate}&aof=0&ard={zdate}" + f"&rwdbt={zdate}&rwflt={zdate}&o=2&p=&c=&t=0&s={zdate}" + f"&ts={isodate}&rwred=0&wls=&wlb=" + "&wle=&ccp=&cpt=&lka=0&lkt=0&aad=0&TH=" + ), + '_Rwho': f'u=d&ts={isodate}', + "_SS": f"SID={sid}&R=3&RB=0&GB=0&RG=200&RP=0", + "SRCHUSR": f"DOB={date.today().strftime('%Y%m%d')}&T={timestamp}", + "SRCHHPGUSR": f"HV={int(time.time())}", + "BCP": "AD=1&AL=1&SM=1", + "ipv6": f"hit={timestamp}", + '_C_ETH' : '1', + } + +async def create_headers(cookies: Cookies = None, api_key: str = None) -> dict: + if cookies is None: + # import nodriver as uc + # browser = await uc.start(headless=False) + # page = await browser.get(Defaults.home) + # await asyncio.sleep(10) + # cookies = {} + # for c in await page.browser.cookies.get_all(): + # if c.domain.endswith(".bing.com"): + # cookies[c.name] = c.value + # user_agent = await page.evaluate("window.navigator.userAgent") + # await page.close() + cookies = get_default_cookies() + if api_key is not None: + cookies["_U"] = api_key + headers = Defaults.headers.copy() + headers["cookie"] = "; ".join(f"{k}={v}" for k, v in cookies.items()) + return headers + +class 
Defaults: + """ + Default settings and configurations for the Bing provider. + """ + delimiter = "\x1e" + + # List of allowed message types for Bing responses + allowedMessageTypes = [ + "ActionRequest","Chat", + "ConfirmationCard", "Context", + "InternalSearchQuery", #"InternalSearchResult", + #"Disengaged", "InternalLoaderMessage", + "Progress", "RenderCardRequest", + "RenderContentRequest", "AdsQuery", + "SemanticSerp", "GenerateContentQuery", + "SearchQuery", "GeneratedCode", + "InternalTasksMessage" + ] + + sliceIds = { + "balanced": [ + "supllmnfe","archnewtf", + "stpstream", "stpsig", "vnextvoicecf", "scmcbase", "cmcpupsalltf", "sydtransctrl", + "thdnsrch", "220dcl1s0", "0215wcrwips0", "0305hrthrots0", "0130gpt4t", + "bingfc", "0225unsticky1", "0228scss0", + "defquerycf", "defcontrol", "3022tphpv" + ], + "creative": [ + "bgstream", "fltltst2c", + "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl", + "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t", + "bingfccf", "0225unsticky1", "0228scss0", + "3022tpvs0" + ], + "precise": [ + "bgstream", "fltltst2c", + "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl", + "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t", + "bingfccf", "0225unsticky1", "0228scss0", + "defquerycf", "3022tpvs0" + ], + "copilot": [] + } + + optionsSets = { + "balanced": { + "default": [ + "nlu_direct_response_filter", "deepleo", + "disable_emoji_spoken_text", "responsible_ai_policy_235", + "enablemm", "dv3sugg", "autosave", + "iyxapbing", "iycapbing", + "galileo", "saharagenconv5", "gldcl1p", + "gpt4tmncnp" + ], + "nosearch": [ + "nlu_direct_response_filter", "deepleo", + "disable_emoji_spoken_text", "responsible_ai_policy_235", + "enablemm", "dv3sugg", "autosave", + "iyxapbing", "iycapbing", + "galileo", "sunoupsell", "base64filter", "uprv4p1upd", + "hourthrot", "noctprf", "gndlogcf", "nosearchall" + ] + }, + "creative": { + "default": [ + 
"nlu_direct_response_filter", "deepleo", + "disable_emoji_spoken_text", "responsible_ai_policy_235", + "enablemm", "dv3sugg", + "iyxapbing", "iycapbing", + "h3imaginative", "techinstgnd", "hourthrot", "clgalileo", "gencontentv3", + "gpt4tmncnp" + ], + "nosearch": [ + "nlu_direct_response_filter", "deepleo", + "disable_emoji_spoken_text", "responsible_ai_policy_235", + "enablemm", "dv3sugg", "autosave", + "iyxapbing", "iycapbing", + "h3imaginative", "sunoupsell", "base64filter", "uprv4p1upd", + "hourthrot", "noctprf", "gndlogcf", "nosearchall", + "clgalileo", "nocache", "up4rp14bstcst" + ] + }, + "precise": { + "default": [ + "nlu_direct_response_filter", "deepleo", + "disable_emoji_spoken_text", "responsible_ai_policy_235", + "enablemm", "dv3sugg", + "iyxapbing", "iycapbing", + "h3precise", "techinstgnd", "hourthrot", "techinstgnd", "hourthrot", + "clgalileo", "gencontentv3" + ], + "nosearch": [ + "nlu_direct_response_filter", "deepleo", + "disable_emoji_spoken_text", "responsible_ai_policy_235", + "enablemm", "dv3sugg", "autosave", + "iyxapbing", "iycapbing", + "h3precise", "sunoupsell", "base64filter", "uprv4p1upd", + "hourthrot", "noctprf", "gndlogcf", "nosearchall", + "clgalileo", "nocache", "up4rp14bstcst" + ] + }, + "copilot": [ + "nlu_direct_response_filter", "deepleo", + "disable_emoji_spoken_text", "responsible_ai_policy_235", + "enablemm", "dv3sugg", + "iyxapbing", "iycapbing", + "h3precise", "clgalileo", "gencontentv3", "prjupy" + ], + } + + # Default location settings + location = { + "locale": "en-US", "market": "en-US", "region": "US", + "location":"lat:34.0536909;long:-118.242766;re=1000m;", + "locationHints": [{ + "country": "United States", "state": "California", "city": "Los Angeles", + "timezoneoffset": 8, "countryConfidence": 8, + "Center": {"Latitude": 34.0536909, "Longitude": -118.242766}, + "RegionType": 2, "SourceType": 1 + }], + } + + # Default headers for requests + home = "https://www.bing.com/chat?q=Microsoft+Copilot&FORM=hpcodx" + 
headers = { + **DEFAULT_HEADERS, + "accept": "application/json", + "referer": home, + "x-ms-client-request-id": str(uuid.uuid4()), + "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.15.1 OS/Windows", + } + +def format_message(msg: dict) -> str: + """ + Formats a message dictionary into a JSON string with a delimiter. + + :param msg: The message dictionary to format. + :return: A formatted string representation of the message. + """ + return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter + +def create_message( + conversation: Conversation, + prompt: str, + tone: str, + context: str = None, + image_request: ImageRequest = None, + web_search: bool = False, + gpt4_turbo: bool = False, + new_conversation: bool = True +) -> str: + """ + Creates a message for the Bing API with specified parameters. + + :param conversation: The current conversation object. + :param prompt: The user's input prompt. + :param tone: The desired tone for the response. + :param context: Additional context for the prompt. + :param image_request: The image request with the url. + :param web_search: Flag to enable web search. + :param gpt4_turbo: Flag to enable GPT-4 Turbo. + :return: A formatted string message for the Bing API. 
+ """ + + options_sets = Defaults.optionsSets[tone.lower()] + if not web_search and "nosearch" in options_sets: + options_sets = options_sets["nosearch"] + elif "default" in options_sets: + options_sets = options_sets["default"] + options_sets = options_sets.copy() + if gpt4_turbo: + options_sets.append("dlgpt4t") + + request_id = str(uuid.uuid4()) + struct = { + "arguments":[{ + "source": "cib", + "optionsSets": options_sets, + "allowedMessageTypes": Defaults.allowedMessageTypes, + "sliceIds": Defaults.sliceIds[tone.lower()], + "verbosity": "verbose", + "scenario": "CopilotMicrosoftCom" if tone == Tones.copilot else "SERP", + "plugins": [{"id": "c310c353-b9f0-4d76-ab0d-1dd5e979cf68", "category": 1}] if web_search else [], + "traceId": get_random_hex(40), + "conversationHistoryOptionsSets": ["autosave","savemem","uprofupd","uprofgen"], + "gptId": "copilot", + "isStartOfSession": new_conversation, + "requestId": request_id, + "message":{ + **Defaults.location, + "userIpAddress": get_ip_address(), + "timestamp": datetime.now().isoformat(), + "author": "user", + "inputMethod": "Keyboard", + "text": prompt, + "messageType": "Chat", + "requestId": request_id, + "messageId": request_id + }, + "tone": "Balanced" if tone == Tones.copilot else tone, + "spokenTextMode": "None", + "conversationId": conversation.conversationId, + "participant": {"id": conversation.clientId} + }], + "invocationId": "0", + "target": "chat", + "type": 4 + } + + if image_request and image_request.get('imageUrl') and image_request.get('originalImageUrl'): + struct['arguments'][0]['message']['originalImageUrl'] = image_request.get('originalImageUrl') + struct['arguments'][0]['message']['imageUrl'] = image_request.get('imageUrl') + struct['arguments'][0]['experienceType'] = None + struct['arguments'][0]['attachedFileInfo'] = {"fileName": None, "fileType": None} + + if context: + struct['arguments'][0]['previousMessages'] = [{ + "author": "user", + "description": context, + "contextType": "ClientApp", 
+ "messageType": "Context", + "messageId": "discover-web--page-ping-mriduna-----" + }] + + return format_message(struct) + +async def stream_generate( + prompt: str, + tone: str, + image: ImageType = None, + context: str = None, + cookies: dict = None, + api_key: str = None, + proxy: str = None, + web_search: bool = False, + gpt4_turbo: bool = False, + timeout: int = 900, + conversation: Conversation = None, + return_conversation: bool = False, + raise_apology: bool = False, + max_retries: int = None, + sleep_retry: int = 15, + **kwargs +): + """ + Asynchronously streams generated responses from the Bing API. + + :param prompt: The user's input prompt. + :param tone: The desired tone for the response. + :param image: The image type involved in the response. + :param context: Additional context for the prompt. + :param cookies: Cookies for the session. + :param web_search: Flag to enable web search. + :param gpt4_turbo: Flag to enable GPT-4 Turbo. + :param timeout: Timeout for the request. + :return: An asynchronous generator yielding responses. 
+ """ + headers = await create_headers(cookies, api_key) + new_conversation = conversation is None + max_retries = (5 if new_conversation else 0) if max_retries is None else max_retries + first = True + while first or conversation is None: + async with StreamSession(timeout=timeout, proxy=proxy) as session: + first = False + do_read = True + try: + if conversation is None: + conversation = await create_conversation(session, headers, tone) + if return_conversation: + yield conversation + except (ResponseStatusError, RateLimitError) as e: + max_retries -= 1 + if max_retries < 1: + raise e + if debug.logging: + print(f"Bing: Retry: {e}") + headers = await create_headers() + await asyncio.sleep(sleep_retry) + continue + + image_request = await upload_image( + session, + image, + "Balanced" if tone == Tones.copilot else tone, + headers + ) if image else None + async with session.ws_connect( + 'wss://s.copilot.microsoft.com/sydney/ChatHub' + if tone == "Copilot" else + 'wss://sydney.bing.com/sydney/ChatHub', + autoping=False, + params={'sec_access_token': conversation.conversationSignature}, + headers=headers + ) as wss: + await wss.send_str(format_message({'protocol': 'json', 'version': 1})) + await wss.send_str(format_message({"type": 6})) + await wss.receive_str() + await wss.send_str(create_message( + conversation, prompt, tone, + context if new_conversation else None, + image_request, web_search, gpt4_turbo, + new_conversation + )) + response_txt = '' + returned_text = '' + message_id = None + while do_read: + try: + msg = await wss.receive_str() + except TypeError: + continue + objects = msg.split(Defaults.delimiter) + for obj in objects: + if not obj: + continue + try: + response = json.loads(obj) + except ValueError: + continue + if response and response.get('type') == 1 and response['arguments'][0].get('messages'): + message = response['arguments'][0]['messages'][0] + if message_id is not None and message_id != message["messageId"]: + returned_text = '' + 
message_id = message["messageId"] + image_response = None + if (raise_apology and message['contentOrigin'] == 'Apology'): + raise ResponseError("Apology Response Error") + if 'adaptiveCards' in message: + card = message['adaptiveCards'][0]['body'][0] + if "text" in card: + response_txt = card.get('text') + if message.get('messageType') and "inlines" in card: + inline_txt = card['inlines'][0].get('text') + response_txt += f"{inline_txt}\n" + elif message.get('contentType') == "IMAGE": + prompt = message.get('text') + try: + image_client = BingCreateImages(cookies, proxy, api_key) + image_response = await image_client.create_async(prompt) + except Exception as e: + if debug.logging: + print(f"Bing: Failed to create images: {e}") + image_response = f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}" + if response_txt.startswith(returned_text): + new = response_txt[len(returned_text):] + if new not in ("", "\n"): + yield new + returned_text = response_txt + if image_response is not None: + yield image_response + elif response.get('type') == 2: + result = response['item']['result'] + do_read = False + if result.get('error'): + max_retries -= 1 + if max_retries < 1: + if result["value"] == "CaptchaChallenge": + raise RateLimitError(f"{result['value']}: Use other cookies or/and ip address") + else: + raise RuntimeError(f"{result['value']}: {result['message']}") + if debug.logging: + print(f"Bing: Retry: {result['value']}: {result['message']}") + headers = await create_headers() + conversation = None + await asyncio.sleep(sleep_retry) + break + elif response.get('type') == 3: + do_read = False + break + if conversation is not None: + await delete_conversation(session, conversation, headers) diff --git a/g4f/Provider/deprecated/__init__.py b/g4f/Provider/deprecated/__init__.py index 368a71a0..afb636e8 100644 --- a/g4f/Provider/deprecated/__init__.py +++ b/g4f/Provider/deprecated/__init__.py @@ -32,3 +32,4 @@ from .GPTalk import GPTalk from .Hashnode import 
Hashnode from .Ylokh import Ylokh from .OpenAssistant import OpenAssistant +from .Bing import Bing \ No newline at end of file diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css index 856f9fb5..977a8908 100644 --- a/g4f/gui/client/static/css/style.css +++ b/g4f/gui/client/static/css/style.css @@ -108,6 +108,25 @@ body { border: 1px solid var(--blur-border); } +.new_version { + position: absolute; + right: 0; + top: 0; + padding: 10px; + font-weight: 500; + background-color: rgba(0, 0, 0, 0.5); + color: var(--colour-3); +} + +.white .new_version { + color: var(--colour-1); +} + +.new_version a { + color: var(--colour-4); + text-decoration: underline dotted; +} + .conversations { max-width: 300px; padding: var(--section-gap); diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js index fd9cc50e..0bf49ac3 100644 --- a/g4f/gui/client/static/js/chat.v1.js +++ b/g4f/gui/client/static/js/chat.v1.js @@ -1331,15 +1331,21 @@ async function load_version() { document.title = 'g4f - ' + versions["version"]; let text = "version ~ " if (versions["version"] != versions["latest_version"]) { - let release_url = 'https://github.com/xtekky/gpt4free/releases/tag/' + versions["latest_version"]; + let release_url = 'https://github.com/xtekky/gpt4free/releases/latest'; + let title = `New version: ${versions["latest_version"]}`; text += `${versions["version"]} 🆕`; + const new_version = document.createElement("div"); + new_version.classList.add("new_version"); + const link = `v${versions["latest_version"]}`; + new_version.innerHTML = `g4f ${link}  🆕`; + new_version.addEventListener("click", ()=>new_version.parentElement.removeChild(new_version)); + document.body.appendChild(new_version); } else { text += versions["version"]; } document.getElementById("version_text").innerHTML = text } -setTimeout(load_version, 2000); +setTimeout(load_version, 100); [imageInput, cameraInput].forEach((el) => { el.addEventListener('click', 
async () => { diff --git a/requirements-slim.txt b/requirements-slim.txt index 2377faa3..a7e105db 100644 --- a/requirements-slim.txt +++ b/requirements-slim.txt @@ -3,7 +3,6 @@ pycryptodome curl_cffi>=0.6.2 aiohttp certifi -duckduckgo-search>=5.0 nest_asyncio werkzeug pillow -- cgit v1.2.3