path: root/g4f/Provider/needs_auth/OpenaiChat.py
author     Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-11-19 15:26:03 +0100
committer  Heiner Lohaus <hlohaus@users.noreply.github.com>  2024-11-19 15:26:03 +0100
commit     8f3fbee0d8a4ca69af72dae989ae8954181d0108 (patch)
tree       0481d391ff397c7d4be93a7224c3717e03ae2b3f /g4f/Provider/needs_auth/OpenaiChat.py
parent     Fix generate messages on error in gui (diff)
Diffstat (limited to 'g4f/Provider/needs_auth/OpenaiChat.py')
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py  63
1 file changed, 35 insertions(+), 28 deletions(-)
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 22264dd9..15a87f38 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -282,8 +282,11 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
Raises:
RuntimeError: If there's an error in downloading the image, including issues with the HTTP request or response.
"""
- prompt = element["metadata"]["dalle"]["prompt"]
- file_id = element["asset_pointer"].split("file-service://", 1)[1]
+ try:
+ prompt = element["metadata"]["dalle"]["prompt"]
+ file_id = element["asset_pointer"].split("file-service://", 1)[1]
+ except Exception as e:
+ raise RuntimeError(f"No Image: {e.__class__.__name__}: {e}")
try:
async with session.get(f"{cls.url}/backend-api/files/{file_id}/download", headers=headers) as response:
cls._update_request_args(session)
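The new try/except turns a missing "dalle" prompt or asset pointer into a single RuntimeError instead of leaking a bare KeyError. A minimal sketch of that defensive-extraction pattern, with an invented element dict for illustration (not taken from a real backend response):

# Sketch of the defensive-extraction pattern added above; the example
# element and its values are made up for illustration.
def extract_image_request(element: dict) -> tuple[str, str]:
    try:
        prompt = element["metadata"]["dalle"]["prompt"]
        file_id = element["asset_pointer"].split("file-service://", 1)[1]
    except (KeyError, IndexError, AttributeError, TypeError) as e:
        # Surface one uniform error instead of exposing dict internals.
        raise RuntimeError(f"No image: {e.__class__.__name__}: {e}")
    return prompt, file_id

example = {
    "metadata": {"dalle": {"prompt": "a red bicycle"}},
    "asset_pointer": "file-service://file-abc123",
}
print(extract_image_request(example))  # ('a red bicycle', 'file-abc123')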
@@ -380,9 +383,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
except Exception as e:
image_request = None
- if debug.logging:
- print("OpenaiChat: Upload image failed")
- print(f"{e.__class__.__name__}: {e}")
+ debug.log("OpenaiChat: Upload image failed")
+ debug.log(f"{e.__class__.__name__}: {e}")
model = cls.get_model(model)
if conversation is None:
conversation = Conversation(conversation_id, str(uuid.uuid4()) if parent_id is None else parent_id)
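Throughout this commit, the repeated "if debug.logging: print(...)" pairs are collapsed into debug.log calls. The g4f debug module itself is not part of this diff; the sketch below is only a stand-in showing the kind of flag-gated helper such calls assume (names and behavior are assumptions, not the actual g4f.debug API):

# Hypothetical stand-in for a flag-gated logger like debug.log above;
# not the real g4f.debug implementation.
logging: bool = False

def log(*texts: str) -> None:
    # Emit only while the module-level flag is enabled.
    if logging:
        print(*texts)

log("OpenaiChat: Upload image failed")      # silent: logging is False
logging = True
log("ValueError: example upload failure")   # printed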
@@ -419,11 +421,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
user_agent=cls._headers["user-agent"],
proof_token=RequestConfig.proof_token
)
- if debug.logging:
- print(
- 'Arkose:', False if not need_arkose else RequestConfig.arkose_token[:12]+"...",
- 'Proofofwork:', False if proofofwork is None else proofofwork[:12]+"...",
- )
+ [debug.log(text) for text in (
+ f"Arkose: {'False' if not need_arkose else RequestConfig.arkose_token[:12]+'...'}",
+ f"Proofofwork: {'False' if proofofwork is None else proofofwork[:12]+'...'}",
+ )]
ws = None
if need_arkose:
async with session.post(f"{cls.url}/backend-api/register-websocket", headers=cls._headers) as response:
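The list comprehension above is used purely for its logging side effect; a plain loop expresses the same thing. The sketch below shows the token-preview idea (first 12 characters plus an ellipsis so credentials never reach the logs in full), with invented token values:

# Sketch of the token-preview logging above; the token strings are made up.
def preview(token, limit: int = 12) -> str:
    # Log only a short prefix, never the full credential.
    return "False" if not token else token[:limit] + "..."

arkose_token = "arkose-0123456789abcdef"   # invented value
proofofwork = None

for text in (
    f"Arkose: {preview(arkose_token)}",
    f"Proofofwork: {preview(proofofwork)}",
):
    print(text)
# Arkose: arkose-01234...
# Proofofwork: False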
@@ -444,6 +445,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
}
if conversation.conversation_id is not None:
data["conversation_id"] = conversation.conversation_id
+ debug.log(f"OpenaiChat: Use conversation: {conversation.conversation_id}")
if action != "continue":
messages = messages if conversation_id is None else [messages[-1]]
data["messages"] = cls.create_messages(messages, image_request)
@@ -468,8 +470,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
cls._update_request_args(session)
if response.status == 403 and max_retries > 0:
max_retries -= 1
- if debug.logging:
- print(f"Retry: Error {response.status}: {await response.text()}")
+ debug.log(f"Retry: Error {response.status}: {await response.text()}")
await asyncio.sleep(5)
continue
await raise_for_status(response)
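A 403 response now logs through debug.log and is retried after a five-second pause, up to max_retries times. A self-contained sketch of that bounded retry loop, using aiohttp against a placeholder URL (the real code posts the chat payload with the shared session headers instead):

# Sketch of the bounded 403-retry pattern above; URL and payload are placeholders.
import asyncio
import aiohttp

async def post_with_retries(url: str, max_retries: int = 3) -> str:
    async with aiohttp.ClientSession() as session:
        while True:
            async with session.post(url, json={}) as response:
                if response.status == 403 and max_retries > 0:
                    max_retries -= 1
                    print(f"Retry: Error {response.status}: {await response.text()}")
                    await asyncio.sleep(5)
                    continue
                response.raise_for_status()
                return await response.text()

# asyncio.run(post_with_retries("https://example.com/backend-api/conversation"))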
@@ -553,20 +554,27 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
elif isinstance(v, dict):
if fields.conversation_id is None:
fields.conversation_id = v.get("conversation_id")
- fields.message_id = v.get("message", {}).get("id")
- c = v.get("message", {}).get("content", {})
- if c.get("content_type") == "multimodal_text":
- generated_images = []
- for element in c.get("parts"):
- if element.get("content_type") == "image_asset_pointer":
- generated_images.append(
- cls.get_generated_image(session, cls._headers, element)
- )
- elif element.get("content_type") == "text":
- for part in element.get("parts", []):
- yield part
- for image_response in await asyncio.gather(*generated_images):
- yield image_response
+ debug.log(f"OpenaiChat: New conversation: {fields.conversation_id}")
+ m = v.get("message", {})
+ if m.get("author", {}).get("role") == "assistant":
+ fields.message_id = v.get("message", {}).get("id")
+ c = m.get("content", {})
+ if c.get("content_type") == "multimodal_text":
+ generated_images = []
+ for element in c.get("parts"):
+ if isinstance(element, str):
+ debug.log(f"No image or text: {line}")
+ elif element.get("content_type") == "image_asset_pointer":
+ generated_images.append(
+ cls.get_generated_image(session, cls._headers, element)
+ )
+ elif element.get("content_type") == "text":
+ for part in element.get("parts", []):
+ yield part
+ for image_response in await asyncio.gather(*generated_images):
+ yield image_response
+ else:
+ debug.log(f"OpenaiChat: {line}")
return
if "error" in line and line.get("error"):
raise RuntimeError(line.get("error"))
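For multimodal answers the image parts are collected as download coroutines and awaited together with asyncio.gather once the text parts have been yielded. A simplified sketch of that fan-out, with a stub standing in for the real backend-api file download:

# Simplified sketch of the multimodal handling above; download_image is a
# stub in place of the real backend-api request.
import asyncio

async def download_image(element: dict) -> str:
    await asyncio.sleep(0)  # pretend network I/O
    return f"image for {element['asset_pointer']}"

async def iter_parts(parts: list):
    pending = []
    for element in parts:
        if isinstance(element, str):
            continue  # plain strings carry no structured content here
        if element.get("content_type") == "image_asset_pointer":
            pending.append(download_image(element))
        elif element.get("content_type") == "text":
            for part in element.get("parts", []):
                yield part
    for image in await asyncio.gather(*pending):
        yield image

async def main():
    parts = [
        {"content_type": "text", "parts": ["Here is your image:"]},
        {"content_type": "image_asset_pointer", "asset_pointer": "file-service://file-xyz"},
    ]
    async for item in iter_parts(parts):
        print(item)

asyncio.run(main())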
@@ -579,8 +587,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
user_data_dir = user_config_dir("g4f-nodriver")
else:
user_data_dir = None
- if debug.logging:
- print(f"Open nodriver with user_dir: {user_data_dir}")
+ debug.log(f"Open nodriver with user_dir: {user_data_dir}")
browser = await nodriver.start(
user_data_dir=user_data_dir,
browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
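The nodriver launch keeps a persistent profile directory (when a platformdirs-style user_config_dir is available) and forwards an optional proxy. A hedged sketch assembling the same pieces shown above; the availability checks are simplified and any proxy value would come from the caller:

# Sketch mirroring the nodriver launch above; the platformdirs fallback is
# simplified and the proxy value is supplied by the caller.
import nodriver
from platformdirs import user_config_dir

async def open_browser(proxy=None):
    try:
        user_data_dir = user_config_dir("g4f-nodriver")
    except Exception:
        user_data_dir = None
    print(f"Open nodriver with user_dir: {user_data_dir}")
    return await nodriver.start(
        user_data_dir=user_data_dir,
        browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
    )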