commit b3d19c5660c1ed6c92b159087b5b0826682831c1 (patch)
tree 80ba1d0a2d2cdbe44b6ad83c378057772d92725f
parent Merge pull request #1665 from hlohaus/phind2 (diff)
parent Add word count from iG8R (diff)
author H Lohaus <hlohaus@users.noreply.github.com> 2024-03-09 19:51:35 +0100
committer GitHub <noreply@github.com> 2024-03-09 19:51:35 +0100
-rw-r--r--  g4f/Provider/GeminiPro.py             |  18
-rw-r--r--  g4f/Provider/bing/conversation.py     |   2
-rw-r--r--  g4f/Provider/needs_auth/OpenaiChat.py | 103
-rw-r--r--  g4f/gui/client/css/style.css          |  41
-rw-r--r--  g4f/gui/client/html/index.html        |   6
-rw-r--r--  g4f/gui/client/js/chat.v1.js          | 327
6 files changed, 267 insertions, 230 deletions
diff --git a/g4f/Provider/GeminiPro.py b/g4f/Provider/GeminiPro.py
index 1c5487b1..4e31915a 100644
--- a/g4f/Provider/GeminiPro.py
+++ b/g4f/Provider/GeminiPro.py
@@ -26,38 +26,35 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
         stream: bool = False,
         proxy: str = None,
         api_key: str = None,
-        api_base: str = None,
-        use_auth_header: bool = True,
+        api_base: str = "https://generativelanguage.googleapis.com/v1beta",
+        use_auth_header: bool = False,
         image: ImageType = None,
         connector: BaseConnector = None,
         **kwargs
     ) -> AsyncResult:
-        model = "gemini-pro-vision" if not model and image else model
+        model = "gemini-pro-vision" if not model and image is not None else model
         model = cls.get_model(model)
         if not api_key:
             raise MissingAuthError('Missing "api_key"')
         headers = params = None
-        if api_base and use_auth_header:
+        if use_auth_header:
             headers = {"Authorization": f"Bearer {api_key}"}
         else:
             params = {"key": api_key}
-        if not api_base:
-            api_base = f"https://generativelanguage.googleapis.com/v1beta"
-
         method = "streamGenerateContent" if stream else "generateContent"
         url = f"{api_base.rstrip('/')}/models/{model}:{method}"
         async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
             contents = [
                 {
-                    "role": "model" if message["role"] == "assistant" else message["role"],
+                    "role": "model" if message["role"] == "assistant" else "user",
                     "parts": [{"text": message["content"]}]
                 }
                 for message in messages
             ]
-            if image:
+            if image is not None:
                 image = to_bytes(image)
                 contents[-1]["parts"].append({
                     "inline_data": {
@@ -87,7 +84,8 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
                         lines = [b"{\n"]
                     elif chunk == b",\r\n" or chunk == b"]":
                         try:
-                            data = json.loads(b"".join(lines))
+                            data = b"".join(lines)
+                            data = json.loads(data)
                             yield data["candidates"][0]["content"]["parts"][0]["text"]
                         except:
                             data = data.decode() if isinstance(data, bytes) else data
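Note: with the new defaults above, GeminiPro targets the public generativelanguage.googleapis.com endpoint and passes the key as a `?key=` query parameter rather than a Bearer header (set `use_auth_header=True` to restore the Authorization header). A minimal sketch of the request shape the provider now builds; the helper name, model, and prompt are illustrative, not part of this change:

```python
import asyncio
from aiohttp import ClientSession

API_BASE = "https://generativelanguage.googleapis.com/v1beta"  # new default
API_KEY = "YOUR_API_KEY"  # hypothetical placeholder

async def generate(prompt: str) -> str:
    # use_auth_header=False (the new default): the key travels as ?key=...
    url = f"{API_BASE}/models/gemini-pro:generateContent"
    payload = {"contents": [{"role": "user", "parts": [{"text": prompt}]}]}
    async with ClientSession() as session:
        async with session.post(url, params={"key": API_KEY}, json=payload) as response:
            response.raise_for_status()
            data = await response.json()
            # Same response path the provider reads when streaming
            return data["candidates"][0]["content"]["parts"][0]["text"]

print(asyncio.run(generate("Hello")))
```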
diff --git a/g4f/Provider/bing/conversation.py b/g4f/Provider/bing/conversation.py
index 919d9c1d..4af9e5fe 100644
--- a/g4f/Provider/bing/conversation.py
+++ b/g4f/Provider/bing/conversation.py
@@ -46,7 +46,7 @@ async def create_conversation(session: ClientSession, proxy: str = None) -> Conv
     }
     for k, v in headers.items():
         session.headers[k] = v
-    url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1579.2'
+    url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1'
     async with session.get(url, headers=headers, proxy=proxy) as response:
         try:
             data = await response.json()
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index e507404b..3d19e003 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -5,15 +5,15 @@ import uuid
 import json
 import os
 import base64
+import time
 from aiohttp import ClientWebSocketResponse

 try:
     from py_arkose_generator.arkose import get_values_for_request
-    from async_property import async_cached_property
-    has_requirements = True
+    has_arkose_generator = True
 except ImportError:
-    async_cached_property = property
-    has_requirements = False
+    has_arkose_generator = False
+
 try:
     from selenium.webdriver.common.by import By
     from selenium.webdriver.support.ui import WebDriverWait
@@ -33,7 +33,7 @@ from ... import debug

 class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     """A class for creating and managing conversations with OpenAI chat service"""
-
+
     url = "https://chat.openai.com"
     working = True
     needs_auth = True
@@ -47,7 +47,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     _api_key: str = None
     _headers: dict = None
     _cookies: Cookies = None
-    _last_message: int = 0
+    _expires: int = None

     @classmethod
     async def create(
@@ -80,7 +80,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             A Response object that contains the generator, action, messages, and options
         """
         # Add the user input to the messages list
-        if prompt:
+        if prompt is not None:
             messages.append({
                 "role": "user",
                 "content": prompt
@@ -102,7 +102,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             messages,
             kwargs
         )
-
+
     @classmethod
     async def upload_image(
         cls,
@@ -162,7 +162,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             response.raise_for_status()
             image_data["download_url"] = (await response.json())["download_url"]
             return ImageRequest(image_data)
-
+
     @classmethod
     async def get_default_model(cls, session: StreamSession, headers: dict):
         """
@@ -185,7 +185,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 return cls.default_model
             raise RuntimeError(f"Response: {data}")
         return cls.default_model
-
+
     @classmethod
     def create_messages(cls, messages: Messages, image_request: ImageRequest = None):
         """
@@ -334,9 +334,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         Raises:
             RuntimeError: If an error occurs during processing.
         """
-        if not has_requirements:
-            raise MissingRequirementsError('Install "py-arkose-generator" and "async_property" package')
-        if not parent_id:
+        if parent_id is None:
             parent_id = str(uuid.uuid4())

         # Read api_key from arguments
@@ -348,7 +346,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             timeout=timeout
         ) as session:
             # Read api_key and cookies from cache / browser config
-            if cls._headers is None:
+            if cls._headers is None or cls._expires is None or time.time() > cls._expires:
                 if api_key is None:
                     # Read api_key from cookies
                     cookies = get_cookies("chat.openai.com", False) if cookies is None else cookies
@@ -357,8 +355,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 else:
                     api_key = cls._api_key if api_key is None else api_key
                 # Read api_key with session cookies
-                if api_key is None and cookies:
-                    api_key = await cls.fetch_access_token(session, cls._headers)
+                #if api_key is None and cookies:
+                #    api_key = await cls.fetch_access_token(session, cls._headers)
                 # Load default model
                 if cls.default_model is None and api_key is not None:
                     try:
@@ -384,6 +382,19 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 else:
                     cls._set_api_key(api_key)

+            async with session.post(
+                f"{cls.url}/backend-api/sentinel/chat-requirements",
+                json={"conversation_mode_kind": "primary_assistant"},
+                headers=cls._headers
+            ) as response:
+                response.raise_for_status()
+                data = await response.json()
+                need_arkose = data["arkose"]["required"]
+                chat_token = data["token"]
+
+            if need_arkose and not has_arkose_generator:
+                raise MissingRequirementsError('Install "py-arkose-generator" package')
+
             try:
                 image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
             except Exception as e:
@@ -394,12 +405,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             model = cls.get_model(model).replace("gpt-3.5-turbo", "text-davinci-002-render-sha")
             fields = ResponseFields()
             while fields.finish_reason is None:
-                arkose_token = await cls.get_arkose_token(session)
                 conversation_id = conversation_id if fields.conversation_id is None else fields.conversation_id
                 parent_id = parent_id if fields.message_id is None else fields.message_id
                 data = {
                     "action": action,
-                    "arkose_token": arkose_token,
                     "conversation_mode": {"kind": "primary_assistant"},
                     "force_paragen": False,
                     "force_rate_limit": False,
@@ -417,7 +426,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     json=data,
                     headers={
                         "Accept": "text/event-stream",
-                        "OpenAI-Sentinel-Arkose-Token": arkose_token,
+                        **({"OpenAI-Sentinel-Arkose-Token": await cls.get_arkose_token(session)} if need_arkose else {}),
+                        "OpenAI-Sentinel-Chat-Requirements-Token": chat_token,
                         **cls._headers
                     }
                 ) as response:
@@ -437,17 +447,20 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 await cls.delete_conversation(session, cls._headers, fields.conversation_id)

     @staticmethod
-    async def iter_messages_ws(ws: ClientWebSocketResponse) -> AsyncIterator:
+    async def iter_messages_ws(ws: ClientWebSocketResponse, conversation_id: str) -> AsyncIterator:
         while True:
-            yield base64.b64decode((await ws.receive_json())["body"])
+            message = await ws.receive_json()
+            if message["conversation_id"] == conversation_id:
+                yield base64.b64decode(message["body"])

     @classmethod
     async def iter_messages_chunk(cls, messages: AsyncIterator, session: StreamSession, fields: ResponseFields) -> AsyncIterator:
         last_message: int = 0
         async for message in messages:
             if message.startswith(b'{"wss_url":'):
-                async with session.ws_connect(json.loads(message)["wss_url"]) as ws:
-                    async for chunk in cls.iter_messages_chunk(cls.iter_messages_ws(ws), session, fields):
+                message = json.loads(message)
+                async with session.ws_connect(message["wss_url"]) as ws:
+                    async for chunk in cls.iter_messages_chunk(cls.iter_messages_ws(ws, message["conversation_id"]), session, fields):
                         yield chunk
                 break
             async for chunk in cls.iter_messages_line(session, message, fields):
@@ -467,6 +480,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         if not line.startswith(b"data: "):
             return
         elif line.startswith(b"data: [DONE]"):
+            if fields.finish_reason is None:
+                fields.finish_reason = "error"
             return
         try:
             line = json.loads(line[6:])
@@ -589,22 +604,13 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     @classmethod
     def _set_api_key(cls, api_key: str):
         cls._api_key = api_key
+        cls._expires = int(time.time()) + 60 * 60 * 4
         cls._headers["Authorization"] = f"Bearer {api_key}"

     @classmethod
     def _update_cookie_header(cls):
         cls._headers["Cookie"] = cls._format_cookies(cls._cookies)

-class EndTurn:
-    """
-    Class to represent the end of a conversation turn.
-    """
-    def __init__(self):
-        self.is_end = False
-
-    def end(self):
-        self.is_end = True
-
 class ResponseFields:
     """
     Class to encapsulate response fields.
     """
@@ -633,8 +639,8 @@ class Response():
         self._options = options
         self._fields = None

-    async def generator(self):
-        if self._generator:
+    async def generator(self) -> AsyncIterator:
+        if self._generator is not None:
             self._generator = None
             chunks = []
             async for chunk in self._generator:
@@ -644,27 +650,29 @@ class Response():
                 yield chunk
                 chunks.append(str(chunk))
             self._message = "".join(chunks)
-            if not self._fields:
+            if self._fields is None:
                 raise RuntimeError("Missing response fields")
-            self.is_end = self._fields.end_turn
+            self.is_end = self._fields.finish_reason == "stop"

     def __aiter__(self):
         return self.generator()

-    @async_cached_property
-    async def message(self) -> str:
+    async def get_message(self) -> str:
         await self.generator()
         return self._message

-    async def get_fields(self):
+    async def get_fields(self) -> dict:
         await self.generator()
-        return {"conversation_id": self._fields.conversation_id, "parent_id": self._fields.message_id}
+        return {
+            "conversation_id": self._fields.conversation_id,
+            "parent_id": self._fields.message_id
+        }

-    async def next(self, prompt: str, **kwargs) -> Response:
+    async def create_next(self, prompt: str, **kwargs) -> Response:
         return await OpenaiChat.create(
             **self._options,
             prompt=prompt,
-            messages=await self.messages,
+            messages=await self.get_messages(),
             action="next",
             **await self.get_fields(),
             **kwargs
@@ -676,13 +684,13 @@ class Response():
             raise RuntimeError("Can't continue message. Message already finished.")
         return await OpenaiChat.create(
             **self._options,
-            messages=await self.messages,
+            messages=await self.get_messages(),
             action="continue",
             **fields,
             **kwargs
         )

-    async def variant(self, **kwargs) -> Response:
+    async def create_variant(self, **kwargs) -> Response:
         if self.action != "next":
             raise RuntimeError("Can't create variant from continue or variant request.")
         return await OpenaiChat.create(
@@ -693,8 +701,7 @@ class Response():
             **kwargs
         )

-    @async_cached_property
-    async def messages(self):
+    async def get_messages(self) -> list:
         messages = self._messages
-        messages.append({"role": "assistant", "content": await self.message})
+        messages.append({"role": "assistant", "content": await self.get_message()})
         return messages
\ No newline at end of file
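Note: the conversation loop above replaces the unconditional Arkose handshake with OpenAI's chat-requirements flow: POST to /backend-api/sentinel/chat-requirements first, echo the returned token as OpenAI-Sentinel-Chat-Requirements-Token, and generate an Arkose token only when data["arkose"]["required"] is true. A standalone sketch of that first leg, assuming an aiohttp-style session and the authenticated headers the class already holds (the helper name is hypothetical):

```python
from aiohttp import ClientSession

async def fetch_chat_requirements(session: ClientSession, headers: dict) -> tuple[bool, str]:
    """First leg of the sentinel handshake, run before posting a conversation."""
    async with session.post(
        "https://chat.openai.com/backend-api/sentinel/chat-requirements",
        json={"conversation_mode_kind": "primary_assistant"},
        headers=headers
    ) as response:
        response.raise_for_status()
        data = await response.json()
    # data["arkose"]["required"] decides whether an OpenAI-Sentinel-Arkose-Token
    # header must also be generated; data["token"] is echoed back verbatim as
    # OpenAI-Sentinel-Chat-Requirements-Token on the conversation request.
    return data["arkose"]["required"], data["token"]
```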
diff --git a/g4f/gui/client/css/style.css b/g4f/gui/client/css/style.css
index 6ae720f3..eb71fa72 100644
--- a/g4f/gui/client/css/style.css
+++ b/g4f/gui/client/css/style.css
@@ -65,6 +65,7 @@
 :root {
     --font-1: "Inter", sans-serif;
     --section-gap: 25px;
+    --inner-gap: 15px;
     --border-radius-1: 8px;
 }
@@ -204,6 +205,12 @@ body {
     gap: 10px;
 }

+.conversations .convo .choise {
+    position: absolute;
+    right: 8px;
+    background-color: var(--blur-bg);
+}
+
 .conversations i {
     color: var(--conversations);
     cursor: pointer;
@@ -222,10 +229,14 @@ body {
     overflow-wrap: break-word;
     display: flex;
     gap: var(--section-gap);
-    padding: var(--section-gap);
+    padding: var(--inner-gap) var(--section-gap);
     padding-bottom: 0;
 }

+.message.regenerate {
+    opacity: 0.75;
+}
+
 .message:last-child {
     animation: 0.6s show_message;
 }
@@ -393,10 +404,10 @@ body {
 #input-count {
     width: fit-content;
     font-size: 12px;
-    padding: 6px 15px;
+    padding: 6px var(--inner-gap);
 }

-.stop_generating, .regenerate {
+.stop_generating, .toolbar .regenerate {
     position: absolute;
     z-index: 1000000;
     top: 0;
@@ -404,20 +415,20 @@
 }

 @media only screen and (min-width: 40em) {
-    .stop_generating, .regenerate {
+    .stop_generating, .toolbar .regenerate {
         left: 50%;
         transform: translateX(-50%);
         right: auto;
     }
 }

-.stop_generating button, .regenerate button{
+.stop_generating button, .toolbar .regenerate button{
     backdrop-filter: blur(20px);
     -webkit-backdrop-filter: blur(20px);
     background-color: var(--blur-bg);
     border-radius: var(--border-radius-1);
     border: 1px solid var(--blur-border);
-    padding: 5px 15px;
+    padding: 5px var(--inner-gap);
     color: var(--colour-3);
     display: flex;
     justify-content: center;
@@ -601,7 +612,6 @@ select {
 .input-box {
     display: flex;
     align-items: center;
-    padding-right: 15px;
     cursor: pointer;
 }
@@ -785,7 +795,7 @@ a:-webkit-any-link {
     font-size: 15px;
     width: 100%;
     height: 100%;
-    padding: 12px 15px;
+    padding: 12px var(--inner-gap);
     background: none;
     border: none;
     outline: none;
@@ -990,10 +1000,21 @@ a:-webkit-any-link {
     padding-right: 5px;
     padding-top: 2px;
     padding-bottom: 2px;
-    top: 20px;
-    left: 8px;
+    position: absolute;
+    bottom: 8px;
+    right: 8px;
 }

 #send-button:hover {
     border: 1px solid #e4d4ffc9;
 }
+
+#systemPrompt {
+    font-size: 15px;
+    width: 100%;
+    color: var(--colour-3);
+    height: 50px;
+    outline: none;
+    padding: var(--inner-gap) var(--section-gap);
+    resize: vertical;
+}
\ No newline at end of file
diff --git a/g4f/gui/client/html/index.html b/g4f/gui/client/html/index.html
index 86ab157d..85192d23 100644
--- a/g4f/gui/client/html/index.html
+++ b/g4f/gui/client/html/index.html
@@ -37,10 +37,6 @@
         import llamaTokenizer from "llama-tokenizer-js"
     </script>
     <script src="https://unpkg.com/gpt-tokenizer/dist/cl100k_base.js" async></script>
-    <script type="module" async>
-        import { countWords } from 'https://esm.run/alfaaz';
-        window.countWords = countWords;
-    </script>
     <script>
         const user_image = '<img src="/assets/img/user.png" alt="your avatar">';
         const gpt_image = '<img src="/assets/img/gpt.png" alt="your avatar">';
@@ -55,7 +51,6 @@
         }

         #message-input {
-            margin-right: 30px;
             height: 82px;
             margin-left: 20px;
         }
@@ -116,6 +111,7 @@
             </div>
         </div>
         <div class="conversation">
+            <textarea id="systemPrompt" class="box" placeholder="System prompt"></textarea>
             <div id="messages" class="box"></div>
             <div class="toolbar">
                 <div id="input-count" class="">
diff --git a/g4f/gui/client/js/chat.v1.js b/g4f/gui/client/js/chat.v1.js
index 7135b8f6..8dd17275 100644
--- a/g4f/gui/client/js/chat.v1.js
+++ b/g4f/gui/client/js/chat.v1.js
@@ -1,28 +1,29 @@
 const colorThemes = document.querySelectorAll('[name="theme"]');
 const markdown = window.markdownit();
 const message_box = document.getElementById(`messages`);
-const message_input = document.getElementById(`message-input`);
+const messageInput = document.getElementById(`message-input`);
 const box_conversations = document.querySelector(`.top`);
 const stop_generating = document.querySelector(`.stop_generating`);
 const regenerate = document.querySelector(`.regenerate`);
 const sidebar = document.querySelector(".conversations");
 const sidebar_button = document.querySelector(".mobile-sidebar");
-const send_button = document.getElementById("send-button");
+const sendButton = document.getElementById("send-button");
 const imageInput = document.getElementById("image");
 const cameraInput = document.getElementById("camera");
 const fileInput = document.getElementById("file");
 const inputCount = document.getElementById("input-count")
 const modelSelect = document.getElementById("model");
+const systemPrompt = document.getElementById("systemPrompt")

 let prompt_lock = false;

 hljs.addPlugin(new CopyButtonPlugin());

-message_input.addEventListener("blur", () => {
+messageInput.addEventListener("blur", () => {
     window.scrollTo(0, 0);
 });

-message_input.addEventListener("focus", () => {
+messageInput.addEventListener("focus", () => {
     document.documentElement.scrollTop = document.documentElement.scrollHeight;
 });

@@ -59,7 +60,7 @@ const register_remove_message = async () => {
             }
             const message_el = el.parentElement.parentElement;
             await remove_message(window.conversation_id, message_el.dataset.index);
-            await load_conversation(window.conversation_id);
+            await load_conversation(window.conversation_id, false);
         })
     }
 });
@@ -77,13 +78,13 @@ const delete_conversations = async () => {
 };

 const handle_ask = async () => {
-    message_input.style.height = `82px`;
-    message_input.focus();
+    messageInput.style.height = "82px";
+    messageInput.focus();
     window.scrollTo(0, 0);

-    message = message_input.value
+    message = messageInput.value
     if (message.length > 0) {
-        message_input.value = '';
+        messageInput.value = "";
         prompt_lock = true;
         count_input()
         await add_conversation(window.conversation_id, message);
@@ -135,7 +136,7 @@ const remove_cancel_button = async () => {
     }, 300);
 };

-const filter_messages = (messages, filter_last_message = true) => {
+const prepare_messages = (messages, filter_last_message = true) => {
     // Removes none user messages at end
     if (filter_last_message) {
         let last_message;
@@ -147,7 +148,7 @@ const filter_messages = (messages, filter_last_message = true) => {
         }
     }

-    // Remove history, if it is selected
+    // Remove history, if it's selected
     if (document.getElementById('history')?.checked) {
         if (filter_last_message) {
             messages = [messages.pop()];
@@ -157,20 +158,31 @@
     }

     let new_messages = [];
-    for (i in messages) {
-        new_message = messages[i];
-        // Remove generated images from history
-        new_message["content"] = new_message["content"].replaceAll(
-            /<!-- generated images start -->[\s\S]+<!-- generated images end -->/gm,
-            ""
-        )
-        delete new_message["provider"];
-        // Remove regenerated messages
-        if (!new_message.regenerate) {
-            new_messages.push(new_message)
+    if (messages) {
+        for (i in messages) {
+            new_message = messages[i];
+            // Remove generated images from history
+            new_message.content = new_message.content.replaceAll(
+                /<!-- generated images start -->[\s\S]+<!-- generated images end -->/gm,
+                ""
+            )
+            delete new_message["provider"];
+            // Remove regenerated messages
+            if (!new_message.regenerate) {
+                new_messages.push(new_message)
+            }
         }
     }

+    // Add system message
+    system_content = systemPrompt?.value;
+    if (system_content) {
+        new_messages.unshift({
+            "role": "system",
+            "content": system_content
+        });
+    }
+
     return new_messages;
 }

@@ -179,7 +191,7 @@ const ask_gpt = async () => {
     messages = await get_messages(window.conversation_id);
     total_messages = messages.length;
-    messages = filter_messages(messages);
+    messages = prepare_messages(messages);

     window.scrollTo(0, 0);
     window.controller = new AbortController();
@@ -192,8 +204,6 @@ const ask_gpt = async () => {
     message_box.scrollTop = message_box.scrollHeight;
     window.scrollTo(0, 0);
-    await new Promise((r) => setTimeout(r, 500));
-    window.scrollTo(0, 0);

     el = message_box.querySelector('.count_total');
     el ? el.parentElement.removeChild(el) : null;
@@ -218,6 +228,8 @@ const ask_gpt = async () => {

     message_box.scrollTop = message_box.scrollHeight;
     window.scrollTo(0, 0);
+
+    error = provider_result = null;
     try {
         let body = JSON.stringify({
             id: window.token,
@@ -241,49 +253,47 @@ const ask_gpt = async () => {
         } else {
             headers['content-type'] = 'application/json';
         }
+
         const response = await fetch(`/backend-api/v2/conversation`, {
             method: 'POST',
             signal: window.controller.signal,
             headers: headers,
             body: body
         });
-
-        await new Promise((r) => setTimeout(r, 1000));
-        window.scrollTo(0, 0);
-
         const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
-        error = provider = null;
         while (true) {
             const { value, done } = await reader.read();
             if (done) break;
             for (const line of value.split("\n")) {
-                if (!line) continue;
+                if (!line) {
+                    continue;
+                }
                 const message = JSON.parse(line);
                 if (message.type == "content") {
                     text += message.content;
-                } else if (message["type"] == "provider") {
-                    provider = message.provider
+                } else if (message.type == "provider") {
+                    provider_result = message.provider
                     content.querySelector('.provider').innerHTML = `
-                        <a href="${provider.url}" target="_blank">
-                            ${provider.name}
+                        <a href="${provider_result.url}" target="_blank">
+                            ${provider_result.name}
                         </a>
-                        ${provider.model ? ' with ' + provider.model : ''}
+                        ${provider_result.model ? ' with ' + provider_result.model : ''}
                     `
-                } else if (message["type"] == "error") {
-                    error = message["error"];
-                } else if (message["type"] == "message") {
-                    console.error(message["message"])
+                } else if (message.type == "error") {
+                    error = message.error;
+                } else if (message.type == "message") {
+                    console.error(message.message)
                 }
             }

             if (error) {
                 console.error(error);
-                content_inner.innerHTML += "<p>An error occured, please try again, if the problem persists, please use a other model or provider.</p>";
+                content_inner.innerHTML += `<p><strong>An error occurred:</strong> ${error}</p>`;
             } else {
                 html = markdown_render(text);
                 let lastElement, lastIndex = null;
-                for (element of ['</p>', '</code></pre>', '</li>\n</ol>', '</li>\n</ul>']) {
+                for (element of ['</p>', '</code></pre>', '</p>\n</li>\n</ol>', '</li>\n</ol>', '</li>\n</ul>']) {
                     const index = html.lastIndexOf(element)
-                    if (index > lastIndex) {
+                    if (index - element.length > lastIndex) {
                         lastElement = element;
                         lastIndex = index;
                     }
@@ -292,7 +302,7 @@ const ask_gpt = async () => {
                     html = html.substring(0, lastIndex) + '<span id="cursor"></span>' + lastElement;
                 }
                 content_inner.innerHTML = html;
-                content_count.innerText = count_words_and_tokens(text, provider?.model);
+                content_count.innerText = count_words_and_tokens(text, provider_result?.model);
                 highlight(content_inner);
             }
@@ -302,7 +312,6 @@ const ask_gpt = async () => {
             }
         }
         if (!error) {
-            // Remove cursor
             html = markdown_render(text);
             content_inner.innerHTML = html;
             highlight(content_inner);
@@ -313,30 +322,29 @@ const ask_gpt = async () => {
         }
     } catch (e) {
         console.error(e);
-
         if (e.name != "AbortError") {
             error = true;
             text = "oops ! something went wrong, please try again / reload. [stacktrace in console]";
             content_inner.innerHTML = text;
         } else {
-            content_inner.innerHTML += ` [aborted]`;
-            text += ` [aborted]`
+            content_inner.innerHTML += " [aborted]";
+            if (text) text += " [aborted]";
         }
     }
-    if (!error) {
-        await add_message(window.conversation_id, "assistant", text, provider);
+    if (!error && text) {
+        await add_message(window.conversation_id, "assistant", text, provider_result);
         await load_conversation(window.conversation_id);
     } else {
         let cursorDiv = document.getElementById(`cursor`);
         if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv);
     }
+    window.scrollTo(0, 0);
     message_box.scrollTop = message_box.scrollHeight;
     await remove_cancel_button();
     await register_remove_message();
     prompt_lock = false;
-    window.scrollTo(0, 0);
     await load_conversations();
-    regenerate.classList.remove(`regenerate-hidden`);
+    regenerate.classList.remove("regenerate-hidden");
 };

 const clear_conversations = async () => {
@@ -366,22 +374,18 @@ const clear_conversation = async () => {

 const show_option = async (conversation_id) => {
     const conv = document.getElementById(`conv-${conversation_id}`);
-    const yes = document.getElementById(`yes-${conversation_id}`);
-    const not = document.getElementById(`not-${conversation_id}`);
+    const choi = document.getElementById(`cho-${conversation_id}`);

-    conv.style.display = `none`;
-    yes.style.display = `block`;
-    not.style.display = `block`;
+    conv.style.display = "none";
+    choi.style.display = "block";
 };

 const hide_option = async (conversation_id) => {
     const conv = document.getElementById(`conv-${conversation_id}`);
-    const yes = document.getElementById(`yes-${conversation_id}`);
-    const not = document.getElementById(`not-${conversation_id}`);
+    const choi = document.getElementById(`cho-${conversation_id}`);

-    conv.style.display = `block`;
-    yes.style.display = `none`;
-    not.style.display = `none`;
+    conv.style.display = "block";
+    choi.style.display = "none";
 };

 const delete_conversation = async (conversation_id) => {
@@ -412,23 +416,31 @@ const new_conversation = async () => {
     window.conversation_id = uuid();

     await clear_conversation();
+    if (systemPrompt) {
+        systemPrompt.value = "";
+    }
     load_conversations();
     hide_sidebar();
     say_hello();
 };

-const load_conversation = async (conversation_id) => {
-    let messages = await get_messages(conversation_id);
+const load_conversation = async (conversation_id, scroll = true) => {
+    let conversation = await get_conversation(conversation_id);
+    let messages = conversation?.items || [];
+
+    if (systemPrompt) {
+        systemPrompt.value = conversation.system || "";
+    }

     let elements = "";
     let last_model = null;
     for (i in messages) {
         let item = messages[i];
-        last_model = item?.provider?.model;
+        last_model = item.provider?.model;
         let next_i = parseInt(i) + 1;
         let next_provider = item.provider ? item.provider : (messages.length > next_i ? messages[next_i].provider : null);
-        let provider_link = item.provider?.name ? `<a href="${item.provider?.url}" target="_blank">${item.provider.name}</a>` : "";
+        let provider_link = item.provider?.name ? `<a href="${item.provider.url}" target="_blank">${item.provider.name}</a>` : "";
         let provider = provider_link ? `
             <div class="provider">
                 ${provider_link}
@@ -436,7 +448,7 @@ const load_conversation = async (conversation_id) => {
             </div>
         ` : "";
         elements += `
-            <div class="message" data-index="${i}">
+            <div class="message${item.regenerate ? " regenerate": ""}" data-index="${i}">
                 <div class="${item.role}">
                     ${item.role == "assistant" ? gpt_image : user_image}
                     <i class="fa-solid fa-xmark"></i>
@@ -454,7 +466,7 @@ const load_conversation = async (conversation_id) => {
         `;
     }

-    const filtered = filter_messages(messages, false);
+    const filtered = prepare_messages(messages, false);
     if (filtered.length > 0) {
         last_model = last_model?.startsWith("gpt-4") ? "gpt-4" : "gpt-3.5-turbo"
         let count_total = GPTTokenizer_cl100k_base?.encodeChat(filtered, last_model).length
@@ -468,44 +480,35 @@ const load_conversation = async (conversation_id) => {
     register_remove_message();
     highlight(message_box);

-    message_box.scrollTo({ top: message_box.scrollHeight, behavior: "smooth" });
+    if (scroll) {
+        message_box.scrollTo({ top: message_box.scrollHeight, behavior: "smooth" });

-    setTimeout(() => {
-        message_box.scrollTop = message_box.scrollHeight;
-    }, 500);
-};
-
-function count_tokens(model, text) {
-    if (model.startsWith("gpt-3") || model.startsWith("gpt-4")) {
-        return GPTTokenizer_cl100k_base?.encode(text).length;
-    }
-    if (model.startsWith("llama2") || model.startsWith("codellama")) {
-        return llamaTokenizer?.encode(text).length;
+        setTimeout(() => {
+            message_box.scrollTop = message_box.scrollHeight;
+        }, 500);
     }
-    if (model.startsWith("mistral") || model.startsWith("mixtral")) {
-        return mistralTokenizer?.encode(text).length;
-    }
-}
-
-function count_words_and_tokens(text, model) {
-    const tokens_count = model ? count_tokens(model, text) : null;
-    const tokens_append = tokens_count ? `, ${tokens_count} tokens` : "";
-    return countWords ? `(${countWords(text)} words${tokens_append})` : "";
-}
+};

-const get_conversation = async (conversation_id) => {
+async function get_conversation(conversation_id) {
     let conversation = await JSON.parse(
         localStorage.getItem(`conversation:${conversation_id}`)
     );
     return conversation;
-};
+}
+
+async function save_conversation(conversation_id, conversation) {
+    localStorage.setItem(
+        `conversation:${conversation_id}`,
+        JSON.stringify(conversation)
+    );
+}

-const get_messages = async (conversation_id) => {
+async function get_messages(conversation_id) {
     let conversation = await get_conversation(conversation_id);
     return conversation?.items || [];
-};
+}

-const add_conversation = async (conversation_id, content) => {
+async function add_conversation(conversation_id, content) {
     if (content.length > 17) {
         title = content.substring(0, 17) + '...'
     } else {
@@ -513,31 +516,34 @@ const add_conversation = async (conversation_id, content) => {
     }

     if (localStorage.getItem(`conversation:${conversation_id}`) == null) {
-        localStorage.setItem(
-            `conversation:${conversation_id}`,
-            JSON.stringify({
-                id: conversation_id,
-                title: title,
-                items: [],
-            })
-        );
+        await save_conversation(conversation_id, {
+            id: conversation_id,
+            title: title,
+            system: systemPrompt?.value,
+            items: [],
+        });
     }

     history.pushState({}, null, `/chat/${conversation_id}`);
-};
+}
+
+async function save_system_message() {
+    if (!window.conversation_id) return;
+    const conversation = await get_conversation(window.conversation_id);
+    conversation.system = systemPrompt?.value;
+    await save_conversation(window.conversation_id, conversation);
+}

 const hide_last_message = async (conversation_id) => {
     const conversation = await get_conversation(conversation_id)
     const last_message = conversation.items.pop();
-    if (last_message["role"] == "assistant") {
-        last_message["regenerate"] = true;
+    if (last_message !== null) {
+        if (last_message["role"] == "assistant") {
+            last_message["regenerate"] = true;
+        }
+        conversation.items.push(last_message);
     }
-    conversation.items.push(last_message);
-
-    localStorage.setItem(
-        `conversation:${conversation_id}`,
-        JSON.stringify(conversation)
-    );
+    await save_conversation(conversation_id, conversation);
 };

 const remove_message = async (conversation_id, index) => {
@@ -545,17 +551,16 @@ const remove_message = async (conversation_id, index) => {
     let new_items = [];
     for (i in conversation.items) {
         if (i == index - 1) {
-            delete conversation.items[i]["regenerate"];
+            if (!conversation.items[index]?.regenerate) {
+                delete conversation.items[i]["regenerate"];
+            }
         }
         if (i != index) {
            new_items.push(conversation.items[i])
         }
     }
     conversation.items = new_items;
-    localStorage.setItem(
-        `conversation:${conversation_id}`,
-        JSON.stringify(conversation)
-    );
+    await save_conversation(conversation_id, conversation);
 };

 const add_message = async (conversation_id, role, content, provider) => {
@@ -566,12 +571,7 @@ const add_message = async (conversation_id, role, content, provider) => {
         content: content,
         provider: provider
     });
-
-    localStorage.setItem(
-        `conversation:${conversation_id}`,
-        JSON.stringify(conversation)
-    );
-
+    await save_conversation(conversation_id, conversation);
     return conversation.items.length - 1;
 };

@@ -594,8 +594,10 @@ const load_conversations = async () => {
                 <span class="convo-title">${conversation.title}</span>
             </div>
             <i onclick="show_option('${conversation.id}')" class="fa-regular fa-trash" id="conv-${conversation.id}"></i>
-            <i onclick="delete_conversation('${conversation.id}')" class="fa-regular fa-check" id="yes-${conversation.id}" style="display:none;"></i>
-            <i onclick="hide_option('${conversation.id}')" class="fa-regular fa-x" id="not-${conversation.id}" style="display:none;"></i>
+            <div id="cho-${conversation.id}" class="choise" style="display:none;">
+                <i onclick="delete_conversation('${conversation.id}')" class="fa-regular fa-check"></i>
+                <i onclick="hide_option('${conversation.id}')" class="fa-regular fa-x"></i>
+            </div>
         </div>
     `;
     }
@@ -733,15 +735,45 @@ colorThemes.forEach((themeOption) => {
     });
 });

+function count_tokens(model, text) {
+    if (model) {
+        if (model.startsWith("llama2") || model.startsWith("codellama")) {
+            return llamaTokenizer?.encode(text).length;
+        }
+        if (model.startsWith("mistral") || model.startsWith("mixtral")) {
+            return mistralTokenizer?.encode(text).length;
+        }
+    }
+    return GPTTokenizer_cl100k_base?.encode(text).length;
+}
+
+function count_words(text) {
+    return text.trim().match(/[\w\u4E00-\u9FA5]+/gu)?.length || 0;
+}
+
+function count_words_and_tokens(text, model) {
+    return `(${count_words(text)} words, ${count_tokens(model, text)} tokens)`;
+}
+
+let countFocus = messageInput;
 const count_input = async () => {
-    if (message_input.value) {
+    if (countFocus.value) {
         model = modelSelect.options[modelSelect.selectedIndex].value;
-        inputCount.innerText = count_words_and_tokens(message_input.value, model);
+        inputCount.innerText = count_words_and_tokens(countFocus.value, model);
     } else {
         inputCount.innerHTML = " "
     }
 };
-message_input.addEventListener("keyup", count_input);
+messageInput.addEventListener("keyup", count_input);
+systemPrompt.addEventListener("keyup", count_input);
+systemPrompt.addEventListener("focus", function() {
+    countFocus = systemPrompt;
+    count_input();
+});
+systemPrompt.addEventListener("blur", function() {
+    countFocus = messageInput;
+    count_input();
+});

 window.onload = async () => {
     setTheme();
@@ -754,11 +786,9 @@ window.onload = async () => {
         say_hello()
     }

-    setTimeout(() => {
-        load_conversations();
-    }, 1);
+    load_conversations();

-    message_input.addEventListener("keydown", async (evt) => {
+    messageInput.addEventListener("keydown", async (evt) => {
         if (prompt_lock) return;

         if (evt.keyCode === 13 && !evt.shiftKey) {
@@ -766,41 +796,22 @@ window.onload = async () => {
             console.log("pressed enter");
             await handle_ask();
         } else {
-            message_input.style.removeProperty("height");
-            message_input.style.height = message_input.scrollHeight + "px";
+            messageInput.style.removeProperty("height");
+            messageInput.style.height = messageInput.scrollHeight + "px";
         }
     });

-    send_button.addEventListener(`click`, async () => {
+    sendButton.addEventListener(`click`, async () => {
         console.log("clicked send");
         if (prompt_lock) return;
         await handle_ask();
     });

+    messageInput.focus();
+
     register_settings_localstorage();
 };

-const observer = new MutationObserver((mutationsList) => {
-    for (const mutation of mutationsList) {
-        if (mutation.type === 'attributes' && mutation.attributeName === 'style') {
-            const height = message_input.offsetHeight;
-
-            let heightValues = {
-                81: "20px",
-                82: "20px",
-                100: "30px",
-                119: "39px",
-                138: "49px",
-                150: "55px"
-            }
-
-            send_button.style.top = heightValues[height] || '';
-        }
-    }
-});
-
-observer.observe(message_input, { attributes: true });
-
 (async () => {
     response = await fetch('/backend-api/v2/models')
     models = await response.json()
@@ -875,4 +886,8 @@ fileInput.addEventListener('change', async (event) => {
     } else {
         delete fileInput.dataset.text;
     }
+});
+
+systemPrompt?.addEventListener("blur", async () => {
+    await save_system_message();
 });
\ No newline at end of file
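Note: the count_words helper added above (the "word count from iG8R" in the commit message) replaces the removed alfaaz dependency with a single regex. A rough Python equivalent, for illustration only (Python's \w is Unicode-aware, unlike JavaScript's, so counts can differ slightly on non-ASCII text):

```python
import re

# Mirrors the client-side /[\w\u4E00-\u9FA5]+/gu: a "word" is any run of
# word characters or CJK unified ideographs, so Chinese text is counted too.
WORD_RE = re.compile(r"[\w\u4E00-\u9FA5]+")

def count_words(text: str) -> int:
    # An empty or whitespace-only string yields no matches, i.e. 0 words,
    # matching the JavaScript `?.length || 0` fallback.
    return len(WORD_RE.findall(text.strip()))

print(count_words("hello world"))  # 2
```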