From dc10f776fdcbe7d3522d8c2f5e9ec67ccbd90e6c Mon Sep 17 00:00:00 2001
From: ruecat <79139779+ruecat@users.noreply.github.com>
Date: Thu, 7 Dec 2023 22:10:36 +0300
Subject: [PATCH] =?UTF-8?q?=E2=9C=88=EF=B8=8F=20Simplify=20bot,=20improve?=
=?UTF-8?q?=20API=20and=20optimize=20environment?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
bot/.env.example | 8 +-
bot/func/controller.py | 46 +++++++++++
bot/ollama-run.py | 179 -----------------------------------------
bot/run.py | 111 +++++++++++++++++++++++++
4 files changed, 161 insertions(+), 183 deletions(-)
create mode 100644 bot/func/controller.py
delete mode 100644 bot/ollama-run.py
create mode 100644 bot/run.py
diff --git a/bot/.env.example b/bot/.env.example
index 1cf3025..38924cc 100644
--- a/bot/.env.example
+++ b/bot/.env.example
@@ -1,4 +1,4 @@
-TOKEN=''
-ADMIN_IDS=''
-USER_IDS=''
-INITMODEL=''
\ No newline at end of file
+TOKEN=0123
+ADMIN_IDS=000,111
+USER_IDS=000,111
+INITMODEL=llama-2
\ No newline at end of file
diff --git a/bot/func/controller.py b/bot/func/controller.py
new file mode 100644
index 0000000..c08034f
--- /dev/null
+++ b/bot/func/controller.py
@@ -0,0 +1,46 @@
+import aiohttp
+import json
+from dotenv import load_dotenv
+import os
+load_dotenv()
+system_info = os.uname()
+token = os.getenv("TOKEN")
+allowed_ids = list(map(int, os.getenv('USER_IDS', '').split(',')))
+admin_ids = list(map(int, os.getenv('ADMIN_IDS', '').split(',')))
+# Will be implemented soon
+#content = []
+
+async def fetcher():
+ async with aiohttp.ClientSession() as session:
+ url = 'http://localhost:11434/api/tags'
+ async with session.get(url) as response:
+ if response.status == 200:
+ data = await response.json()
+ return data['models']
+ else:
+ return []
+
+
+async def streamer(prompt: str, modelname: str):
+ #try:
+ async with aiohttp.ClientSession() as session:
+ print("Api triggered")
+ url = 'http://localhost:11434/api/generate'
+ #content.append(prompt)
+ #print(f'Content updated: {content}')
+ data = {
+ "model": modelname,
+ "prompt": prompt,
+ "stream": True,
+ #"context": content
+ }
+ print(f"DEBUG\n{modelname}\n{prompt}")
+ # Stream from API
+ async with session.post(url, json=data) as response:
+ async for chunk in response.content:
+ if chunk:
+ decoded_chunk = chunk.decode()
+ if decoded_chunk.strip():
+ yield json.loads(decoded_chunk)
+ # except:
+ # print("---------\n[Ollama-API ERROR]\nNON_DOCKER: Make sure your Ollama API server is running ('ollama serve' command)\nDOCKER: Check Ollama container and try again\n---------")
\ No newline at end of file
diff --git a/bot/ollama-run.py b/bot/ollama-run.py
deleted file mode 100644
index ad3079a..0000000
--- a/bot/ollama-run.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import aiogram
-import aiohttp
-import asyncio
-import json
-import os
-from dotenv import load_dotenv
-load_dotenv()
-from aiogram import Bot, Dispatcher, types
-from aiogram.filters.command import CommandStart
-from aiogram.utils.keyboard import InlineKeyboardBuilder
-from aiogram.types import Message
-token = os.environ['TOKEN']
-uid_get = os.environ['USER_IDS'].split(",")
-allowed_ids = [int(x) for x in uid_get]
-aid_get = os.environ['USER_IDS']
-admin_ids = [int(x) for x in aid_get]
-modelname = os.environ['INITMODEL']
-if token == "yourtoken":
- print("Uh-Oh!\nPlease enter your Telegram bot TOKEN in .env file")
- exit("FATAL: NO_TOKEN_PROVIDED")
-# --- --- --- --- --- --- --- ---
-bot = Bot(token=token)
-dp = Dispatcher()
-builder = InlineKeyboardBuilder()
-builder.row(types.InlineKeyboardButton(text="š¤ļø Information", callback_data="info"),
- types.InlineKeyboardButton(text="āļø Change Model", callback_data="modelmanager"))
-# Kernel swap options
-
-
-@dp.message(CommandStart())
-async def command_start_handler(message: Message) -> None:
- if message.from_user.id in allowed_ids:
- await message.answer(
- f"Hello, {message.from_user.full_name}\nChoose action",
- parse_mode="HTML", reply_markup=builder.as_markup())
- else:
- await message.answer(
-            f"{message.from_user.full_name} - [Auth-Blocked]\nContact staff to whitelist you.",
- parse_mode="HTML")
-
-@dp.callback_query(lambda query: query.data == 'modelmanager')
-async def modelmanager_callback_handler(query: types.CallbackQuery):
- if query.from_user.id in admin_ids:
- try:
- models = await fetch_models()
-
- # Create a new InlineKeyboardBuilder for the fetched models
- modelmanager_builder = InlineKeyboardBuilder()
- for model in models:
- modelname = model['name']
- # Add a button for each model
- modelmanager_builder.row(
- types.InlineKeyboardButton(text=modelname, callback_data=f"model_{modelname}")
- )
-
- # Send a new message with the new keyboard or edit the existing message
- await query.message.edit_text(
- "Choose model:",
- reply_markup=modelmanager_builder.as_markup()
- )
- except:
- await query.message.edit_text("[Ollama-API ERROR]\nNON_DOCKER: Make sure your Ollama API server is running ('ollama serve' command).\nDOCKER: Check Ollama container and try again", parse_mode="HTML")
- else:
- await query.answer("Access Denied")
-
-@dp.callback_query(lambda query: query.data.startswith('model_'))
-async def model_callback_handler(query: types.CallbackQuery):
- global modelname
- modelname = query.data.split('model_')[1] # This will modify the modelname in the bot_state instance
- print(modelname)
- await query.answer(f"Chosen model: {modelname}")
-
-
-@dp.callback_query(lambda query: query.data == 'info')
-async def systeminfo_callback_handler(query: types.CallbackQuery):
- if query.from_user.id in admin_ids:
- await query.answer("Fetching info...")
- await bot.send_message(chat_id=query.message.chat.id,
-                           text=f"š¦ About System\nāļø Current model: {modelname}\n(Other options will be added soon..)",
- parse_mode="HTML")
- else:
- await query.answer("Access Denied")
-
-def escape_html(text):
-    return text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
-async def fetch_models():
- async with aiohttp.ClientSession() as session:
- url = 'http://localhost:11434/api/tags'
- async with session.get(url) as response:
- if response.status == 200:
- data = await response.json()
- return data['models']
- else:
- return []
-async def stream_request(prompt: str):
- try:
- async with aiohttp.ClientSession() as session:
- global modelname
- # Default link to OllamaAPI
- url = 'http://localhost:11434/api/generate'
- # Ollama parameters
- data = {
- "model": modelname,
- "prompt": prompt,
- "stream": True
- }
- # Stream from API
- async with session.post(url, json=data) as response: # Use json=data to send JSON
- async for chunk in response.content:
- if chunk:
- decoded_chunk = chunk.decode()
- if decoded_chunk.strip(): # Avoid empty lines
- yield json.loads(decoded_chunk)
- except:
- print("---------\n[Ollama-API ERROR]\nNON_DOCKER: Make sure your Ollama API server is running ('ollama serve' command)\nDOCKER: Check Ollama container and try again\n---------")
-
-
-@dp.message()
-async def handle_message(message: types.Message):
- botinfo = await bot.get_me()
- is_allowed_user = message.from_user.id in allowed_ids
- is_private_chat = message.chat.type == "private"
- is_supergroup = message.chat.type == "supergroup"
- bot_mentioned = any(
- entity.type == "mention" and message.text[entity.offset:entity.offset + entity.length] == f"@{botinfo.username}"
- for entity in message.entities or [])
- if is_allowed_user and message.text and (is_private_chat or (is_supergroup and bot_mentioned)):
- if is_supergroup and bot_mentioned:
- cutmention = len(botinfo.username) + 2
- text = message.text[cutmention:] # + ""
- print(text)
- else:
- text = message.text
- print(text)
- await bot.send_chat_action(message.chat.id, "typing")
- full_response = ""
- sent_message = None
- last_sent_text = None
- async for response_data in stream_request(text):
- chunk = response_data.get("response", "")
- full_response += chunk
- if '.' in chunk or '\n' in chunk or '!' in chunk or '?' in chunk:
- if sent_message:
- if last_sent_text != full_response:
- try:
- await sent_message.edit_text(full_response)
- last_sent_text = full_response
- except aiogram.exceptions.TelegramBadRequest as e:
- if "message is not modified" in str(e):
- pass
- else:
- raise
- else:
- sent_message = await message.answer(full_response)
- last_sent_text = full_response
- if response_data.get("done"):
- if full_response.strip() and last_sent_text != full_response:
- if sent_message:
- try:
- await sent_message.edit_text(full_response)
- except aiogram.exceptions.TelegramBadRequest as e:
- if "message is not modified" in str(e):
- pass
- else:
- raise
- else:
- sent_message = await message.answer(full_response)
- escaped_response = escape_html(full_response)
-            await sent_message.edit_text(escaped_response + "Model:",
-                parse_mode="HTML")
-            break
-
-
-async def main():
-    await dp.start_polling(bot, skip_update=True)
-
-
-if __name__ == "__main__":
-    asyncio.run(main())
diff --git a/bot/run.py b/bot/run.py
new file mode 100644
index 0000000..170d693
--- /dev/null
+++ b/bot/run.py
@@ -0,0 +1,111 @@
+from func.controller import *
+import asyncio, re
+from aiogram import Bot, Dispatcher, types
+from aiogram.filters.command import CommandStart
+from aiogram.utils.keyboard import InlineKeyboardBuilder
+from aiogram.types import Message
+bot = Bot(token=token)
+dp = Dispatcher()
+builder = InlineKeyboardBuilder()
+builder.row(types.InlineKeyboardButton(text="š¤ļø Information", callback_data="info"),
+            types.InlineKeyboardButton(text="āļø Settings", callback_data="modelmanager"))
+
+modelname = os.getenv('INITMODEL')
+@dp.message(CommandStart())
+async def command_start_handler(message: Message) -> None:
+    if message.from_user.id in allowed_ids:
+        await message.answer(
+            f"Welcome to Ollama-Telegram Bot, {message.from_user.full_name}!\nSource code: https://github.com/ruecat/ollama-telegram",
+            parse_mode="HTML", reply_markup=builder.as_markup(), disable_web_page_preview=True)
+    else:
+        await message.answer(
+            f"{message.from_user.full_name} - [Auth-Blocked]\nContact staff to whitelist you.",
+ parse_mode="HTML")
+
+@dp.callback_query(lambda query: query.data == 'modelmanager')
+async def modelmanager_callback_handler(query: types.CallbackQuery):
+ if query.from_user.id in admin_ids:
+ models = await fetcher()
+ modelmanager_builder = InlineKeyboardBuilder()
+ for model in models:
+ modelname = model['name']
+ # Add a button for each model
+ modelmanager_builder.row(
+ types.InlineKeyboardButton(text=modelname, callback_data=f"model_{modelname}")
+ )
+ await query.message.edit_text(
+ "Choose model",
+ reply_markup=modelmanager_builder.as_markup()
+ )
+ else:
+ await query.answer("Access Denied")
+
+
+@dp.callback_query(lambda query: query.data.startswith('model_'))
+async def model_callback_handler(query: types.CallbackQuery):
+ global modelname
+ modelname = query.data.split('model_')[1]
+ await query.answer(f"Chosen model: {modelname}")
+@dp.callback_query(lambda query: query.data == 'info')
+async def systeminfo_callback_handler(query: types.CallbackQuery):
+ if query.from_user.id in admin_ids:
+ await bot.send_message(chat_id=query.message.chat.id,
+                               text=f"š¦ LLM\nCurrent model: {modelname}\n\nš§ Hardware\nKernel: {system_info[0]}\n\n(Other options will be added soon..)",
+ parse_mode="HTML")
+ else:
+ await query.answer("Access Denied")
+
+
+
+@dp.message()
+async def handle_message(message: types.Message):
+ botinfo = await bot.get_me()
+ is_allowed_user = message.from_user.id in allowed_ids
+ is_private_chat = message.chat.type == "private"
+ is_supergroup = message.chat.type == "supergroup"
+ bot_mentioned = any(
+ entity.type == "mention" and message.text[entity.offset:entity.offset + entity.length] == f"@{botinfo.username}"
+ for entity in message.entities or [])
+ if is_allowed_user and message.text and (is_private_chat or (is_supergroup and bot_mentioned)):
+ if is_supergroup and bot_mentioned:
+ cutmention = len(botinfo.username) + 2
+ prompt = message.text[cutmention:] # + ""
+ else:
+ prompt = message.text
+ await bot.send_chat_action(message.chat.id, "typing")
+ full_response = ""
+ sent_message = None
+ last_sent_text = None
+ async for response_data in streamer(prompt, modelname):
+ chunk = response_data.get("response", "")
+ full_response += chunk
+ full_response_stripped = full_response.strip()
+
+ if '.' in chunk or '\n' in chunk or '!' in chunk or '?' in chunk:
+ if sent_message:
+ if last_sent_text != full_response_stripped:
+ await sent_message.edit_text(full_response_stripped)
+ last_sent_text = full_response_stripped
+ else:
+ sent_message = await message.answer(
+ full_response_stripped)
+ last_sent_text = full_response_stripped
+
+ if response_data.get("done"):
+ if full_response_stripped and last_sent_text != full_response_stripped:
+ if sent_message:
+ await sent_message.edit_text(full_response_stripped)
+ else:
+ sent_message = await message.answer(
+ full_response_stripped)
+                format = re.sub(r'```(.*?)```', r'\1', full_response_stripped, flags=re.DOTALL)
+                await sent_message.edit_text(format + f"šŖ Current Model: {modelname}",
+                    parse_mode="HTML")
+                break
+
+async def main():
+    await dp.start_polling(bot, skip_update=True)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())