Skip to content

Commit

Permalink
Store messages
Browse files Browse the repository at this point in the history
  • Loading branch information
OmidH committed Dec 22, 2023
1 parent 1efbc6a commit dfe56af
Show file tree
Hide file tree
Showing 4 changed files with 111 additions and 24 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -24,3 +24,4 @@ config.yaml

# Log files
*.log
.DS_Store
56 changes: 34 additions & 22 deletions my_project_name/bot_commands.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,10 @@
import json
from urllib.parse import urljoin
from nio import AsyncClient, MatrixRoom, RoomMessageText
from my_project_name.conversation_store import ConversationStore, MessageType, Role
from my_project_name.helper import prepare_msg, validate_url
from my_project_name.parser.parser import get_main_content
from my_project_name.storage import Storage
# from my_project_name.storage import Storage
from my_project_name.config import Config
from my_project_name.chat_functions import react_to_event, send_text_to_room, send_typing_to_room

Expand All @@ -15,7 +16,7 @@ class Command:
def __init__(
self,
client: AsyncClient,
store: Storage,
store: ConversationStore,
config: Config,
command: str,
room: MatrixRoom,
Expand Down Expand Up @@ -63,16 +64,19 @@ async def process(self):
else:
await self._query_llm()
# await self._unknown_command()

async def _query_for_code(self):
    """Forward the query to the code-generation LLM and wait for an answer.

    Persists the outgoing user message in the conversation store before
    dispatching so the history is recorded even if the LLM call fails.
    """
    model = "deepseek-coder-6.7b-instruct:latest"
    # Everything after the command token is the user's specification.
    message = " ".join(self.args).strip()

    prompt = f"Please generate a short and accurate code snippet based on the following specifications. The code should be precise, efficient, and adhere closely to the requirements. Ensure the solution is concise and to the point.\n{message}"
    logger.info(self.event.event_id)
    # BUG FIX: a stale pre-refactor send_llm_message call was left in place
    # above the new one, which would have dispatched the request twice.
    self.store.add_message(message, self.event.sender, Role.USER, MessageType.CODE, None, prompt, self.event.event_id)
    await self.send_llm_message(model=model, message=prompt, messageType=MessageType.CODE, event_id=self.event.event_id)

async def _query_for_available_llms(self):
await send_typing_to_room(self.client, self.room.room_id, True)
try:
try:
headers = {
'Content-Type': 'application/json'
}
Expand Down Expand Up @@ -108,39 +112,46 @@ async def _query_llm_for_summery(self):
await send_text_to_room(self.client, self.room.room_id, f"The given URL is invalid\n>{link}", markdown_convert=True)

content = get_main_content(parsed_url)

# logger.info(f"summery 1: {link} 2: {message} 3: {content}")
await self.send_llm_message(model="mistral-7b-instruct:latest", message=f"Please provide a brief summary of the following content, ensuring to use the same language as the original. Keep the summary concise.\n\n---\n{content}\n---\nEnd of content.")
model = "mistral-7b-instruct:latest"
prompt = f"Please provide a brief summary of the following content, ensuring to use the same language as the original. Keep the summary concise.\n\n---\n{content}\n---\nEnd of content."
self.store.add_message(parsed_url, self.event.sender, Role.USER, MessageType.LINK, model, prompt, self.event.event_id)
await self.send_llm_message(model=model, message=prompt, messageType=MessageType.LINK, event_id=self.event.event_id)

async def _query_llm_with_name(self):
    """Forward the query to a caller-specified LLM and wait for an answer.

    The first argument is interpreted as the model name, the remainder as
    the message. When no arguments are given ``model`` stays ``None`` and
    ``send_llm_message`` falls back to the configured default model.
    """
    # BUG FIX: the rendered diff left a duplicated ``if self.args:`` guard
    # and a stale duplicate send_llm_message call; only one of each belongs.
    model = None
    if self.args:
        model = self.args[0]

    message = " ".join(self.args[1::])

    # Persist the outgoing user message before dispatching to the LLM.
    self.store.add_message(message, self.event.sender, Role.USER, MessageType.CUSTOM, model, None, self.event.event_id)
    await self.send_llm_message(model=model, message=message, messageType=MessageType.CUSTOM, event_id=self.event.event_id)


async def _query_llm(self):
    """Forward the query to the default LLM and wait for an answer."""
    message = " ".join(self.args)
    # BUG FIX: drop the stale pre-refactor send_llm_message call that would
    # have dispatched the request twice.
    # Record the user's message against the configured default model.
    self.store.add_message(message, self.event.sender, Role.USER, MessageType.DEFAULT, self.config.llm_model, None, self.event.event_id)
    await self.send_llm_message(message=message, event_id=self.event.event_id)

async def send_llm_message(self, model=None, message=''):
async def send_llm_message(self, model=None, message='', messageType=MessageType.DEFAULT, event_id=None):
await send_typing_to_room(self.client, self.room.room_id, True, 60000)

llm_param_stop = []
if self.config.llm_param_stop is not "" and model is None:
if self.config.llm_param_stop != "" and model is None:
llm_param_stop.append(self.config.llm_param_stop)

model_name = self.config.llm_model
if model is not None:
model_name = model

prompt = prepare_msg(self.config.llm_msg_template, message) if model is None else message

try:
payload = {
"model": model_name,
"prompt": prepare_msg(self.config.llm_msg_template, message) if model is None else message,
"prompt": prompt,
"stream": False,
"options": {
"seed": self.config.llm_param_seed,
Expand All @@ -154,7 +165,7 @@ async def send_llm_message(self, model=None, message=''):
"num_ctx": self.config.llm_param_num_ctx,
}
}

headers = {
'Content-Type': 'application/json'
}
Expand All @@ -165,7 +176,8 @@ async def send_llm_message(self, model=None, message=''):
json_data = response.json()
await send_typing_to_room(self.client, self.room.room_id, False)
response = (json_data['response'])
# logger.info(response)

self.store.add_message(response, self.client.user_id, Role.ASSISTANT, messageType, model_name, prompt, event_id)
await send_text_to_room(self.client, self.room.room_id, response, markdown_convert=True)

if "eval_duration" in json_data and "eval_count" in json_data:
Expand All @@ -174,13 +186,13 @@ async def send_llm_message(self, model=None, message=''):
if eval_dur != 0 and eval_cnt != 0:
toks_per_sek = eval_cnt / (eval_dur / 1e9)
await send_text_to_room(self.client, self.room.room_id, f'>Your request has been answered by `{model_name}` and took {round((eval_dur)/1000000000, 3)} seconds and generated {round(toks_per_sek, 3)} tokens/s')
else:
else:
await send_text_to_room(self.client, self.room.room_id,'>Your request took some time but couldn\'t calculate the token generation rate due to zero values of eval_duration or eval_count')
else:
await send_typing_to_room(self.client, self.room.room_id, False)
await send_text_to_room(self.client, self.room.room_id, f"An error occurred while fetching the API({response.status_code}): {response}")
except requests.RequestException as e :

except requests.RequestException as e :
await send_typing_to_room(self.client, self.room.room_id, False)
await send_text_to_room(self.client, self.room.room_id, f"An unknown error: {e}")
print('Error Occurred', e)
Expand Down
72 changes: 72 additions & 0 deletions my_project_name/conversation_store.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@

from typing import Dict
from my_project_name.storage import Storage
from enum import Enum


class Role(Enum):
    """Speaker role attached to each stored chat message.

    Values mirror the role strings used by common chat-completion APIs.
    """

    USER = "user"
    SYSTEM = "system"
    ASSISTANT = "assistant"

class MessageType(Enum):
    """Category of a stored message; used to filter conversation history."""

    LINK = "link"
    CODE = "code"
    # BUG FIX: was the typo "custome"; that string is persisted verbatim into
    # the messages table, so fix it while the table is brand-new.
    CUSTOM = "custom"
    DEFAULT = "default"


class ConversationStore(Storage):
    """SQLite-backed store for the chat messages exchanged with the LLM."""

    def __init__(self, database_config: Dict[str, str]):
        """Open the underlying storage and ensure the schema exists."""
        super().__init__(database_config)
        self._init_db()

    def _init_db(self):
        """Create the ``messages`` table if it doesn't exist yet."""
        self._execute('''
            CREATE TABLE IF NOT EXISTS messages (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                role TEXT,
                content TEXT,
                user TEXT,
                model TEXT,
                messageType TEXT,
                prompt TEXT,
                event_id TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
        ''')

    def add_message(self, content, user, role: Role, messageType: MessageType, model=None, prompt=None, event_id=None):
        """Insert one message row.

        Raises:
            ValueError: if ``role`` is not a ``Role`` or ``messageType``
                is not a ``MessageType``.
        """
        if not isinstance(role, Role):
            raise ValueError("role must be an instance of Role enum")

        if not isinstance(messageType, MessageType):
            raise ValueError("messageType must be an instance of MessageType enum")

        self._execute('''
            INSERT INTO messages (role, content, user, model, messageType, prompt, event_id)
            VALUES (?, ?, ?, ?, ?, ?, ?)
        ''', (role.value, content, user, model, messageType.value, prompt, event_id))

    def get_last_five_messages(self, user=None, messageType=None, limit=5):
        """Return the most recent ``limit`` matching messages, oldest first.

        At least one of ``user`` / ``messageType`` must be supplied.
        ``messageType`` may be a ``MessageType`` member or its string value.

        Raises:
            ValueError: when neither filter criterion is given.
        """
        if user is None and messageType is None:
            # BUG FIX: original did ``raise('...')`` which raises a TypeError
            # ("exceptions must derive from BaseException"), not this message.
            raise ValueError('Please provide any search criteria.')

        # The column stores the enum's string value; normalise so that
        # passing the enum itself also matches (a raw Enum won't bind).
        if isinstance(messageType, MessageType):
            messageType = messageType.value

        query = "SELECT role, content, user, model, messageType, prompt, event_id FROM messages "
        conditions = []
        params = []
        if user is not None:
            conditions.append("user = ?")
            params.append(user)
        if messageType is not None:
            conditions.append("messageType = ?")
            params.append(messageType)
        query += "WHERE " + " AND ".join(conditions)
        # BUG FIX: bind LIMIT as a parameter instead of f-string interpolation
        # to keep the statement fully parameterized.
        query += " ORDER BY id DESC LIMIT ?"
        params.append(limit)

        self._execute(query, tuple(params))
        # Rows come back newest-first; reverse for chronological order.
        return list(reversed(self.cursor.fetchall()))
6 changes: 4 additions & 2 deletions my_project_name/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,10 +15,12 @@
RoomMessageText,
UnknownEvent,
)
# from my_project_name.storage import Storage
from my_project_name.conversation_store import ConversationStore

from my_project_name.callbacks import Callbacks
from my_project_name.config import Config
from my_project_name.storage import Storage


logger = logging.getLogger(__name__)

Expand All @@ -37,7 +39,7 @@ async def main():
config = Config(config_path)

# Configure the database
store = Storage(config.database)
store = ConversationStore(database_config=config.database)

# Configuration options for the AsyncClient
client_config = AsyncClientConfig(
Expand Down

0 comments on commit dfe56af

Please sign in to comment.