Use Limnoria config system

John Burwell 2023-08-31 03:17:18 +00:00
parent 5965c21b04
commit 1d2f48b0f4
2 changed files with 117 additions and 17 deletions
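
The commit replaces the plugin's hard-coded settings with Limnoria's registry: values are declared in the plugin's config module with conf.registerGlobalValue / conf.registerChannelValue and read back in the plugin with self.registryValue(). A minimal sketch of that pattern, separate from the diff below (names and defaults are illustrative):

    # Sketch only, not part of this commit.
    # In the plugin's config module: declare a per-channel setting.
    from supybot import conf, registry

    Chat = conf.registerPlugin('Chat')
    conf.registerChannelValue(
        Chat, "model",
        registry.String("gpt-3.5-turbo", """The language model to use"""))

    # In the plugin class (a callbacks.Plugin subclass), read it back;
    # passing the channel (e.g. msg.args[0]) makes per-channel overrides apply:
    #     model = self.registryValue("model", msg.args[0])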


@@ -47,10 +47,81 @@ def configure(advanced):
     conf.registerPlugin('Chat', True)
 
-Choose = conf.registerPlugin('Chat')
+Chat = conf.registerPlugin('Chat')
 # This is where your configuration variables (if any) should go. For example:
-# conf.registerGlobalValue(Choose, 'someConfigVariableName',
+# conf.registerGlobalValue(Chat, 'someConfigVariableName',
 #     registry.Boolean(False, _("""Help for someConfigVariableName.""")))
+
+# API key
+conf.registerGlobalValue(
+    Chat,
+    "api_key",
+    registry.String(
+        "",
+        _("""Your ChatGPT API key"""),
+        private=True,
+    )
+)
+
+# Model
+conf.registerChannelValue(
+    Chat,
+    "model",
+    registry.String(
+        "gpt-3.5-turbo",
+        _("""The language model to use"""),
+    ),
+)
+
+# System prompt
+conf.registerChannelValue(
+    Chat,
+    "system_prompt",
+    registry.String(
+        "I am an IRC bot named $bot_name, in an IRC channel called $channel_name. I will not remind users that I am an AI or what my limitations are; I will just respond. I will not prefix my own response with any user's name. The following is a transcript of the conversation in the channel.",
+        _("""The 'system' prompt first given to ChatGPT"""),
+    ),
+)
+
+# What to do with multi-line replies
+conf.registerChannelValue(
+    Chat,
+    "join_multiple_lines",
+    registry.Boolean(
+        True,
+        _("""Combine multiple lines in the response into one string. If false, reply line by line"""),
+    ),
+)
+
+# What to use to join multiple lines
+conf.registerChannelValue(
+    Chat,
+    "join_string",
+    registry.String(
+        " / ",
+        _("""When joining lines, what string to use between each line"""),
+    ),
+)
+
+# How much scrollback to include in the prompt
+conf.registerChannelValue(
+    Chat,
+    "scrollback_lines",
+    registry.Integer(
+        30,
+        _("""How many lines of scrollback history to include in the prompt"""),
+    ),
+)
+
+# Maximum tokens to use
+conf.registerChannelValue(
+    Chat,
+    "max_tokens",
+    registry.Integer(
+        1000,
+        _("""Maximum token count for combined query/reply"""),
+    ),
+)
+
 # vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
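
Once registered, these values can be inspected and changed at runtime through Limnoria's bundled Config plugin instead of by editing code. A few illustrative invocations (the '@' prefix and the #example channel are assumptions, not part of the commit):

    @config plugins.Chat.api_key <your API key>
    @config channel #example plugins.Chat.model gpt-4
    @config help plugins.Chat.scrollback_lines

Registering api_key with private=True is meant to keep Limnoria from displaying its value to ordinary users when the setting is queried.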


@@ -29,12 +29,11 @@
 ###
 
 import configparser
+import json
 import os
-import random
-import re
 import requests
 
-from supybot import utils, plugins, ircutils, callbacks
+from supybot import utils, plugins, ircutils, callbacks, conf
 from supybot.commands import *
 try:
     from supybot.i18n import PluginInternationalization
@@ -69,41 +68,71 @@ class Chat(callbacks.Plugin):
             return msg[len(prefix):]
         else:
             return msg
 
     def chat(self, irc, msg, args, string):
         """
         Sends your comment to ChatGPT and returns the response.
         Args:
-            string (str): The string to send to chatGPT.
+            string (str): The string to send to ChatGPT.
         Returns:
             str: ChatGPT's response
         """
-        model = "gpt-3.5-turbo"
-
-        system_prompt = "I am an IRC bot named magicvoice, in an IRC channel called ##huffaz. Everyone knows I am an AI and what my limitations are. The following is the last 30 messages exchanged in the channel among users, including myself."
-
-        history = irc.state.history[-30:]
-        filtered_messages = [(msg.args[0], self.filter_prefix(msg.args[1], "chat ")) for msg in history if msg.command == 'PRIVMSG']
-        conversation_history = [{"role": "user", "content": f"{nick}: {msg}"} for nick, msg in filtered_messages]
-        messages = [{"role": "system", "content": system_prompt}] + conversation_history + [{"role": "user", "content": string}]
-
-        res = requests.post(f"https://api.openai.com/v1/chat/completions", headers = {
+        invocation_string = f"{conf.supybot.reply.whenAddressedBy.chars()}{self.name().lower()} "
+        # self.log.info(f"invocation_string: {invocation_string}")
+        model = self.registryValue("model")
+        max_tokens = self.registryValue("max_tokens")
+
+        # If the configured system prompt contains $bot_name and $channel_name tokens, replace those with real values
+        system_prompt = self.registryValue("system_prompt").replace("$bot_name", irc.nick).replace("$channel_name", msg.args[0])
+
+        # Get the last few lines of the chat scrollback to include in the prompt
+        history = irc.state.history[-self.registryValue("scrollback_lines"):]
+        for message in history:
+            self.log.info(f"{message.nick}: {json.dumps(message.args)}")
+
+        # Restrict the scrollback to PRIVMSGs in the current channel, filtering out the invocation prefix
+        filtered_messages = [
+            (message.nick, (self.filter_prefix(message.args[1], f"{invocation_string}")))
+            for message in history
+            if message.command == 'PRIVMSG' and message.args[0] == msg.args[0]
+        ]
+
+        # Format the conversation history for submission to the API
+        conversation_history = [
+            {
+                "role": "assistant" if nick == irc.nick else "user",
+                "content": f"{nick}: {msg}"
+            }
+            for nick, msg in filtered_messages
+        ]
+
+        # Combine the system prompt and the scrollback (which already includes the invocation)
+        messages = [{"role": "system", "content": system_prompt}] + conversation_history
+        self.log.info(json.dumps(messages))
+
+        # Submit the request to the API
         # res = requests.post(f"http://localhost:8000/capture", headers = {
+        res = requests.post(f"https://api.openai.com/v1/chat/completions", headers = {
             "Content-Type":"application/json",
-            "Authorization":f"Bearer {self.api_key}"
+            "Authorization":f"Bearer {self.registryValue('api_key')}"
         },
         json={
             "model": model,
             "messages": messages,
-            "max_tokens":1000
+            "max_tokens": max_tokens,
         }).json()
 
+        # Pick a response
         response = res['choices'][0]['message']['content'].strip()
-        line = response.replace("\n", " / ")
+
+        # Combine multiple lines
+        line = response.replace("\n", self.registryValue("join_string"))
+
+        # Post the reply to the channel
         irc.reply( line )
 
         # for line in conversation_history:
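
For reference, the value the code indexes with res['choices'][0]['message']['content'] comes from the OpenAI chat completions endpoint; an abridged sketch of its shape (field values invented for illustration):

    # Sketch of the JSON body returned by https://api.openai.com/v1/chat/completions
    res = {
        "id": "chatcmpl-...",
        "object": "chat.completion",
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": "Hello!\nHow can I help?"},
                "finish_reason": "stop",
            }
        ],
        "usage": {"prompt_tokens": 250, "completion_tokens": 12, "total_tokens": 262},
    }
    # The plugin then strips the text and joins newlines with join_string before
    # irc.reply(), e.g. "Hello! / How can I help?" with the default " / ".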