Move configuration to supybot config registry; improve logging and docs

This commit is contained in:
John Burwell 2025-10-02 17:01:07 +00:00
parent 4cc23ee55e
commit c98367764f
6 changed files with 246 additions and 267 deletions

.gitignore

@@ -1,161 +1,7 @@
# Byte-compiled / optimized / DLL files
# Byte-compiled / optimized files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
# Test artifacts (if any tests are added later)
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
config.ini

README.md

@@ -5,11 +5,10 @@ A Limnoria plugin that brings ChatGPT into your IRC channel
## Installation
1. Install the plugin
2. Put your API key in config.ini like so:
2. Put your API key in the plugin configuration registry like so:
```
[Chat]
api_key = YOUR_API_KEY
/msg BotName config plugins.Chat.api_key YOUR_API_KEY
```
3. Load the plugin:
@@ -18,10 +17,42 @@ api_key = YOUR_API_KEY
load Chat
```
## Configuration
The Chat plugin supports the following configuration parameters:
- **`api_key`**: The API key for accessing OpenAI's API. This must be set for the plugin to work.
- **`model`**: The OpenAI model to use for generating responses. Default: `gpt-4`.
- **`max_tokens`**: The maximum number of tokens to include in the response. Default: `256`.
- **`system_prompt`**: The system prompt to guide the assistant's behavior. Default: `You are a helpful assistant.`.
- **`scrollback_lines`**: The number of recent lines from the channel to include as context. Default: `10`.
- **`join_string`**: The string used to join multi-line responses into a single line. Default: ` / `.
### Example Configuration
To set the API key:
```
/msg BotName config plugins.Chat.api_key YOUR_API_KEY
```
To change the model:
```
/msg BotName config plugins.Chat.model gpt-3.5-turbo
```
To adjust the maximum tokens:
```
/msg BotName config plugins.Chat.max_tokens 512
```
## Usage
Example:
Once configured, you can use the `chat` command to interact with the bot. For example:
```
> chat Hello!
< Hello there! How can I assist you today?
@BotName chat What is the capital of France?
```
The bot will respond with the answer based on the configured model and context.
## Defaults
The plugin is designed to work out of the box with minimal configuration. Simply set the `api_key`, and the plugin will use sensible defaults for all other parameters.
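Putting the steps above together, a minimal working setup looks like this (only the API key is set, every other parameter keeps its default, and `BotName` stands in for your bot's nick):
```
/msg BotName config plugins.Chat.api_key YOUR_API_KEY
/msg BotName load Chat
@BotName chat Hello!
```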

__init__.py

@@ -38,9 +38,9 @@ from supybot import world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = "1"
__version__ = "2025.10.02"
# XXX Replace this with an appropriate author or supybot.Author instance.
# Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.Author('John Burwell','deet','deet@atatdotdot.com')
# This is a dictionary mapping supybot.Author instances to lists of
@@ -48,7 +48,7 @@ __author__ = supybot.Author('John Burwell','deet','deet@atatdotdot.com')
__contributors__ = {}
# This is a url where the most recent plugin package can be downloaded.
__url__ = ''
__url__ = 'https://git.b-wells.us/jmbwell/chat'
from . import config
from . import plugin

config.py

@@ -32,9 +32,8 @@ from supybot import conf, registry
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('Chat')
except:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
except ImportError:
# Placeholder that allows the plugin to run without the i18n module
_ = lambda x: x
@@ -48,80 +47,78 @@ def configure(advanced):
Chat = conf.registerPlugin('Chat')
# This is where your configuration variables (if any) should go. For example:
# conf.registerGlobalValue(Chat, 'someConfigVariableName',
# registry.Boolean(False, _("""Help for someConfigVariableName.""")))
# API key
# Configuration Parameters
# API key for OpenAI
conf.registerGlobalValue(
Chat,
"api_key",
registry.String(
"",
_("""Your ChatGPT API key"""),
_("""Your ChatGPT API key. This must be set for the plugin to work."""),
private=True,
)
)
# Model
conf.registerChannelValue(
# Default model to use
conf.registerGlobalValue(
Chat,
"model",
registry.String(
"gpt-3.5-turbo",
_("""The language model to use"""),
),
"gpt-4",
_("""The OpenAI model to use for generating responses. Default is 'gpt-4'."""),
)
)
# System prompt
conf.registerChannelValue(
# Maximum tokens for responses
conf.registerGlobalValue(
Chat,
"max_tokens",
registry.Integer(
256,
_("""The maximum number of tokens to include in the response. Default is 256."""),
)
)
# System prompt for the assistant
conf.registerGlobalValue(
Chat,
"system_prompt",
registry.String(
"I am an IRC bot named $bot_name, in an IRC channel called $channel_name. I will not remind users that I am an AI or what my limitations are; I will just respond. I will not prefix my own response with any user's name. The following is a transcript of the conversation in the channel.",
_("""The 'system' prompt first given to ChatGPT"""),
),
"You are an IRC bot participating in a channel conversation. Keep your responses brief, informal, and conversational. Avoid overly technical language unless asked, and ensure your tone matches the casual nature of IRC discussions.",
_("""The system prompt to guide the assistant's behavior. Default provides context about being an IRC bot and guidance on interaction style."""),
)
)
# What to do with multi-line replies
conf.registerChannelValue(
# Number of lines to include from scrollback
conf.registerGlobalValue(
Chat,
"join_multiple_lines",
registry.Boolean(
True,
_("""Combine multiple lines in the response into one string. If false, reply line by line"""),
),
"scrollback_lines",
registry.Integer(
10,
_("""The number of recent lines from the channel to include as context. Default is 10."""),
)
)
# What to use to join multiple lines
conf.registerChannelValue(
# String to join multi-line responses
conf.registerGlobalValue(
Chat,
"join_string",
registry.String(
" / ",
_("""When joining lines, what string to use between each line"""),
),
_("""The string used to join multi-line responses into a single line. Default is ' / '."""),
)
)
# How much scrollback to include in the prompt
conf.registerChannelValue(
# Logging level for the plugin
conf.registerGlobalValue(
Chat,
"scrollback_lines",
registry.Integer(
30,
_("""How many lines of scrollback history to include in the prompt"""),
),
'log_level',
registry.String(
'INFO',
_("""The logging level for the Chat plugin. Options: DEBUG, INFO, WARNING, ERROR, CRITICAL.""")
)
)
# Maximum tokens to use
conf.registerChannelValue(
Chat,
"max_tokens",
registry.Integer(
1000,
_("""Maximum token count for combined query/reply"""),
),
)
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
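For orientation, here is a minimal sketch of the read side of these registrations, i.e. how a plugin method pulls the values back out of the registry at runtime via `registryValue`. The class and method names below are illustrative only; the real consumer is the `chat` command in plugin.py.
```python
# Illustrative sketch only: reading the Chat registry values registered above.
from supybot import callbacks


class ChatSettingsSketch(callbacks.Plugin):
    """Hypothetical helper; not part of this commit."""

    def _current_settings(self):
        return {
            "model": self.registryValue("model"),                      # default "gpt-4"
            "max_tokens": self.registryValue("max_tokens"),            # default 256
            "system_prompt": self.registryValue("system_prompt"),
            "scrollback_lines": self.registryValue("scrollback_lines"),  # default 10
            "join_string": self.registryValue("join_string"),          # default " / "
        }
```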

plugin.py

@@ -28,13 +28,14 @@
###
import configparser
import json
import os
import re
import requests
from supybot import utils, plugins, ircutils, callbacks, conf
import supybot
from supybot import callbacks, conf, ircutils
from supybot.commands import *
import logging
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('Chat')
@@ -44,6 +45,29 @@ except ImportError:
_ = lambda x: x
def truncate_messages(messages, max_tokens):
"""
Truncates the messages list to ensure the total token count does not exceed max_tokens.
Args:
messages (list): The list of message dictionaries to truncate.
max_tokens (int): The maximum number of tokens allowed.
Returns:
list: The truncated list of messages.
"""
total_tokens = 0
truncated = []
for message in reversed(messages):
# Approximate token count by splitting content into words
message_tokens = len(message["content"].split())
if total_tokens + message_tokens > max_tokens:
break
truncated.insert(0, message)
total_tokens += message_tokens
return truncated
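Because `truncate_messages` approximates token counts by whitespace-separated words and keeps the newest messages first, its behavior can be checked with a tiny illustrative example (values chosen only for demonstration):
```python
# Illustrative check of the word-count approximation in truncate_messages.
msgs = [
    {"role": "user", "content": "first message with five words"},  # ~5 "tokens"
    {"role": "user", "content": "second one"},                      # ~2 "tokens"
]
# With a budget of 4, only the newest message fits; the older one is dropped.
assert truncate_messages(msgs, max_tokens=4) == [
    {"role": "user", "content": "second one"},
]
```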
class Chat(callbacks.Plugin):
"""Sends message to ChatGPT and replies with the response
"""
@@ -51,17 +75,9 @@ class Chat(callbacks.Plugin):
def __init__(self, irc):
self.__parent = super(Chat, self)
self.__parent.__init__(irc)
# Load the API key from the configuration file
config_path = os.path.join(os.path.dirname(__file__), 'config.ini')
self.config = configparser.ConfigParser()
self.config.read(config_path)
self.api_key = self.config.get('Chat', 'api_key')
def conversation_history(self):
history = irclib.IrcState.history[-30:]
filtered_messages = [(msg.args[0], msg.args[1]) for msg in history if msg.command == 'PRIVMSG']
return [{"role": "user", "content": f"{nick}: {msg}"} for nick, msg in history]
log_level = self.registryValue('log_level').upper()
self.log.setLevel(getattr(logging, log_level, logging.INFO))
self.log.info("Chat plugin initialized with log level: %s", log_level)
def filter_prefix(self, msg, prefix):
if msg.startswith(prefix):
@@ -71,72 +87,115 @@ def chat(self, irc, msg, args, string):
def chat(self, irc, msg, args, string):
"""
Sends your comment to ChatGPT and returns the response.
<message>
Args:
string (str): The string to send to ChatGPT.
Sends a message to ChatGPT and returns the response. The bot will include recent
conversation history from the channel to provide context.
Returns:
str: ChatGPT's response
Example:
@bot chat What is the capital of France?
"""
# Construct the invocation string to identify bot commands
invocation_string = f"{conf.supybot.reply.whenAddressedBy.chars()}{self.name().lower()} "
# self.log.info(f"invocation_string: {invocation_string}")
self.log.debug(f"Invocation string: {invocation_string} | User: {msg.nick} | Channel: {msg.args[0]}")
# Retrieve model and token settings from the plugin's configuration
model = self.registryValue("model")
max_tokens = self.registryValue("max_tokens")
# If the configured system prompt contains BOTNAME and IRCCHANNEL tokens, replace those with real values
system_prompt = self.registryValue("system_prompt").replace("$bot_name", irc.nick).replace("$channel_name", msg.args[0])
# Use a default system prompt if none is configured
default_prompt = "You are a helpful assistant."
system_prompt = self.registryValue("system_prompt") or default_prompt
# Get the last few lines of the chat scrollback to include in the prompt
# Replace dynamic placeholders in the system prompt with actual values
system_prompt = system_prompt.replace("$bot_name", irc.nick).replace("$channel_name", msg.args[0])
# Retrieve the last few lines of the chat scrollback to provide context
history = irc.state.history[-self.registryValue("scrollback_lines"):]
# for message in history:
# self.log.info(f"{message.nick}: {json.dumps(message.args)}")
self.log.debug(f"Raw history: {history}")
# Restrict the scrollback to PRIVMSGs in the current channel, filtering out the invocation prefix
# Filter the scrollback to include only PRIVMSGs in the current channel
filtered_messages = [
(message.nick, (self.filter_prefix(message.args[1], f"{invocation_string}")))
(message.nick, self.filter_prefix(message.args[1], f"{invocation_string}"))
for message in history
if message.command == 'PRIVMSG' and message.args[0] == msg.args[0]
]
][:-1]
# Format the conversation history for submission to the API
if not filtered_messages:
# Log a warning if no relevant messages are found in the scrollback
self.log.warning(f"No messages found in scrollback for channel {msg.args[0]}")
# Format the conversation history for the API request
conversation_history = [
{
"role": "assistant" if nick == irc.nick else "user",
"content": f"{nick}: {msg}"
}
"role": "assistant" if nick == "" else "user",
"content": re.sub(r'^.+?:\\s', '', msg) if nick == "" else f"{nick}: {msg}"
}
for nick, msg in filtered_messages
]
]
# Combine the system prompt, and the scrollback (which already includes the invocation)
messages = [{"role": "system", "content": system_prompt}] + conversation_history
# self.log.info(json.dumps(messages))
# Combine the system prompt and the conversation history
messages = [{"role": "system", "content": system_prompt}] + conversation_history + [{"role": "user", "content": msg.args[1]}]
# Truncate messages to ensure the total token count does not exceed the model's limit
messages = truncate_messages(messages, 8192)
self.log.debug(f"API Request: {json.dumps(messages)}")
# Submit the request to the API
# res = requests.post(f"http://localhost:8000/capture", headers = {
res = requests.post(f"https://api.openai.com/v1/chat/completions", headers = {
"Content-Type":"application/json",
"Authorization":f"Bearer {self.registryValue('api_key')}"
try:
# Send the request to the OpenAI API
res = requests.post(
"https://api.openai.com/v1/chat/completions",
headers={
"Content-Type": "application/json",
"Authorization": f"Bearer {self.registryValue('api_key')}"
},
json={
"model": model,
"messages": messages,
"max_tokens": max_tokens,
}).json()
},
timeout=10 # Set a timeout for the request
)
res.raise_for_status() # Raise an HTTPError for bad responses (4xx or 5xx)
res = res.json()
# Pick a response
response = res['choices'][0]['message']['content'].strip()
self.log.debug(f"API Response: {json.dumps(res)}")
# Combine multiple lines
line = response.replace("\n", self.registryValue("join_string"))
if "error" in res:
# Log and reply with the error message if the API returns an error
error_message = res["error"].get("message", "Unknown error")
self.log.error(f"API error: {error_message} | User input: {msg.args[1]} | Channel: {msg.args[0]}")
irc.reply(f"API error: {error_message}")
return
# Post the reply to the channel
irc.reply( line )
# Extract and format the response from the API
response = res['choices'][0]['message']['content'].strip()
# for line in conversation_history:
# irc.reply( line )
# Handle multi-line responses intelligently
lines = response.splitlines()
if len(lines) > 1:
# Join lines with the configured join_string, skipping empty lines
response = self.registryValue("join_string").join(line.strip() for line in lines if line.strip())
irc.reply(response)
# Log the successful processing of the request
self.log.info(f"Successfully processed request for user {msg.nick} in channel {msg.args[0]}")
except requests.exceptions.Timeout:
# Handle and log timeout errors
self.log.error("Request timed out.")
irc.reply("The request to the API timed out. Please try again later.")
except requests.exceptions.HTTPError as e:
# Handle and log HTTP errors
self.log.error(f"HTTP error: {e}")
irc.reply("An HTTP error occurred while contacting the API.")
except requests.exceptions.RequestException as e:
# Handle and log other request exceptions
self.log.error(f"Request exception: {e}")
irc.reply("An error occurred while contacting the API.")
chat = wrap(chat, ['text'])
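For reference, the request and response shape that `chat` depends on, reduced to a standalone sketch. The endpoint, payload fields, and the `choices[0].message.content` path mirror the code above; the key, nick, and prompt values are placeholders.
```python
# Standalone sketch of the Chat Completions call made by chat() above.
import requests

res = requests.post(
    "https://api.openai.com/v1/chat/completions",
    headers={
        "Content-Type": "application/json",
        "Authorization": "Bearer YOUR_API_KEY",  # placeholder, not a real key
    },
    json={
        "model": "gpt-4",
        "messages": [
            {"role": "system", "content": "You are an IRC bot."},
            {"role": "user", "content": "somenick: What is the capital of France?"},
        ],
        "max_tokens": 256,
    },
    timeout=10,
)
res.raise_for_status()
reply = res.json()["choices"][0]["message"]["content"].strip()
print(reply)
```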

test.py

@@ -28,11 +28,57 @@
###
from supybot.test import *
from supybot.test import PluginTestCase
class ChooseTestCase(PluginTestCase):
class ChatTestCase(PluginTestCase):
plugins = ('Chat',)
def test_truncate_messages_within_limit(self):
from plugins.Chat.plugin import truncate_messages
messages = [{"role": "user", "content": "Hello"}]
result = truncate_messages(messages, max_tokens=10)
self.assertEqual(result, messages)
def test_truncate_messages_exceeds_limit(self):
from plugins.Chat.plugin import truncate_messages
messages = [
{"role": "user", "content": "Hello"},
{"role": "user", "content": "This is a very long message that exceeds the token limit."}
]
result = truncate_messages(messages, max_tokens=5)
self.assertEqual(result, [{"role": "user", "content": "Hello"}])
def test_truncate_messages_empty(self):
from plugins.Chat.plugin import truncate_messages
messages = []
result = truncate_messages(messages, max_tokens=10)
self.assertEqual(result, [])
def test_system_prompt_replacement(self):
# Simulate system prompt replacement
from plugins.Chat.plugin import Chat
plugin = self.irc.getCallback('Chat')
plugin.registryValue = lambda key: {
"system_prompt": "You are $bot_name in $channel_name."
}.get(key, "")
system_prompt = plugin.registryValue("system_prompt")
system_prompt = system_prompt.replace("$bot_name", "TestBot").replace("$channel_name", "#test")
self.assertEqual(system_prompt, "You are TestBot in #test.")
def test_join_logic(self):
# Simulate join logic for multi-line responses
from plugins.Chat.plugin import Chat
plugin = self.irc.getCallback('Chat')
plugin.registryValue = lambda key: {
"join_string": " / "
}.get(key, "")
response = "Line 1\nLine 2\nLine 3"
lines = response.splitlines()
joined_response = plugin.registryValue("join_string").join(line.strip() for line in lines if line.strip())
self.assertEqual(joined_response, "Line 1 / Line 2 / Line 3")
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
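These tests target Limnoria's standard test harness, so they should be runnable with the bundled `supybot-test` runner; the exact path is an assumption about where the plugin is checked out:
```
supybot-test path/to/plugins/Chat
```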