Move configuration to supybot config registry; improve logging and docs

Commit c98367764f (parent 4cc23ee55e)
Author: John Burwell
Date: 2025-10-02 17:01:07 +00:00
6 changed files with 246 additions and 267 deletions

.gitignore

@@ -1,161 +1,7 @@
-# Byte-compiled / optimized / DLL files
+# Byte-compiled / optimized files
 __pycache__/
 *.py[cod]
-*$py.class
-# C extensions
+# Test artifacts (if any tests are added later)
-*.so
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
 .coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-*.py,cover
-.hypothesis/
 .pytest_cache/
-cover/
-# Translations
-*.mo
-*.pot
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-# Flask stuff:
-instance/
-.webassets-cache
-# Scrapy stuff:
-.scrapy
-# Sphinx documentation
-docs/_build/
-# PyBuilder
-.pybuilder/
-target/
-# Jupyter Notebook
-.ipynb_checkpoints
-# IPython
-profile_default/
-ipython_config.py
-# pyenv
-# For a library or package, you might want to ignore these files since the code is
-# intended to run in multiple environments; otherwise, check them in:
-# .python-version
-# pipenv
-# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-# However, in case of collaboration, if having platform-specific dependencies or dependencies
-# having no cross-platform support, pipenv may install dependencies that don't work, or not
-# install all needed dependencies.
-#Pipfile.lock
-# poetry
-# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
-# This is especially recommended for binary packages to ensure reproducibility, and is more
-# commonly ignored for libraries.
-# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
-#poetry.lock
-# pdm
-# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
-#pdm.lock
-# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
-# in version control.
-# https://pdm.fming.dev/#use-with-ide
-.pdm.toml
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
-__pypackages__/
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-# SageMath parsed files
-*.sage.py
-# Environments
-.env
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-# Spyder project settings
-.spyderproject
-.spyproject
-# Rope project settings
-.ropeproject
-# mkdocs documentation
-/site
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-# Pyre type checker
-.pyre/
-# pytype static type analyzer
-.pytype/
-# Cython debug symbols
-cython_debug/
-# PyCharm
-# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
-# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
-# and can be added to the global gitignore or merged into this file. For a more nuclear
-# option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
 config.ini

README.md

@@ -5,11 +5,10 @@ A Limnoria plugin that brings ChatGPT into your IRC channel
 ## Installation
 1. Install the plugin
-2. Put your API key in config.ini like so:
+2. Put your API key in the plugin configuration registry like so:
 ```
-[Chat]
-api_key = YOUR_API_KEY
+/msg BotName config plugins.Chat.api_key YOUR_API_KEY
 ```
 3. Load the plugin:
@@ -18,10 +17,42 @@ api_key = YOUR_API_KEY
 load Chat
 ```
+## Configuration
+The Chat plugin supports the following configuration parameters:
+- **`api_key`**: The API key for accessing OpenAI's API. This must be set for the plugin to work.
+- **`model`**: The OpenAI model to use for generating responses. Default: `gpt-4`.
+- **`max_tokens`**: The maximum number of tokens to include in the response. Default: `256`.
+- **`system_prompt`**: The system prompt to guide the assistant's behavior. Default: `You are a helpful assistant.`
+- **`scrollback_lines`**: The number of recent lines from the channel to include as context. Default: `10`.
+- **`join_string`**: The string used to join multi-line responses into a single line. Default: ` / `.
+### Example Configuration
+To set the API key:
+```
+/msg BotName config plugins.Chat.api_key YOUR_API_KEY
+```
+To change the model:
+```
+/msg BotName config plugins.Chat.model gpt-3.5-turbo
+```
+To adjust the maximum tokens:
+```
+/msg BotName config plugins.Chat.max_tokens 512
+```
 ## Usage
-Example:
+Once configured, you can use the `chat` command to interact with the bot. For example:
 ```
-> chat Hello!
-< Hello there! How can I assist you today?
+@BotName chat What is the capital of France?
 ```
+The bot will respond with the answer based on the configured model and context.
+## Defaults
+The plugin is designed to work out of the box with minimal configuration. Simply set the `api_key`, and the plugin will use sensible defaults for all other parameters.
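Beyond setting values, the same registry can be inspected from IRC. Assuming the stock `Config` plugin is loaded, commands along these lines should list the plugin's group and show a current value (`BotName` is a placeholder):

```
/msg BotName config list plugins.Chat
/msg BotName config plugins.Chat.model
```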

__init__.py

@@ -38,9 +38,9 @@ from supybot import world
 # Use this for the version of this plugin.  You may wish to put a CVS keyword
 # in here if you're keeping the plugin in CVS or some similar system.
-__version__ = "1"
+__version__ = "2025.10.02"
-# XXX Replace this with an appropriate author or supybot.Author instance.
+# Replace this with an appropriate author or supybot.Author instance.
 __author__ = supybot.Author('John Burwell','deet','deet@atatdotdot.com')
 # This is a dictionary mapping supybot.Author instances to lists of
@@ -48,7 +48,7 @@ __author__ = supybot.Author('John Burwell','deet','deet@atatdotdot.com')
 __contributors__ = {}
 # This is a url where the most recent plugin package can be downloaded.
-__url__ = ''
+__url__ = 'https://git.b-wells.us/jmbwell/chat'
 from . import config
 from . import plugin
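For orientation, the files touched by this commit follow the standard Limnoria plugin layout; a checkout is normally dropped into the bot's plugins directory under a folder named `Chat`:

```
plugins/
└── Chat/
    ├── __init__.py   # plugin metadata; imports the config and plugin modules
    ├── config.py     # registry definitions under plugins.Chat.*
    ├── plugin.py     # the chat command implementation
    ├── test.py       # PluginTestCase-based tests
    └── README.md
```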

config.py

@@ -32,9 +32,8 @@ from supybot import conf, registry
 try:
     from supybot.i18n import PluginInternationalization
     _ = PluginInternationalization('Chat')
-except:
-    # Placeholder that allows to run the plugin on a bot
-    # without the i18n module
+except ImportError:
+    # Placeholder that allows the plugin to run without the i18n module
     _ = lambda x: x
@@ -48,80 +47,78 @@ def configure(advanced):
 Chat = conf.registerPlugin('Chat')
-# This is where your configuration variables (if any) should go. For example:
-# conf.registerGlobalValue(Chat, 'someConfigVariableName',
-# registry.Boolean(False, _("""Help for someConfigVariableName.""")))
-# API key
+# Configuration Parameters
+# API key for OpenAI
 conf.registerGlobalValue(
     Chat,
     "api_key",
     registry.String(
         "",
-        _("""Your ChatGPT API key"""),
+        _("""Your ChatGPT API key. This must be set for the plugin to work."""),
         private=True,
     )
 )
-# Model
-conf.registerChannelValue(
+# Default model to use
+conf.registerGlobalValue(
     Chat,
     "model",
     registry.String(
-        "gpt-3.5-turbo",
-        _("""The language model to use"""),
-    ),
+        "gpt-4",
+        _("""The OpenAI model to use for generating responses. Default is 'gpt-4'."""),
+    )
 )
-# System prompt
-conf.registerChannelValue(
+# Maximum tokens for responses
+conf.registerGlobalValue(
+    Chat,
+    "max_tokens",
+    registry.Integer(
+        256,
+        _("""The maximum number of tokens to include in the response. Default is 256."""),
+    )
+)
+# System prompt for the assistant
+conf.registerGlobalValue(
     Chat,
     "system_prompt",
     registry.String(
-        "I am an IRC bot named $bot_name, in an IRC channel called $channel_name. I will not remind users that I am an AI or what my limitations are; I will just respond. I will not prefix my own response with any user's name. The following is a transcript of the conversation in the channel.",
-        _("""The 'system' prompt first given to ChatGPT"""),
-    ),
+        "You are an IRC bot participating in a channel conversation. Keep your responses brief, informal, and conversational. Avoid overly technical language unless asked, and ensure your tone matches the casual nature of IRC discussions.",
+        _("""The system prompt to guide the assistant's behavior. Default provides context about being an IRC bot and guidance on interaction style."""),
+    )
 )
-# What to do with multi-line replies
-conf.registerChannelValue(
+# Number of lines to include from scrollback
+conf.registerGlobalValue(
     Chat,
-    "join_multiple_lines",
-    registry.Boolean(
-        True,
-        _("""Combine multiple lines in the response into one string. If false, reply line by line"""),
-    ),
+    "scrollback_lines",
+    registry.Integer(
+        10,
+        _("""The number of recent lines from the channel to include as context. Default is 10."""),
+    )
 )
-# What to use to join multiple lines
-conf.registerChannelValue(
+# String to join multi-line responses
+conf.registerGlobalValue(
     Chat,
     "join_string",
     registry.String(
         " / ",
-        _("""When joining lines, what string to use between each line"""),
-    ),
+        _("""The string used to join multi-line responses into a single line. Default is ' / '."""),
+    )
 )
-# How much scrollback to include in the prompt
-conf.registerChannelValue(
+# Logging level for the plugin
+conf.registerGlobalValue(
     Chat,
-    "scrollback_lines",
-    registry.Integer(
-        30,
-        _("""How many lines of scrollback history to include in the prompt"""),
-    ),
+    'log_level',
+    registry.String(
+        'INFO',
+        _("""The logging level for the Chat plugin. Options: DEBUG, INFO, WARNING, ERROR, CRITICAL.""")
+    )
 )
-# Maximum tokens to use
-conf.registerChannelValue(
-    Chat,
-    "max_tokens",
-    registry.Integer(
-        1000,
-        _("""Maximum token count for combined query/reply"""),
-    ),
-)
 # vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
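For anyone poking at the registry outside a running bot, here is a minimal sketch of how one of the values registered above behaves, assuming Limnoria is installed; inside the plugin the same value is read with `self.registryValue("max_tokens")`, and end users go through the `config` command shown in the README:

```
from supybot import conf, registry

# Register the plugin group and one value, mirroring config.py above.
Chat = conf.registerPlugin('Chat')
conf.registerGlobalValue(Chat, 'max_tokens',
                         registry.Integer(256, """Maximum response tokens."""))

print(Chat.max_tokens())       # -> 256 (the registered default)
Chat.max_tokens.setValue(512)  # what the Config plugin does when a user changes it
print(Chat.max_tokens())       # -> 512
```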

plugin.py

@@ -28,13 +28,14 @@
 ###
-import configparser
 import json
-import os
+import re
 import requests
-import supybot
-from supybot import utils, plugins, ircutils, callbacks, conf
+from supybot import callbacks, conf, ircutils
 from supybot.commands import *
+import logging
 try:
     from supybot.i18n import PluginInternationalization
     _ = PluginInternationalization('Chat')
@@ -44,6 +45,29 @@ except ImportError:
     _ = lambda x: x
+def truncate_messages(messages, max_tokens):
+    """
+    Truncates the messages list to ensure the total token count does not exceed max_tokens.
+    Args:
+        messages (list): The list of message dictionaries to truncate.
+        max_tokens (int): The maximum number of tokens allowed.
+    Returns:
+        list: The truncated list of messages.
+    """
+    total_tokens = 0
+    truncated = []
+    for message in reversed(messages):
+        # Approximate token count by splitting content into words
+        message_tokens = len(message["content"].split())
+        if total_tokens + message_tokens > max_tokens:
+            break
+        truncated.insert(0, message)
+        total_tokens += message_tokens
+    return truncated
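A quick standalone check of the helper's behavior, using the same import path the tests further down use; word counts stand in for real tokens, so the numbers are only illustrative:

```
from plugins.Chat.plugin import truncate_messages

messages = [
    {"role": "system", "content": "You are a helpful assistant."},       # 5 words
    {"role": "user", "content": "earlier chatter that can be dropped"},  # 6 words
    {"role": "user", "content": "what is the capital of France?"},       # 6 words
]

# Scanning newest to oldest, messages are kept until the next one would push
# the running word count past the budget; the oldest entries drop out first.
kept = truncate_messages(messages, max_tokens=12)
assert kept == messages[1:]
```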
 class Chat(callbacks.Plugin):
     """Sends message to ChatGPT and replies with the response
     """
@@ -51,92 +75,127 @@
     def __init__(self, irc):
         self.__parent = super(Chat, self)
         self.__parent.__init__(irc)
-        # Load the API key from the configuration file
-        config_path = os.path.join(os.path.dirname(__file__), 'config.ini')
-        self.config = configparser.ConfigParser()
-        self.config.read(config_path)
-        self.api_key = self.config.get('Chat', 'api_key')
-    def conversation_history(self):
-        history = irclib.IrcState.history[-30:]
-        filtered_messages = [(msg.args[0], msg.args[1]) for msg in history if msg.command == 'PRIVMSG']
-        return [{"role": "user", "content": f"{nick}: {msg}"} for nick, msg in history]
+        log_level = self.registryValue('log_level').upper()
+        self.log.setLevel(getattr(logging, log_level, logging.INFO))
+        self.log.info("Chat plugin initialized with log level: %s", log_level)
     def filter_prefix(self, msg, prefix):
         if msg.startswith(prefix):
            return msg[len(prefix):]
         else:
            return msg
     def chat(self, irc, msg, args, string):
         """
-        Sends your comment to ChatGPT and returns the response.
-        Args:
-            string (str): The string to send to ChatGPT.
-        Returns:
-            str: ChatGPT's response
+        <message>
+        Sends a message to ChatGPT and returns the response. The bot will include recent
+        conversation history from the channel to provide context.
+        Example:
+            @bot chat What is the capital of France?
         """
+        # Construct the invocation string to identify bot commands
         invocation_string = f"{conf.supybot.reply.whenAddressedBy.chars()}{self.name().lower()} "
-        # self.log.info(f"invocation_string: {invocation_string}")
+        self.log.debug(f"Invocation string: {invocation_string} | User: {msg.nick} | Channel: {msg.args[0]}")
+        # Retrieve model and token settings from the plugin's configuration
         model = self.registryValue("model")
         max_tokens = self.registryValue("max_tokens")
-        # If the configured system prompt contains BOTNAME and IRCCHANNEL tokens, replace those with real values
-        system_prompt = self.registryValue("system_prompt").replace("$bot_name", irc.nick).replace("$channel_name", msg.args[0])
-        # Get the last few lines of the chat scrollback to include in the prompt
+        # Use a default system prompt if none is configured
+        default_prompt = "You are a helpful assistant."
+        system_prompt = self.registryValue("system_prompt") or default_prompt
+        # Replace dynamic placeholders in the system prompt with actual values
+        system_prompt = system_prompt.replace("$bot_name", irc.nick).replace("$channel_name", msg.args[0])
+        # Retrieve the last few lines of the chat scrollback to provide context
         history = irc.state.history[-self.registryValue("scrollback_lines"):]
-        # for message in history:
-        #     self.log.info(f"{message.nick}: {json.dumps(message.args)}")
-        # Restrict the scrollback to PRIVMSGs in the current channel, filtering out the invocation prefix
+        self.log.debug(f"Raw history: {history}")
+        # Filter the scrollback to include only PRIVMSGs in the current channel
         filtered_messages = [
-            (message.nick, (self.filter_prefix(message.args[1], f"{invocation_string}")))
+            (message.nick, self.filter_prefix(message.args[1], f"{invocation_string}"))
             for message in history
             if message.command == 'PRIVMSG' and message.args[0] == msg.args[0]
-        ]
-        # Format the conversation history for submission to the API
+        ][:-1]
+        if not filtered_messages:
+            # Log a warning if no relevant messages are found in the scrollback
+            self.log.warning(f"No messages found in scrollback for channel {msg.args[0]}")
+        # Format the conversation history for the API request
         conversation_history = [
             {
-                "role": "assistant" if nick == irc.nick else "user",
-                "content": f"{nick}: {msg}"
+                "role": "assistant" if nick == "" else "user",
+                "content": re.sub(r'^.+?:\\s', '', msg) if nick == "" else f"{nick}: {msg}"
             }
             for nick, msg in filtered_messages
         ]
-        # Combine the system prompt, and the scrollback (which already includes the invocation)
-        messages = [{"role": "system", "content": system_prompt}] + conversation_history
-        # self.log.info(json.dumps(messages))
-        # Submit the request to the API
-        # res = requests.post(f"http://localhost:8000/capture", headers = {
-        res = requests.post(f"https://api.openai.com/v1/chat/completions", headers = {
-            "Content-Type":"application/json",
-            "Authorization":f"Bearer {self.registryValue('api_key')}"
-            },
-            json={
-                "model": model,
-                "messages": messages,
-                "max_tokens": max_tokens,
-            }).json()
-        # Pick a response
-        response = res['choices'][0]['message']['content'].strip()
-        # Combine multiple lines
-        line = response.replace("\n", self.registryValue("join_string"))
-        # Post the reply to the channel
-        irc.reply( line )
-        # for line in conversation_history:
-        #     irc.reply( line )
+        # Combine the system prompt and the conversation history
+        messages = [{"role": "system", "content": system_prompt}] + conversation_history + [{"role": "user", "content": msg.args[1]}]
+        # Truncate messages to ensure the total token count does not exceed the model's limit
+        messages = truncate_messages(messages, 8192)
+        self.log.debug(f"API Request: {json.dumps(messages)}")
+        try:
+            # Send the request to the OpenAI API
+            res = requests.post(
+                "https://api.openai.com/v1/chat/completions",
+                headers={
+                    "Content-Type": "application/json",
+                    "Authorization": f"Bearer {self.registryValue('api_key')}"
+                },
+                json={
+                    "model": model,
+                    "messages": messages,
+                    "max_tokens": max_tokens,
+                },
+                timeout=10  # Set a timeout for the request
+            )
+            res.raise_for_status()  # Raise an HTTPError for bad responses (4xx or 5xx)
+            res = res.json()
+            self.log.debug(f"API Response: {json.dumps(res)}")
+            if "error" in res:
+                # Log and reply with the error message if the API returns an error
+                error_message = res["error"].get("message", "Unknown error")
+                self.log.error(f"API error: {error_message} | User input: {msg.args[1]} | Channel: {msg.args[0]}")
+                irc.reply(f"API error: {error_message}")
+                return
+            # Extract and format the response from the API
+            response = res['choices'][0]['message']['content'].strip()
+            # Handle multi-line responses intelligently
+            lines = response.splitlines()
+            if len(lines) > 1:
+                # Join lines with the configured join_string, skipping empty lines
+                response = self.registryValue("join_string").join(line.strip() for line in lines if line.strip())
+            irc.reply(response)
+            # Log the successful processing of the request
+            self.log.info(f"Successfully processed request for user {msg.nick} in channel {msg.args[0]}")
+        except requests.exceptions.Timeout:
+            # Handle and log timeout errors
+            self.log.error("Request timed out.")
+            irc.reply("The request to the API timed out. Please try again later.")
+        except requests.exceptions.HTTPError as e:
+            # Handle and log HTTP errors
+            self.log.error(f"HTTP error: {e}")
+            irc.reply("An HTTP error occurred while contacting the API.")
+        except requests.exceptions.RequestException as e:
+            # Handle and log other request exceptions
+            self.log.error(f"Request exception: {e}")
+            irc.reply("An error occurred while contacting the API.")
     chat = wrap(chat, ['text'])
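For readers who have not used the endpoint, the success and error branches above assume OpenAI Chat Completions response bodies roughly shaped like the following (abridged to the fields the plugin actually touches):

```
# Abridged shapes of the response bodies the code above expects.
success_body = {
    "choices": [
        {"message": {"role": "assistant", "content": "Paris."}}
    ]
}
error_body = {
    "error": {"message": "Incorrect API key provided", "type": "invalid_request_error"}
}

# The success path boils down to:
reply_text = success_body["choices"][0]["message"]["content"].strip()
assert reply_text == "Paris."
```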

test.py

@@ -28,11 +28,57 @@
 ###
-from supybot.test import *
+from supybot.test import PluginTestCase
-class ChooseTestCase(PluginTestCase):
+class ChatTestCase(PluginTestCase):
     plugins = ('Chat',)
+    def test_truncate_messages_within_limit(self):
+        from plugins.Chat.plugin import truncate_messages
+        messages = [{"role": "user", "content": "Hello"}]
+        result = truncate_messages(messages, max_tokens=10)
+        self.assertEqual(result, messages)
+    def test_truncate_messages_exceeds_limit(self):
+        from plugins.Chat.plugin import truncate_messages
+        messages = [
+            {"role": "user", "content": "Hello"},
+            {"role": "user", "content": "This is a very long message that exceeds the token limit."}
+        ]
+        result = truncate_messages(messages, max_tokens=5)
+        self.assertEqual(result, [{"role": "user", "content": "Hello"}])
+    def test_truncate_messages_empty(self):
+        from plugins.Chat.plugin import truncate_messages
+        messages = []
+        result = truncate_messages(messages, max_tokens=10)
+        self.assertEqual(result, [])
+    def test_system_prompt_replacement(self):
+        # Simulate system prompt replacement
+        from plugins.Chat.plugin import Chat
+        plugin = self.irc.getCallback('Chat')
+        plugin.registryValue = lambda key: {
+            "system_prompt": "You are $bot_name in $channel_name."
+        }.get(key, "")
+        system_prompt = plugin.registryValue("system_prompt")
+        system_prompt = system_prompt.replace("$bot_name", "TestBot").replace("$channel_name", "#test")
+        self.assertEqual(system_prompt, "You are TestBot in #test.")
+    def test_join_logic(self):
+        # Simulate join logic for multi-line responses
+        from plugins.Chat.plugin import Chat
+        plugin = self.irc.getCallback('Chat')
+        plugin.registryValue = lambda key: {
+            "join_string": " / "
+        }.get(key, "")
+        response = "Line 1\nLine 2\nLine 3"
+        lines = response.splitlines()
+        joined_response = plugin.registryValue("join_string").join(line.strip() for line in lines if line.strip())
+        self.assertEqual(joined_response, "Line 1 / Line 2 / Line 3")
 # vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
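Assuming the checkout sits in a `plugins/Chat` directory (which is what the `from plugins.Chat.plugin import ...` lines above expect), the suite should be runnable with Limnoria's test driver, along the lines of:

```
supybot-test plugins/Chat
```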