feat: add Commands enum, delete useless commands, revamp the Settings class and the MarkovChain.message_handler method

This commit is contained in:
cătălin 2024-11-01 04:40:17 +01:00
commit 3a33411dd9
No known key found for this signature in database
18 changed files with 1111 additions and 1190 deletions

36
.pre-commit-config.yaml Normal file
View file

@ -0,0 +1,36 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
hooks:
- id: trailing-whitespace
args: [ --markdown-linebreak-ext=md ]
- id: end-of-file-fixer
- id: check-ast
- id: check-added-large-files
- id: check-byte-order-marker
- id: check-case-conflict
- id: check-docstring-first
- id: check-merge-conflict
- id: check-toml
- id: debug-statements
- id: mixed-line-ending
args: [ --fix=lf ]
- repo: https://github.com/charliermarsh/ruff-pre-commit
rev: v0.6.4
hooks:
- id: ruff
args:
- --fix
- --exit-non-zero-on-fix
- id: ruff-format
- repo: local
hooks:
- id: mypy
name: mypy
entry: uv run mypy
language: system
types: [ python ]

9
Makefile Normal file
View file

@ -0,0 +1,9 @@
# Run all configured pre-commit hooks (formatting, linting, type checks).
# Declared .PHONY so a stray file named `fmt` cannot silence the target.
.PHONY: fmt
fmt:
	uvx pre-commit run --all-files --color always

# Run the test suite with coverage and emit HTML/XML reports.
# NOTE(review): --cov=halig looks copied from another project — confirm the
# package name matches this repository before relying on coverage numbers.
.PHONY: tests
tests:
	uv run pytest --cov=halig -vv tests --report-log reportlog.json
	uv run coverage html
	uv run coverage xml

View file

@ -36,4 +36,3 @@ exe = EXE(
codesign_identity=None, codesign_identity=None,
entitlements_file=None, entitlements_file=None,
) )

View file

@ -13,11 +13,44 @@ dependencies = [
"nltk>=3.9.1", "nltk>=3.9.1",
"pillow>=10.4.0", "pillow>=10.4.0",
"platformdirs>=4.3.6", "platformdirs>=4.3.6",
"pydantic>=2.9.2",
"pydantic-settings>=2.6.0",
"pyinstaller>=6.11.0", "pyinstaller>=6.11.0",
"twitchwebsocket>=1.2.1", "twitchwebsocket>=1.2.1",
"loguru>=0.7.2",
] ]
[tool.uv] [tool.uv]
dev-dependencies = [ dev-dependencies = [
"mypy>=1.13.0",
"pyright>=1.1.387",
"ruff>=0.7.0", "ruff>=0.7.0",
] ]
[[tool.mypy.overrides]]
module = [
"kivy",
"kivy.uix.widget",
"kivy.uix.popup",
"kivy.uix.button",
"kivy.uix.boxlayout",
"kivy.uix.textinput",
"kivy.uix.label",
"kivy.metrics",
"kivy.app",
"kivy.clock",
"nltk",
"nltk.tokenize",
"nltk.tokenize.treebank",
"nltk.tokenize.destructive",
"TwitchWebsocket",
"tokenizer"
]
ignore_missing_imports = true
[tool.ruff.lint]
extend-select = [
"W", "C90", "I", "N", "UP", "S", "BLE", "B", "A", "COM", "C4", "DTZ", "T10", "EM", "ISC", "T20", "PT", "RSE", "RET",
"SIM", "PTH", "ERA", "PGH", "PL", "RUF", "FURB", "PERF"
]
extend-ignore = ["S101", "ISC002", "COM812", "ISC001"]

View file

@ -1,37 +1,64 @@
import json
import logging
import queue import queue
import threading import threading
from pathlib import Path from pathlib import Path
from traceback import print_exc
from kivy.clock import Clock from kivy.clock import Clock
from kivy.metrics import dp from kivy.metrics import dp
from kivy.uix.boxlayout import BoxLayout from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button from kivy.uix.button import Button
from kivy.uix.textinput import TextInput from kivy.uix.textinput import TextInput
from loguru import logger
from src.markovbot_gui.libs.MarkovChainBot import MarkovChain from src.markovbot_gui.libs.markov_chain_bot import MarkovChain
from src.markovbot_gui.log_handler import LogHandler from src.markovbot_gui.libs.settings import Settings
class QueueHandler:
    """Minimal file-like sink that forwards log messages to a thread-safe queue.

    Registered as a loguru sink so log lines produced on the bot thread can be
    drained by the UI thread (see the enclosing file's ``update_log``).
    """

    def __init__(self, message_queue: "queue.Queue[str]") -> None:
        # Parameter renamed from ``queue`` to stop shadowing the stdlib
        # ``queue`` module; the only caller passes it positionally, so the
        # rename is backward-compatible.
        self.queue = message_queue

    def write(self, message: str) -> None:
        """Enqueue one formatted log message."""
        self.queue.put(message)

    def flush(self) -> None:
        """No-op; present only to satisfy the file-like sink protocol."""
class BotRunner(BoxLayout): class BotRunner(BoxLayout):
def __init__(self, config_path: Path, **kwargs): def __init__(self, settings_path: Path, **kwargs):
super().__init__(**kwargs) super().__init__(**kwargs)
self.config_path = config_path self.settings_path = settings_path
self.orientation = "vertical" self.orientation = "vertical"
self.spacing = dp(10) self.spacing = dp(10)
self.padding = dp(20) self.padding = dp(20)
self.bot_thread = None self.bot_thread = None
self.bot_instance = None self.log_queue: queue.Queue = queue.Queue()
self.log_queue = queue.Queue() self.settings = Settings.read(self.settings_path)
# Create log display self.queue_handler = QueueHandler(self.log_queue)
self.log_display = TextInput(multiline=True, readonly=True, size_hint=(1, 1)) logger.remove()
logger.add(
self.queue_handler,
format="{time:YYYY-MM-DD HH:mm:ss} | {level} | {message}",
level=self.settings.log_level,
)
self.log_display = TextInput(
multiline=True,
readonly=True,
size_hint=(1, 1),
background_color=[0.1, 0.1, 0.1, 1], # Dark background
foreground_color=[0.9, 0.9, 0.9, 1], # Light text
)
self.add_widget(self.log_display) self.add_widget(self.log_display)
# Create button layout # Create button layout
button_layout = BoxLayout( button_layout = BoxLayout(
orientation="horizontal", size_hint=(1, None), height=dp(40), spacing=dp(10) orientation="horizontal",
size_hint=(1, None),
height=dp(40),
spacing=dp(10),
) )
# Create start button # Create start button
@ -48,65 +75,45 @@ class BotRunner(BoxLayout):
text="Stop Bot", text="Stop Bot",
size_hint=(None, None), size_hint=(None, None),
size=(dp(100), dp(40)), size=(dp(100), dp(40)),
disabled=True, # Initially disabled as bot isn't running disabled=True,
) )
self.stop_button.bind(on_release=self.stop_bot) self.stop_button.bind(on_release=self.stop_bot)
button_layout.add_widget(self.stop_button) button_layout.add_widget(self.stop_button)
# Create clear log button
self.clear_button = Button(
text="Clear Log",
size_hint=(None, None),
size=(dp(100), dp(40)),
)
self.clear_button.bind(on_release=self.clear_log)
button_layout.add_widget(self.clear_button)
self.add_widget(button_layout) self.add_widget(button_layout)
# Start log update scheduling
Clock.schedule_interval(self.update_log, 0.1) Clock.schedule_interval(self.update_log, 0.1)
# Configure logging
self.setup_logging()
def setup_logging(self):
# Create and configure the log handler
handler = LogHandler(self.log_queue)
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
# Get root logger and add our handler
root_logger = logging.getLogger()
root_logger.addHandler(handler)
root_logger.setLevel(logging.INFO)
def start_bot(self, instance=None): def start_bot(self, instance=None):
try: try:
# Load configuration
if not self.config_path.exists():
raise FileNotFoundError(
f"Configuration file not found at {self.config_path}"
)
with open(self.config_path) as f:
config = json.load(f)
# Create and start bot thread # Create and start bot thread
self.bot_thread = threading.Thread( self.bot_thread = threading.Thread(target=self.run_bot_thread, daemon=True)
target=self.run_bot_thread, args=(config,), daemon=True
)
self.bot_thread.start() self.bot_thread.start()
# Update button states
self.start_button.disabled = True self.start_button.disabled = True
self.stop_button.disabled = False self.stop_button.disabled = False
logging.info("Starting bot...") logger.info("Starting bot...")
except Exception as e: except Exception as e: # noqa: BLE001
logging.error(f"Failed to start bot: {e}") logger.error(f"Failed to start bot: {e}")
def run_bot_thread(self, config): def run_bot_thread(self):
try: try:
self.bot = MarkovChain(self.config_path) self.bot = MarkovChain(self.settings)
self.bot.run_bot() self.bot.run_bot()
except Exception: # noqa: BLE001
except Exception as e: logger.exception("Bot error")
logging.error(f"Bot error: {e}")
finally: finally:
# Always re-enable start button and disable stop button when bot stops
Clock.schedule_once(lambda dt: self.reset_button_states(), 0) Clock.schedule_once(lambda dt: self.reset_button_states(), 0)
def stop_bot(self, _=None): def stop_bot(self, _=None):
@ -114,19 +121,34 @@ class BotRunner(BoxLayout):
# Wait for thread to finish # Wait for thread to finish
if self.bot_thread and self.bot_thread.is_alive(): if self.bot_thread and self.bot_thread.is_alive():
self.bot_thread.join(timeout=1.0) self.bot_thread.join(timeout=3.0)
logging.info("Bot stopped") logger.info("Bot stopped")
self.reset_button_states() self.reset_button_states()
def reset_button_states(self): def reset_button_states(self):
self.start_button.disabled = False self.start_button.disabled = False
self.stop_button.disabled = True self.stop_button.disabled = True
def clear_log(self, instance=None):
self.log_display.text = ""
logger.info("Log cleared")
def update_log(self, dt): def update_log(self, dt):
# Get all new log messages try:
while not self.log_queue.empty(): while not self.log_queue.empty():
message = self.log_queue.get() message = self.log_queue.get_nowait()
self.log_display.text += message + "\n" if message.strip(): # Only add non-empty messages
# Auto-scroll to bottom self.log_display.text += message
self.log_display.cursor = (0, len(self.log_display.text))
# Keep only the last 1000 lines to prevent memory issues
lines = self.log_display.text.split("\n")
if len(lines) > 1000: # noqa: PLR2004
self.log_display.text = "\n".join(lines[-1000:]) + "\n"
# Auto-scroll to bottom
self.log_display.cursor = (0, len(self.log_display.text))
except queue.Empty:
pass
except Exception: # noqa: BLE001
print_exc()

View file

@ -43,41 +43,51 @@ class ConfigWindow(BoxLayout):
try: try:
if config_path.exists(): if config_path.exists():
with open(config_path) as f: with config_path.open("r") as f:
saved_config = json.load(f) saved_config = json.load(f)
# Update self.default_config with saved values # Update self.default_config with saved values
self.default_config.update(saved_config) self.default_config.update(saved_config)
except json.JSONDecodeError: except json.JSONDecodeError:
logging.error(f"Failed to parse config file at {config_path}") logging.exception(f"Failed to parse config file at {config_path}")
except Exception as e: except Exception:
logging.error(f"Error loading config file: {e}") logging.exception("Error loading config file")
# Create widgets # Create widgets
# Channel input # Channel input
channel_layout = BoxLayout( channel_layout = BoxLayout(
orientation="horizontal", size_hint_y=None, height=dp(40) orientation="horizontal",
size_hint_y=None,
height=dp(40),
) )
channel_label = Label(text="Channel:", size_hint_x=0.3) channel_label = Label(text="Channel:", size_hint_x=0.3)
self.channel_input = TextInput( self.channel_input = TextInput(
multiline=False, size_hint_x=0.7, text=self.default_config["Channel"] multiline=False,
size_hint_x=0.7,
text=self.default_config["Channel"],
) )
channel_layout.add_widget(channel_label) channel_layout.add_widget(channel_label)
channel_layout.add_widget(self.channel_input) channel_layout.add_widget(self.channel_input)
# Nickname input # Nickname input
nickname_layout = BoxLayout( nickname_layout = BoxLayout(
orientation="horizontal", size_hint_y=None, height=dp(40) orientation="horizontal",
size_hint_y=None,
height=dp(40),
) )
nickname_label = Label(text="Nickname:", size_hint_x=0.3) nickname_label = Label(text="Nickname:", size_hint_x=0.3)
self.nickname_input = TextInput( self.nickname_input = TextInput(
multiline=False, size_hint_x=0.7, text=self.default_config["Nickname"] multiline=False,
size_hint_x=0.7,
text=self.default_config["Nickname"],
) )
nickname_layout.add_widget(nickname_label) nickname_layout.add_widget(nickname_label)
nickname_layout.add_widget(self.nickname_input) nickname_layout.add_widget(self.nickname_input)
# Authentication input # Authentication input
auth_layout = BoxLayout( auth_layout = BoxLayout(
orientation="horizontal", size_hint_y=None, height=dp(40) orientation="horizontal",
size_hint_y=None,
height=dp(40),
) )
auth_label = Label(text="Auth:", size_hint_x=0.3) auth_label = Label(text="Auth:", size_hint_x=0.3)
self.auth_input = TextInput( self.auth_input = TextInput(
@ -129,11 +139,11 @@ class ConfigWindow(BoxLayout):
Clock.schedule_once(success_popup.dismiss, 1) Clock.schedule_once(success_popup.dismiss, 1)
except Exception as e: except Exception as e: # noqa: BLE001
# Show error message if saving fails # Show error message if saving fails
error_popup = Popup( error_popup = Popup(
title="Error", title="Error",
content=Label(text=f"Failed to save configuration:\n{str(e)}"), content=Label(text=f"Failed to save configuration:\n{e!s}"),
size_hint=(None, None), size_hint=(None, None),
size=(dp(400), dp(150)), size=(dp(400), dp(150)),
) )

View file

@ -1,29 +0,0 @@
import logging
import os
import logging.config
class Log:
    """Side-effect-only helper: configures process-wide logging on construction."""

    def __init__(self, main_file: str):
        # Use the entry-point module's file name as the base of the log file name.
        this_file = os.path.basename(main_file)

        # Imported lazily — NOTE(review): presumably to avoid a circular import
        # between Settings and this module at load time; confirm.
        from src.markovbot_gui.libs.Settings import Settings

        # If PYTHON_LOGGING_CONFIG points at an ini-style logging config, use it,
        # substituting a per-channel log file name into its `logfilename` default.
        if "PYTHON_LOGGING_CONFIG" in os.environ:
            logging.config.fileConfig(
                os.environ.get("PYTHON_LOGGING_CONFIG"),
                defaults={
                    # e.g. "MarkovChainBot_" + channel + ".log"
                    "logfilename": this_file.replace(".py", "_")
                    + Settings.get_channel()
                    + ".log"
                },
                disable_existing_loggers=False,
            )
        else:
            # Otherwise fall back to a plain INFO-level console configuration.
            logging.basicConfig(
                level=logging.INFO,
                format="[%(asctime)s] [%(name)s] [%(levelname)-8s] - %(message)s",
            )

View file

@ -1,675 +0,0 @@
from pathlib import Path
from typing import List, Tuple
from TwitchWebsocket import Message, TwitchWebsocket
from nltk.tokenize import sent_tokenize
import socket
import time
import logging
import re
import string
from src.markovbot_gui.libs.Settings import Settings, get_settings
from src.markovbot_gui.libs.Database import Database
from src.markovbot_gui.libs.Timer import LoopingTimer
from src.markovbot_gui.libs.Tokenizer import detokenize, tokenize
from src.markovbot_gui.libs.Log import Log
Log(__file__)
logger = logging.getLogger(__name__)
class MarkovChain:
def __init__(self, settings_path: Path | None = None):
    """Wire up the bot: blacklist, settings, database, timers, and websocket.

    Args:
        settings_path: Optional path to the settings file; ``None`` lets
            ``get_settings`` fall back to its default location.

    Raises:
        ValueError: If "HelpMessageTimer" is positive but below 300 seconds,
            or "AutomaticGenerationTimer" is positive but below 30 seconds.
    """
    self.settings_path = settings_path
    # Timestamp of the last generated message; drives the cooldown check.
    self.prev_message_t = 0
    # Master switch for the !generate command (toggled by !enable/!disable).
    self._enabled = True
    # This regex should detect similar phrases as links as Twitch does
    self.link_regex = re.compile("\w+\.[a-z]{2,}")
    # List of moderators used in blacklist modification, includes broadcaster
    self.mod_list = []
    self.set_blacklist()

    # Fill previously initialised variables with data from the settings.txt file
    self.settings = get_settings(settings_path=self.settings_path)
    self.set_settings()
    self.db = Database(self.chan)

    # Set up daemon Timer to send help messages
    if self.help_message_timer > 0:
        if self.help_message_timer < 300:
            raise ValueError(
                'Value for "HelpMessageTimer" in must be at least 300 seconds, or a negative number for no help messages.'
            )
        t = LoopingTimer(self.help_message_timer, self.send_help_message)
        t.start()

    # Set up daemon Timer to send automatic generation messages
    if self.automatic_generation_timer > 0:
        if self.automatic_generation_timer < 30:
            raise ValueError(
                'Value for "AutomaticGenerationMessage" in must be at least 30 seconds, or a negative number for no automatic generations.'
            )
        t = LoopingTimer(
            self.automatic_generation_timer, self.send_automatic_generation_message
        )
        t.start()

    # NOTE(review): live=True here — confirm whether TwitchWebsocket connects
    # on construction or only when run_bot()/start_bot() is called.
    self.ws = TwitchWebsocket(
        host=self.host,
        port=self.port,
        chan=self.chan,
        nick=self.nick,
        auth=self.auth,
        callback=self.message_handler,
        capability=["commands", "tags"],
        live=True,
    )
def run_bot(self):
self.ws.start_bot()
def stop_bot(self):
self.ws.stop()
def set_settings(self):
settings = self.settings.read_settings()
self.host = settings["Host"]
self.port = settings["Port"]
self.chan = settings["Channel"]
self.nick = settings["Nickname"]
self.auth = settings["Authentication"]
self.denied_users = [user.lower() for user in settings["DeniedUsers"]] + [
self.nick.lower()
]
self.allowed_users = [user.lower() for user in settings["AllowedUsers"]]
self.cooldown = settings["Cooldown"]
self.key_length = settings["KeyLength"]
self.max_sentence_length = settings["MaxSentenceWordAmount"]
self.min_sentence_length = settings["MinSentenceWordAmount"]
self.help_message_timer = settings["HelpMessageTimer"]
self.automatic_generation_timer = settings["AutomaticGenerationTimer"]
self.whisper_cooldown = settings["WhisperCooldown"]
self.enable_generate_command = settings["EnableGenerateCommand"]
self.sent_separator = settings["SentenceSeparator"]
self.allow_generate_params = settings["AllowGenerateParams"]
self.generate_commands = tuple(settings["GenerateCommands"])
def message_handler(self, m: Message):
try:
if m.type == "366":
logger.info(f"Successfully joined channel: #{m.channel}")
# Get the list of mods used for modifying the blacklist
logger.info("Fetching mod list...")
self.ws.send_message("/mods")
elif m.type == "NOTICE":
# Check whether the NOTICE is a response to our /mods request
if m.message.startswith("The moderators of this channel are:"):
string_list = m.message.replace(
"The moderators of this channel are:", ""
).strip()
self.mod_list = [m.channel] + string_list.split(", ")
logger.info(
f"Fetched mod list. Found {len(self.mod_list) - 1} mods."
)
elif m.message == "There are no moderators of this channel.":
self.mod_list = [m.channel]
logger.info("Fetched mod list. Found no mods.")
# If it is not, log this NOTICE
else:
logger.info(m.message)
elif m.type in ("PRIVMSG", "WHISPER"):
if m.message.startswith("!enable") and self.check_if_permissions(m):
if self._enabled:
self.ws.send_whisper(
m.user, "The generate command is already enabled."
)
else:
self.ws.send_whisper(
m.user, "Users can now use generate command again."
)
self._enabled = True
logger.info("Users can now use generate command again.")
elif m.message.startswith("!disable") and self.check_if_permissions(m):
if self._enabled:
self.ws.send_whisper(
m.user, "Users can now no longer use generate command."
)
self._enabled = False
logger.info("Users can now no longer use generate command.")
else:
self.ws.send_whisper(
m.user, "The generate command is already disabled."
)
elif m.message.startswith(
("!setcooldown", "!setcd")
) and self.check_if_permissions(m):
split_message = m.message.split(" ")
if len(split_message) == 2:
try:
cooldown = int(split_message[1])
except ValueError:
self.ws.send_whisper(
m.user,
"The parameter must be an integer amount, eg: !setcd 30",
)
return
self.cooldown = cooldown
Settings.update_cooldown(cooldown)
self.ws.send_whisper(
m.user,
f"The !generate cooldown has been set to {cooldown} seconds.",
)
else:
self.ws.send_whisper(
m.user,
"Please add exactly 1 integer parameter, eg: !setcd 30.",
)
if m.type == "PRIVMSG":
# Ignore bot messages
if m.user.lower() in self.denied_users:
return
if self.check_if_generate(m.message):
if (
not self.enable_generate_command
and not self.check_if_permissions(m)
):
return
if not self._enabled:
if not self.db.check_whisper_ignore(m.user):
self.send_whisper(
m.user,
"The !generate has been turned off. !nopm to stop me from whispering you.",
)
return
cur_time = time.time()
if (
self.prev_message_t + self.cooldown < cur_time
or self.check_if_permissions(m)
):
if self.check_filter(m.message):
sentence = "You can't make me say that, you madman!"
else:
params = (
tokenize(m.message)[2:]
if self.allow_generate_params
else None
)
# Generate an actual sentence
sentence, success = self.generate(params)
if success:
# Reset cooldown if a message was actually generated
self.prev_message_t = time.time()
logger.info(sentence)
self.ws.send_message(sentence)
else:
if not self.db.check_whisper_ignore(m.user):
self.send_whisper(
m.user,
f"Cooldown hit: {self.prev_message_t + self.cooldown - cur_time:0.2f} out of {self.cooldown:.0f}s remaining. !nopm to stop these cooldown pm's.",
)
logger.info(
f"Cooldown hit with {self.prev_message_t + self.cooldown - cur_time:0.2f}s remaining."
)
return
# Send help message when requested.
elif m.message.startswith(("!ghelp", "!genhelp", "!generatehelp")):
self.send_help_message()
# Ignore the message if it is deemed a command
elif self.check_if_other_command(m.message):
return
# Ignore the message if it contains a link.
elif self.check_link(m.message):
return
if "emotes" in m.tags:
# If the list of emotes contains "emotesv2_", then the message contains a bit emote,
# and we choose not to learn from those messages.
if "emotesv2_" in m.tags["emotes"]:
return
# Replace modified emotes with normal versions,
# as the bot will never have the modified emotes unlocked at the time.
for modifier in self.extract_modifiers(m.tags["emotes"]):
m.message = m.message.replace(modifier, "")
# Ignore the message if any word in the sentence is on the ban filter
if self.check_filter(m.message):
logger.warning(
f'Sentence contained blacklisted word or phrase:"{m.message}"'
)
return
else:
# Try to split up sentences. Requires nltk's 'punkt' resource
try:
sentences = sent_tokenize(m.message.strip())
# If 'punkt' is not downloaded, then download it, and retry
except LookupError:
logger.debug("Downloading required punkt resource...")
import nltk
nltk.download("punkt")
logger.debug("Downloaded required punkt resource.")
sentences = sent_tokenize(m.message.strip())
for sentence in sentences:
# Get all seperate words
words = tokenize(sentence)
# Double spaces will lead to invalid rules. We remove empty words here
if "" in words:
words = [word for word in words if word]
# If the sentence is too short, ignore it and move on to the next.
if len(words) <= self.key_length:
continue
# Add a new starting point for a sentence to the <START>
# self.db.add_rule(["<START>"] + [words[x] for x in range(self.key_length)])
self.db.add_start_queue(
[words[x] for x in range(self.key_length)]
)
# Create Key variable which will be used as a key in the Dictionary for the grammar
key = list()
for word in words:
# Set up key for first use
if len(key) < self.key_length:
key.append(word)
continue
self.db.add_rule_queue(key + [word])
# Remove the first word, and add the current word,
# so that the key is correct for the next word.
key.pop(0)
key.append(word)
# Add <END> at the end of the sentence
self.db.add_rule_queue(key + ["<END>"])
elif m.type == "WHISPER":
# Allow people to whisper the bot to disable or enable whispers.
if m.message == "!nopm":
logger.debug(f"Adding {m.user} to Do Not Whisper.")
self.db.add_whisper_ignore(m.user)
self.ws.send_whisper(
m.user,
"You will no longer be sent whispers. Type !yespm to reenable. ",
)
elif m.message == "!yespm":
logger.debug(f"Removing {m.user} from Do Not Whisper.")
self.db.remove_whisper_ignore(m.user)
self.ws.send_whisper(
m.user,
"You will again be sent whispers. Type !nopm to disable again. ",
)
# Note that I add my own username to this list to allow me to manage the
# blacklist in channels of my bot in channels I am not modded in.
# I may modify this and add a "allowed users" field in the settings file.
elif (
m.user.lower() in self.mod_list + ["cubiedev"] + self.allowed_users
):
# Adding to the blacklist
if self.check_if_our_command(m.message, "!blacklist"):
if len(m.message.split()) == 2:
# TODO: Remove newly blacklisted word from the Database
word = m.message.split()[1].lower()
self.blacklist.append(word)
logger.info(f"Added `{word}` to Blacklist.")
self.write_blacklist(self.blacklist)
self.ws.send_whisper(m.user, "Added word to Blacklist.")
else:
self.ws.send_whisper(
m.user,
"Expected Format: `!blacklist word` to add `word` to the blacklist",
)
# Removing from the blacklist
elif self.check_if_our_command(m.message, "!whitelist"):
if len(m.message.split()) == 2:
word = m.message.split()[1].lower()
try:
self.blacklist.remove(word)
logger.info(f"Removed `{word}` from Blacklist.")
self.write_blacklist(self.blacklist)
self.ws.send_whisper(
m.user, "Removed word from Blacklist."
)
except ValueError:
self.ws.send_whisper(
m.user, "Word was already not in the blacklist."
)
else:
self.ws.send_whisper(
m.user,
"Expected Format: `!whitelist word` to remove `word` from the blacklist.",
)
# Checking whether a word is in the blacklist
elif self.check_if_our_command(m.message, "!check"):
if len(m.message.split()) == 2:
word = m.message.split()[1].lower()
if word in self.blacklist:
self.ws.send_whisper(
m.user, "This word is in the Blacklist."
)
else:
self.ws.send_whisper(
m.user, "This word is not in the Blacklist."
)
else:
self.ws.send_whisper(
m.user,
"Expected Format: `!check word` to check whether `word` is on the blacklist.",
)
elif m.type == "CLEARMSG":
# If a message is deleted, its contents will be unlearned
# or rather, the "occurances" attribute of each combinations of words in the sentence
# is reduced by 5, and deleted if the occurances is now less than 1.
self.db.unlearn(m.message)
# TODO: Think of some efficient way to check whether it was our message that got deleted.
# If the bot's message was deleted, log this as an error
# if m.user.lower() == self.nick.lower():
# logger.error(f"This bot message was deleted: \"{m.message}\"")
except Exception as e:
logger.exception(e)
def generate(self, params: List[str] = None) -> "Tuple[str, bool]":
"""Given an input sentence, generate the remainder of the sentence using the learned data.
Args:
params (List[str]): A list of words to use as an input to use as the start of generating.
Returns:
Tuple[str, bool]: A tuple of a sentence as the first value, and a boolean indicating
whether the generation succeeded as the second value.
"""
if params is None:
params = []
# List of sentences that will be generated. In some cases, multiple sentences will be generated,
# e.g. when the first sentence has less words than self.min_sentence_length.
sentences = [[]]
# Check for commands or recursion, eg: !generate !generate
if len(params) > 0:
if self.check_if_other_command(params[0]):
return "You can't make me do commands, you madman!", False
# Get the starting key and starting sentence.
# If there is more than 1 param, get the last 2 as the key.
# Note that self.key_length is fixed to 2 in this implementation
if len(params) > 1:
key = params[-self.key_length :]
# Copy the entire params for the sentence
sentences[0] = params.copy()
elif len(params) == 1:
# First we try to find if this word was once used as the first word in a sentence:
key = self.db.get_next_single_start(params[0])
if key is None:
# If this failed, we try to find the next word in the grammar as a whole
key = self.db.get_next_single_initial(0, params[0])
if key is None:
# Return a message that this word hasn't been learned yet
return f'I haven\'t extracted "{params[0]}" from chat yet.', False
# Copy this for the sentence
sentences[0] = key.copy()
else: # if there are no params
# Get starting key
key = self.db.get_start()
if key:
# Copy this for the sentence
sentences[0] = key.copy()
else:
# If nothing's ever been said
return "There is not enough learned information yet.", False
# Counter to prevent infinite loops (i.e. constantly generating <END> while below the
# minimum number of words to generate)
i = 0
while (
self.sentence_length(sentences) < self.max_sentence_length
and i < self.max_sentence_length * 2
):
# Use key to get next word
if i == 0:
# Prevent fetching <END> on the first word
word = self.db.get_next_initial(i, key)
else:
word = self.db.get_next(i, key)
i += 1
if word == "<END>" or word is None:
# Break, unless we are before the min_sentence_length
if i < self.min_sentence_length:
key = self.db.get_start()
# Ensure that the key can be generated. Otherwise we still stop.
if key:
# Start a new sentence
sentences.append([])
for entry in key:
sentences[-1].append(entry)
continue
break
# Otherwise add the word
sentences[-1].append(word)
# Shift the key so on the next iteration it gets the next item
key.pop(0)
key.append(word)
# If there were params, but the sentence resulting is identical to the params
# Then the params did not result in an actual sentence
# If so, restart without params
if len(params) > 0 and params == sentences[0]:
return "I haven't learned what to do with \"" + detokenize(
params[-self.key_length :]
) + '" yet.', False
return self.sent_separator.join(
detokenize(sentence) for sentence in sentences
), True
def sentence_length(self, sentences: List[List[str]]) -> int:
    """Count the words across *sentences*, ignoring non-word tokens.

    A token counts as a word unless it is a punctuation character or starts
    with an apostrophe (a contraction fragment such as "'re").

    Args:
        sentences (List[List[str]]): Lists of tokens making up sentences, e.g.
            [['Hello', ',', 'you', "'re", 'Tom', '!'], ['Yes', ',', 'I', 'am', '.']]
            which counts as 6 words.

    Returns:
        int: The number of words across all sentences.
    """
    return sum(
        1
        for sentence in sentences
        for token in sentence
        if token not in string.punctuation and token[0] != "'"
    )
def extract_modifiers(self, emotes: str) -> List[str]:
    """Collect emote modifier suffixes from an emote tag string.

    Scans for "_...:" spans (e.g. "_HZ" for horizontal flip) and stops as
    soon as either delimiter can no longer be found.

    Args:
        emotes (str): String containing all emotes used in the message.

    Returns:
        List[str]: The modifier substrings found, such as "_HZ".
    """
    found: List[str] = []
    while emotes:
        underscore = emotes.find("_")
        if underscore == -1:
            break
        colon = emotes.find(":", underscore)
        if colon == -1:
            break
        found.append(emotes[underscore:colon])
        # Keep scanning from the colon onward (string strictly shrinks).
        emotes = emotes[colon:]
    return found
def write_blacklist(self, blacklist: List[str]) -> None:
    """Write blacklist.txt given a list of banned words.

    Words are written longest-first, one per line.

    Args:
        blacklist (List[str]): The list of banned words to write.
    """
    logger.debug("Writing Blacklist...")
    # Fixes: explicit UTF-8 instead of the platform default encoding, and
    # `key=len` instead of the redundant `lambda x: len(x)`.
    with open("blacklist.txt", "w", encoding="utf-8") as f:
        f.write("\n".join(sorted(blacklist, key=len, reverse=True)))
    logger.debug("Written Blacklist.")
def set_blacklist(self) -> None:
    """Read blacklist.txt and set `self.blacklist` to the list of banned words."""
    logger.debug("Loading Blacklist...")
    try:
        with open("blacklist.txt", "r") as f:
            # Strip only the trailing newline from each line; other
            # whitespace is preserved as part of the banned word.
            self.blacklist = [line.replace("\n", "") for line in f.readlines()]
        logger.debug("Loaded Blacklist.")
    except FileNotFoundError:
        # First run: seed the blacklist with the internal sentinel tokens so
        # users cannot make the bot emit <START>/<END>, then persist it.
        logger.warning("Loading Blacklist Failed!")
        self.blacklist = ["<start>", "<end>"]
        self.write_blacklist(self.blacklist)
def send_help_message(self) -> None:
    """Send a Help message to the connected chat, as long as the bot wasn't disabled."""
    if self._enabled:
        logger.info("Help message sent.")
        try:
            self.ws.send_message(
                "Learn how this bot generates sentences here: https://github.com/CubieDev/TwitchMarkovChain#how-it-works"
            )
        except socket.OSError as error:
            # A transient connection failure is not fatal for a periodic
            # help message; log and carry on.
            logger.warning(
                f"[OSError: {error}] upon sending help message. Ignoring."
            )
def send_automatic_generation_message(self) -> None:
    """Send an automatic generation message to the connected chat.

    As long as the bot wasn't disabled, just like if someone typed "!g" in chat.
    Called periodically by the LoopingTimer configured in __init__.
    """
    if self._enabled:
        # Generate with no input params, exactly like a bare "!g".
        sentence, success = self.generate()
        if success:
            logger.info(sentence)
            # Try to send a message. Just log a warning on fail
            try:
                self.ws.send_message(sentence)
            except socket.OSError as error:
                logger.warning(
                    f"[OSError: {error}] upon sending automatic generation message. Ignoring."
                )
        else:
            logger.info(
                "Attempted to output automatic generation message, but there is not enough learned information yet."
            )
def send_whisper(self, user: str, message: str) -> None:
"""Optionally send a whisper, only if "WhisperCooldown" is True.
Args:
user (str): The user to potentially whisper.
message (str): The message to potentially whisper
"""
if self.whisper_cooldown:
self.ws.send_whisper(user, message)
def check_filter(self, message: str) -> bool:
    """Returns True if message contains a banned word.

    Args:
        message (str): The message to check.
    """
    # Blacklist entries are stored lowercased, so compare lowercased tokens.
    return any(token.lower() in self.blacklist for token in tokenize(message))
def check_if_our_command(self, message: str, *commands: "Tuple[str]") -> bool:
    """True if the first "word" of the message is in the tuple of commands.

    Args:
        message (str): The message to check for a command.
        commands (Tuple[str]): A tuple of commands.

    Returns:
        bool: True if the first word in message is one of the commands.
    """
    # maxsplit=1 is enough: only the leading word matters.
    first_word = message.split(maxsplit=1)[0]
    return first_word in commands
def check_if_generate(self, message: str) -> bool:
    """True if the first "word" of the message is one of the defined generate commands.

    Thin wrapper around check_if_our_command using the commands configured
    via the "GenerateCommands" setting (see set_settings).

    Args:
        message (str): The message to check for the generate command (i.e !generate or !g).

    Returns:
        bool: True if the first word in message is a generate command.
    """
    return self.check_if_our_command(message, *self.generate_commands)
def check_if_other_command(self, message: str) -> bool:
    """True if the message is any command, except /me.

    Is used to avoid learning and generating commands.

    Args:
        message (str): The message to check.

    Returns:
        bool: True if the message is any potential command (starts with a
        '!', '/' or '.') with the exception of /me.
    """
    # /me is ordinary chat styling, not a command — allow it through.
    if message.startswith("/me"):
        return False
    return message.startswith(("!", "/", "."))
def check_if_permissions(self, m: Message) -> bool:
    """True if the user has heightened permissions.

    E.g. permissions to bypass cooldowns, update settings, disable the bot, etc.
    True for the streamer themselves, and the users set as the allowed users.

    Args:
        m (Message): The Message object that was sent from Twitch.
            Has `user` and `channel` attributes.
    """
    # The broadcaster's username equals the channel name on Twitch.
    is_streamer = m.user == m.channel
    return is_streamer or m.user in self.allowed_users
def check_link(self, message: str) -> bool:
    """True if `message` contains a link.

    Args:
        message (str): The message to check for a link.

    Returns:
        bool: True if the message contains a link.
    """
    # Coerce the `re.Match | None` returned by .search() into an actual
    # bool, so the return value matches the annotated/documented type.
    return bool(self.link_regex.search(message))
# Entry point: running this module directly constructs the bot, which
# starts the Twitch connection as a side effect of instantiation.
if __name__ == "__main__":
    MarkovChain()

View file

@ -1,205 +0,0 @@
import json
import os
import logging
from functools import lru_cache
from pathlib import Path
from typing import List
# Python < 3.8 has no typing.TypedDict; a plain ``object`` base keeps the
# SettingsData class definition below syntactically valid (at the cost of
# losing the TypedDict key checking).
try:
    from typing import TypedDict
except ImportError:
    TypedDict = object  # type: ignore[assignment, misc]
# Module-level logger for the settings subsystem.
logger = logging.getLogger(__name__)
class SettingsData(TypedDict):
    """Schema of the bot's settings.json file.

    Keys mirror ``Settings.DEFAULTS``; keep the two in sync, since
    ``Settings.read_settings`` compares loaded files against that mapping.
    """

    Host: str
    Port: int
    Channel: str
    Nickname: str
    Authentication: str
    DeniedUsers: List[str]
    AllowedUsers: List[str]
    Cooldown: int
    KeyLength: int
    MaxSentenceWordAmount: int
    MinSentenceWordAmount: int
    HelpMessageTimer: int
    AutomaticGenerationTimer: int
    WhisperCooldown: bool
    EnableGenerateCommand: bool
    SentenceSeparator: str
    # Added: these keys exist in Settings.DEFAULTS but were missing from
    # the schema, making DEFAULTS not actually conform to SettingsData.
    AllowGenerateParams: bool
    GenerateCommands: List[str]
class Settings:
    """Load, migrate, and persist the bot's JSON settings file.

    Reads ``settings.json``, fills in any missing keys from ``DEFAULTS``,
    and upgrades files produced by older bot versions (``update_v1`` for
    the BannedWords-to-blacklist.txt migration, ``update_v2`` for the
    settings.txt-to-settings.json migration).
    """

    # Default value for every supported settings key; also acts as the
    # reference schema when checking a loaded file for missing keys.
    DEFAULTS: SettingsData = {
        "Host": "irc.chat.twitch.tv",
        "Port": 6667,
        "Channel": "#<channel>",
        "Nickname": "<name>",
        "Authentication": "oauth:<auth>",
        "DeniedUsers": ["StreamElements", "Nightbot", "Moobot", "Marbiebot"],
        "AllowedUsers": [],
        "Cooldown": 20,
        "KeyLength": 2,
        "MaxSentenceWordAmount": 25,
        "MinSentenceWordAmount": -1,
        "HelpMessageTimer": 60 * 60 * 5,  # 18000 seconds, 5 hours
        "AutomaticGenerationTimer": -1,
        "WhisperCooldown": True,
        "EnableGenerateCommand": True,
        "SentenceSeparator": " - ",
        "AllowGenerateParams": True,
        "GenerateCommands": ["!generate", "!g"],
    }

    def __init__(self, settings_path: Path | None = None) -> None:
        """Remember where the settings file lives.

        Args:
            settings_path (Path | None): Path to the settings file.
                Defaults to ``settings.json`` in the working directory.
        """
        self.settings_path = settings_path or Path("settings.json")

    # NOTE(review): lru_cache on an instance method keeps `self` alive for
    # the cache's lifetime and shares the single cached result across
    # instances (maxsize=1) — consider per-instance caching instead.
    @lru_cache(maxsize=1)
    def read_settings(self) -> dict:
        """Read the settings file, migrating and completing it as needed.

        Returns:
            dict: The full settings mapping, with defaults merged in for
            any keys that were missing from the file on disk.

        Raises:
            ValueError: If the file contains invalid JSON, or if it did
                not exist and a default file was just generated.
        """
        self.update_v2()
        try:
            # Try to load the file using json.
            # And pass the data to the Bot class instance if this succeeds.
            with self.settings_path.open("r") as f:
                text_settings = f.read()
                settings: SettingsData = json.loads(text_settings)
            self.update_v1(settings)
            # Check if any settings keys are missing, and if so, write the defaults
            # to the settings.json
            if settings.keys() != Settings.DEFAULTS.keys():
                missing_keys = set(Settings.DEFAULTS.keys()) - set(settings.keys())
                # Log the missing keys
                logger.info(
                    f"The following keys were missing from {self.settings_path}: {', '.join(map(repr, missing_keys))}."
                )
                logger.info(
                    f"These defaults of these values were used, and added to {self.settings_path}. Default behaviour will not change."
                )
                # Add missing defaults
                settings = {**Settings.DEFAULTS, **settings}
                self.write_settings_file(settings)
            return settings
        except ValueError:
            logger.error("Error in settings file.")
            raise ValueError("Error in settings file.")
        except FileNotFoundError:
            self.write_default_settings_file()
            raise ValueError("Please fix your settings file that was just generated.")

    def update_v1(self, settings: SettingsData) -> None:
        """Update settings file to remove the BannedWords field, in favor for a blacklist.txt file.

        Args:
            settings (SettingsData): The loaded settings mapping. Mutated in
                place (the "BannedWords" key is removed) and rewritten to disk
                when a migration happens.
        """
        # "BannedWords" is only a key in the settings in older versions.
        # We moved to a separate file for blacklisted words.
        if "BannedWords" in settings:
            logger.info("Updating Blacklist system to new version...")
            try:
                # blacklist.txt already exists: merge the old BannedWords into it.
                with open("blacklist.txt", "r+") as f:
                    logger.info("Moving Banned Words to the blacklist.txt file...")
                    # Read the data, and split by word or phrase, then add BannedWords
                    banned_list = f.read().split("\n") + settings["BannedWords"]
                    # Remove duplicates and sort by length, longest to shortest
                    banned_list = sorted(
                        list(set(banned_list)), key=lambda x: len(x), reverse=True
                    )
                    # Clear file, and then write in the new data
                    f.seek(0)
                    f.truncate(0)
                    f.write("\n".join(banned_list))
                    logger.info("Moved Banned Words to the blacklist.txt file.")
            except FileNotFoundError:
                # No blacklist.txt yet: create it from the old BannedWords alone.
                with open("blacklist.txt", "w") as f:
                    logger.info("Moving Banned Words to a new blacklist.txt file...")
                    # Remove duplicates and sort by length, longest to shortest
                    banned_list = sorted(
                        list(set(settings["BannedWords"])),
                        key=lambda x: len(x),
                        reverse=True,
                    )
                    f.write("\n".join(banned_list))
                    logger.info("Moved Banned Words to a new blacklist.txt file.")
            # Remove BannedWords list from data dictionary, and then write it to the settings file
            del settings["BannedWords"]
            with self.settings_path.open("w") as f:
                f.write(json.dumps(settings, indent=4, separators=(",", ": ")))
            logger.info("Updated Blacklist system to new version.")

    def update_v2(
        self,
    ) -> None:
        """Converts `settings.txt` to `settings.json`, and adds missing new fields."""
        try:
            # Try to load the old settings.txt file using json.
            # NOTE(review): this actually reads self.settings_path
            # (settings.json by default), not "settings.txt" as the docstring
            # implies — confirm which file the v2 migration should read.
            with self.settings_path.open("r") as f:
                settings = f.read()
                data: SettingsData = json.loads(settings)
            # Add missing fields from Settings.DEFAULT to data
            corrected_data = {**Settings.DEFAULTS, **data}
            # Write the new settings file
            with self.settings_path.open("w") as f:
                f.write(json.dumps(corrected_data, indent=4, separators=(",", ": ")))
            # Raises FileNotFoundError (caught below) when no settings.txt exists.
            os.remove("settings.txt")
            logger.info(
                'Updated Settings system to new version. See "settings.json" for new fields, and README.md for information on these fields.'
            )
        except FileNotFoundError:
            # If settings.txt does not exist, then we're not on an old version.
            pass

    def write_default_settings_file(self) -> None:
        """Create a standardised settings file with default values."""
        self.write_settings_file(Settings.DEFAULTS)

    def write_settings_file(self, settings: SettingsData) -> None:
        """Serialize `settings` to the settings file as indented JSON.

        Args:
            settings (SettingsData): The settings mapping to persist.
        """
        with open(self.settings_path, "w") as f:
            f.write(json.dumps(settings, indent=4, separators=(",", ": ")))

    def update_cooldown(self, cooldown: int) -> None:
        """Update the "Cooldown" value in the settings file.

        Args:
            cooldown (int): The integer representing the amount of seconds of cooldown
                between outputted generations.
        """
        # Read-modify-write the whole file so other keys are preserved.
        with self.settings_path.open("r") as f:
            settings = f.read()
            data = json.loads(settings)
        data["Cooldown"] = cooldown
        with self.settings_path.open("w") as f:
            f.write(json.dumps(data, indent=4, separators=(",", ": ")))

    def get_channel(self) -> str:
        """Get the "Channel" value from the settings file.

        Returns:
            str: The name of the Channel described in the settings file.
                Stripped of "#" and converted to lowercase.
        """
        settings = self.read_settings()
        return settings["Channel"].replace("#", "").lower()
@lru_cache(maxsize=1)
def get_settings(settings_path: Path | None = None) -> Settings:
    """Return a cached Settings instance for the given path.

    Args:
        settings_path (Path | None): Optional explicit path to the settings
            file; defaults to the Settings class default when None.

    Returns:
        Settings: The (cached, maxsize=1) Settings object.
    """
    instance = Settings(settings_path)
    return instance

View file

@ -1,12 +1,11 @@
import sqlite3
import logging import logging
import random import random
import sqlite3
import string import string
from typing import Any, List, Optional, Tuple from typing import Any
import platformdirs import platformdirs
from loguru import logger
logger = logging.getLogger(__name__)
class Database: class Database:
@ -89,12 +88,13 @@ class Database:
def __init__(self, channel: str): def __init__(self, channel: str):
self.user_data_path = platformdirs.user_data_path( self.user_data_path = platformdirs.user_data_path(
"markovbot_gui", ensure_exists=True "markovbot_gui",
ensure_exists=True,
) )
self.db_path = ( self.db_path = (
self.user_data_path / f"MarkovChain_{channel.replace('#', '').lower()}.db" self.user_data_path / f"MarkovChain_{channel.replace('#', '').lower()}.db"
) )
self._execute_queue = [] self._execute_queue: list = []
if self.db_path.is_file(): if self.db_path.is_file():
# Ensure the database is updated to the newest version # Ensure the database is updated to the newest version
@ -103,7 +103,7 @@ class Database:
self.update_v3(channel) self.update_v3(channel)
# Create database tables. # Create database tables.
for first_char in list(string.ascii_uppercase) + ["_"]: for first_char in [*list(string.ascii_uppercase), "_"]:
self.add_execute_queue( self.add_execute_queue(
f""" f"""
CREATE TABLE IF NOT EXISTS MarkovStart{first_char} ( CREATE TABLE IF NOT EXISTS MarkovStart{first_char} (
@ -115,7 +115,7 @@ class Database:
""", """,
auto_commit=False, auto_commit=False,
) )
for second_char in list(string.ascii_uppercase) + ["_"]: for second_char in [*list(string.ascii_uppercase), "_"]:
self.add_execute_queue( self.add_execute_queue(
f""" f"""
CREATE TABLE IF NOT EXISTS MarkovGrammar{first_char}{second_char} ( CREATE TABLE IF NOT EXISTS MarkovGrammar{first_char}{second_char} (
@ -186,7 +186,8 @@ class Database:
""" """
# If an old version of the Database is used, update the database # If an old version of the Database is used, update the database
if ("MarkovGrammarA",) in self.execute( if ("MarkovGrammarA",) in self.execute(
"SELECT name FROM sqlite_master WHERE type='table';", fetch=True "SELECT name FROM sqlite_master WHERE type='table';",
fetch=True,
): ):
logger.info("Creating backup before updating Database...") logger.info("Creating backup before updating Database...")
# Connect to both the new and backup, backup, and close both # Connect to both the new and backup, backup, and close both
@ -196,7 +197,7 @@ class Database:
conn = sqlite3.connect(f"MarkovChain_{channel.replace('#', '').lower()}.db") conn = sqlite3.connect(f"MarkovChain_{channel.replace('#', '').lower()}.db")
back_conn = sqlite3.connect( back_conn = sqlite3.connect(
f"MarkovChain_{channel.replace('#', '').lower()}_backup.db" f"MarkovChain_{channel.replace('#', '').lower()}_backup.db",
) )
with back_conn: with back_conn:
conn.backup(back_conn, pages=1000, progress=progress) conn.backup(back_conn, pages=1000, progress=progress)
@ -228,10 +229,10 @@ class Database:
# Copy data from Other to _ and remove Other # Copy data from Other to _ and remove Other
self.add_execute_queue( self.add_execute_queue(
"INSERT INTO MarkovGrammar_ SELECT * FROM MarkovGrammarOther;" "INSERT INTO MarkovGrammar_ SELECT * FROM MarkovGrammarOther;",
) )
self.add_execute_queue( self.add_execute_queue(
"INSERT INTO MarkovStart_ SELECT * FROM MarkovStartOther;" "INSERT INTO MarkovStart_ SELECT * FROM MarkovStartOther;",
) )
self.add_execute_queue("DROP TABLE MarkovGrammarOther") self.add_execute_queue("DROP TABLE MarkovGrammarOther")
self.add_execute_queue("DROP TABLE MarkovStartOther") self.add_execute_queue("DROP TABLE MarkovStartOther")
@ -241,17 +242,17 @@ class Database:
# Same with MarkovStart. # Same with MarkovStart.
for character in list(string.digits): for character in list(string.digits):
self.add_execute_queue( self.add_execute_queue(
f"INSERT INTO MarkovGrammar_ SELECT * FROM MarkovGrammar{character}" f"INSERT INTO MarkovGrammar_ SELECT * FROM MarkovGrammar{character}", # noqa: S608
) )
self.add_execute_queue(f"DROP TABLE MarkovGrammar{character}") self.add_execute_queue(f"DROP TABLE MarkovGrammar{character}")
self.add_execute_queue( self.add_execute_queue(
f"INSERT INTO MarkovStart_ SELECT * FROM MarkovStart{character}" f"INSERT INTO MarkovStart_ SELECT * FROM MarkovStart{character}", # noqa: S608
) )
self.add_execute_queue(f"DROP TABLE MarkovStart{character}") self.add_execute_queue(f"DROP TABLE MarkovStart{character}")
self.execute_commit() self.execute_commit()
# Split up MarkovGrammarA into MarkovGrammarAA, MarkovGrammarAB, etc. # Split up MarkovGrammarA into MarkovGrammarAA, MarkovGrammarAB, etc.
for first_char in list(string.ascii_uppercase) + ["_"]: for first_char in [*list(string.ascii_uppercase), "_"]:
for second_char in list(string.ascii_uppercase): for second_char in list(string.ascii_uppercase):
self.add_execute_queue(f""" self.add_execute_queue(f"""
CREATE TABLE IF NOT EXISTS MarkovGrammar{first_char}{second_char} ( CREATE TABLE IF NOT EXISTS MarkovGrammar{first_char}{second_char} (
@ -263,10 +264,10 @@ class Database:
); );
""") """)
self.add_execute_queue( self.add_execute_queue(
f'INSERT INTO MarkovGrammar{first_char}{second_char} SELECT * FROM MarkovGrammar{first_char} WHERE word2 LIKE "{second_char}%";' f'INSERT INTO MarkovGrammar{first_char}{second_char} SELECT * FROM MarkovGrammar{first_char} WHERE word2 LIKE "{second_char}%";', # noqa: S608
) )
self.add_execute_queue( self.add_execute_queue(
f'DELETE FROM MarkovGrammar{first_char} WHERE word2 LIKE "{second_char}%";' f'DELETE FROM MarkovGrammar{first_char} WHERE word2 LIKE "{second_char}%";', # noqa: S608
) )
self.add_execute_queue(f""" self.add_execute_queue(f"""
@ -279,7 +280,7 @@ class Database:
); );
""") """)
self.add_execute_queue( self.add_execute_queue(
f"INSERT INTO MarkovGrammar{first_char}_ SELECT * FROM MarkovGrammar{first_char};" f"INSERT INTO MarkovGrammar{first_char}_ SELECT * FROM MarkovGrammar{first_char};", # noqa: S608
) )
self.add_execute_queue(f"DROP TABLE MarkovGrammar{first_char}") self.add_execute_queue(f"DROP TABLE MarkovGrammar{first_char}")
self.execute_commit() self.execute_commit()
@ -300,17 +301,17 @@ class Database:
fetch=True, fetch=True,
): ):
logger.info("Updating Database to new version...") logger.info("Updating Database to new version...")
for first_char in list(string.ascii_uppercase) + ["_"]: for first_char in [*list(string.ascii_uppercase), "_"]:
for second_char in list(string.ascii_uppercase) + ["_"]: for second_char in [*list(string.ascii_uppercase), "_"]:
self.execute( self.execute(
f"ALTER TABLE MarkovGrammar{first_char}{second_char} RENAME COLUMN occurances TO count;" f"ALTER TABLE MarkovGrammar{first_char}{second_char} RENAME COLUMN occurances TO count;",
) )
self.execute( self.execute(
f"ALTER TABLE MarkovStart{first_char} RENAME COLUMN occurances TO count;" f"ALTER TABLE MarkovStart{first_char} RENAME COLUMN occurances TO count;",
) )
logger.info("Finished Updating Database to new version.") logger.info("Finished Updating Database to new version.")
def update_v3(self, channel: str) -> None: def update_v3(self, channel: str) -> None: # noqa: C901, PLR0915
"""Update the Database structure to mark punctuation as a separate word. """Update the Database structure to mark punctuation as a separate word.
Previously, "Hello," was a valid single word. Now, it would be split as "Hello" and ",". Previously, "Hello," was a valid single word. Now, it would be split as "Hello" and ",".
@ -338,26 +339,31 @@ class Database:
# in which case we definitely want to upgrade. # in which case we definitely want to upgrade.
try: try:
version = self.execute( version = self.execute(
"SELECT version FROM Version ORDER BY version DESC LIMIT 1;", fetch=True "SELECT version FROM Version ORDER BY version DESC LIMIT 1;",
fetch=True,
) )
except sqlite3.OperationalError: except sqlite3.OperationalError:
version = [] version = []
# Whether to upgrade # Whether to upgrade
if not version or version[0][0] < 3: if not version or version[0][0] < 3: # noqa: PLR2004
logger.info( logger.info(
"Updating Database to new version - supports better punctuation handling." "Updating Database to new version - supports better punctuation handling.",
) )
from shutil import copyfile from shutil import copyfile
import os
from Tokenizer import tokenize
from nltk import ngrams from nltk import ngrams
from src.markovbot_gui.libs.tokenizer import tokenize
channel = channel.replace("#", "").lower() channel = channel.replace("#", "").lower()
copyfile(f"MarkovChain_{channel}.db", f"MarkovChain_{channel}_modified.db") copyfile(
self.db_path,
self.user_data_path / f"MarkovChain_{channel}_modified.db",
)
logger.info( logger.info(
f'Created a copy of the database called "MarkovChain_{channel}_modified.db". The update will modify this file.' f'Created a copy of the database called "MarkovChain_{channel}_modified.db". The update will modify this file.',
) )
# Temporarily set self.db_name to the modified one # Temporarily set self.db_name to the modified one
@ -367,7 +373,7 @@ class Database:
) )
# Create database tables. # Create database tables.
for first_char in list(string.ascii_uppercase) + ["_"]: for first_char in [*list(string.ascii_uppercase), "_"]:
table = f"MarkovStart{first_char}" table = f"MarkovStart{first_char}"
self.add_execute_queue( self.add_execute_queue(
f""" f"""
@ -380,7 +386,7 @@ class Database:
""", """,
auto_commit=False, auto_commit=False,
) )
for second_char in list(string.ascii_uppercase) + ["_"]: for second_char in [*list(string.ascii_uppercase), "_"]:
table = f"MarkovGrammar{first_char}{second_char}" table = f"MarkovGrammar{first_char}{second_char}"
self.add_execute_queue( self.add_execute_queue(
f""" f"""
@ -396,23 +402,23 @@ class Database:
) )
self.execute_commit() self.execute_commit()
def modify_start(table: str) -> None: def modify_start(table_name: str) -> None:
"""Read all data from `table`, re-tokenize it, distribute the new first 2 tokens to _modified tables, and drop `table`. """Read all data from `table_name`, re-tokenize it, distribute the new first 2 tokens to _modified tables, and drop `table`.
Args: Args:
table (str): The name of the table to work on. table_name (str): The name of the table to work on.
""" """
data = self.execute(f"SELECT * FROM {table};", fetch=True) data = self.execute(f"SELECT * FROM {table_name};", fetch=True) # noqa: S608
for tup in data: for tup in data:
# Remove "count" from tup for now # Remove "count" from tup for now
count = tup[-1] count = tup[-1]
tup = tup[:-1] tup = tup[:-1] # noqa: PLW2901
raw_string = " ".join(tup) raw_string = " ".join(tup)
tokenized = tokenize(raw_string) tokenized = tokenize(raw_string)
two_gram = tokenized[:2] two_gram = tokenized[:2]
# In case there was some issue in the previous Database # In case there was some issue in the previous Database
if len(two_gram) < 2: if len(two_gram) < 2: # noqa: PLR2004
continue continue
self.add_execute_queue( self.add_execute_queue(
f""" f"""
@ -425,31 +431,31 @@ class Database:
), ),
1 1
) )
)""", )""", # noqa: S608
values=two_gram + two_gram, values=two_gram + two_gram,
auto_commit=False, auto_commit=False,
) )
self.execute(f"DROP TABLE {table};") self.execute(f"DROP TABLE {table_name};")
def modify_grammar(table: str) -> None: def modify_grammar(table_name: str) -> None:
"""Read all data from `table`, re-tokenize it, distribute the new 3-grams to _modified tables, and drop `table`. """Read all data from `table_name`, re-tokenize it, distribute the new 3-grams to _modified tables, and drop `table`.
Args: Args:
table (str): The name of the table to work on. table_name (str): The name of the table to work on.
""" """
data = self.execute(f"SELECT * FROM {table};", fetch=True) data = self.execute(f"SELECT * FROM {table_name};", fetch=True) # noqa: S608
for tup in data: for tup in data:
# Remove "count" from tup for now # Remove "count" from tup for now
count = tup[-1] count = tup[-1]
tup = tup[:-1] tup = tup[:-1] # noqa: PLW2901
# If ends on "<END>", ignore that in in the tuple, as we don't want it to get # If ends on "<END>", ignore that in in the tuple, as we don't want it to get
# tokenized. # tokenized.
end = False end = False
if tup[-1] == "<END>": if tup[-1] == "<END>":
end = True end = True
tup = tup[:-1] tup = tup[:-1] # noqa: PLW2901
raw_string = " ".join(tup) raw_string = " ".join(tup)
tokenized = tokenize(raw_string) tokenized = tokenize(raw_string)
@ -474,39 +480,39 @@ class Database:
), ),
1 1
) )
)""", )""", # noqa: S608
values=ngram + ngram, values=ngram + ngram,
auto_commit=False, auto_commit=False,
) )
self.execute(f"DROP TABLE {table};") self.execute(f"DROP TABLE {table_name};")
# Modify all tables # Modify all tables
i = 0 i = 0
total = 27 * 27 + 27 # The number of tables to convert total = 27 * 27 + 27 # The number of tables to convert
for first_char in list(string.ascii_uppercase) + ["_"]: for first_char in [*list(string.ascii_uppercase), "_"]:
table = f"MarkovStart{first_char}" table = f"MarkovStart{first_char}"
modify_start(table) modify_start(table)
i += 1 i += 1
for second_char in list(string.ascii_uppercase) + ["_"]: for second_char in [*list(string.ascii_uppercase), "_"]:
table = f"MarkovGrammar{first_char}{second_char}" table = f"MarkovGrammar{first_char}{second_char}"
modify_grammar(table) modify_grammar(table)
i += 1 i += 1
logger.debug( logger.debug(
f"[{i / total * 100:.2f}%] Scheduled updates for the tables for words starting in {first_char}." f"[{i / total * 100:.2f}%] Scheduled updates for the tables for words starting in {first_char}.",
) )
logger.info("Starting executing table update...") logger.info("Starting executing table update...")
self.execute_commit() self.execute_commit()
logger.info("Finished executing table update.") logger.info("Finished executing table update.")
# Rename the _modified tables to normal tables again # Rename the _modified tables to normal tables again
for first_char in list(string.ascii_uppercase) + ["_"]: for first_char in [*list(string.ascii_uppercase), "_"]:
table = f"MarkovStart{first_char}" table = f"MarkovStart{first_char}"
self.add_execute_queue( self.add_execute_queue(
f"ALTER TABLE {table}_modified RENAME TO {table};", f"ALTER TABLE {table}_modified RENAME TO {table};",
auto_commit=False, auto_commit=False,
) )
for second_char in list(string.ascii_uppercase) + ["_"]: for second_char in [*list(string.ascii_uppercase), "_"]:
table = f"MarkovGrammar{first_char}{second_char}" table = f"MarkovGrammar{first_char}{second_char}"
self.add_execute_queue( self.add_execute_queue(
f"ALTER TABLE {table}_modified RENAME TO {table};", f"ALTER TABLE {table}_modified RENAME TO {table};",
@ -516,8 +522,12 @@ class Database:
# Turn the non-modified, old version of the Database into a "_backup.db" file, # Turn the non-modified, old version of the Database into a "_backup.db" file,
# and turn the modified file into the new main file. # and turn the modified file into the new main file.
os.rename(f"MarkovChain_{channel}.db", f"MarkovChain_{channel}_backup.db") self.db_path.rename(self.db_path / f"MarkovChain_{channel}_backup.db")
os.rename(f"MarkovChain_{channel}_modified.db", f"MarkovChain_{channel}.db") (self.user_data_path / f"MarkovChain_{channel}_modified.db").rename(
self.db_path / f"MarkovChain_{channel}.db",
)
# os.rename(f"MarkovChain_{channel}.db", f"MarkovChain_{channel}_backup.db") # noqa: ERA001
# os.rename(f"MarkovChain_{channel}_modified.db", f"MarkovChain_{channel}.db") # noqa: ERA001
# Revert to using .db instead of _modified.db # Revert to using .db instead of _modified.db
self.db_path = ( self.db_path = (
@ -533,17 +543,20 @@ class Database:
self.execute("INSERT INTO Version (version) VALUES (3);") self.execute("INSERT INTO Version (version) VALUES (3);")
logger.info( logger.info(
f'Renamed original database file "MarkovChain_{channel}.db" to "MarkovChain_{channel}_backup.db". This file is *not* used, and can safely be deleted.' f'Renamed original database file "MarkovChain_{channel}.db" to "MarkovChain_{channel}_backup.db". This file is *not* used, and can safely be deleted.',
) )
logger.info( logger.info(
f'Renamed updated database file "MarkovChain_{channel}_modified.db" to "MarkovChain_{channel}.db".' f'Renamed updated database file "MarkovChain_{channel}_modified.db" to "MarkovChain_{channel}.db".',
) )
logger.info( logger.info(
f'This updated "MarkovChain_{channel}.db" will be used to drive the Twitch bot.' f'This updated "MarkovChain_{channel}.db" will be used to drive the Twitch bot.',
) )
def add_execute_queue( def add_execute_queue(
self, sql: str, values: Tuple[Any] = None, auto_commit: bool = True self,
sql: str,
values: tuple[Any] | list[Any] | None = None,
auto_commit: bool = True,
) -> None: ) -> None:
"""Add query and corresponding values to a queue, to be executed all at once. """Add query and corresponding values to a queue, to be executed all at once.
@ -553,15 +566,15 @@ class Database:
Args: Args:
sql (str): The SQL query to add, potentially with "?" for where sql (str): The SQL query to add, potentially with "?" for where
a value ought to be filled in. a value ought to be filled in.
values ([Tuple[Any]], optional): Optional tuple of values to replace "?" in SQL queries. values ([tuple[Any]], optional): Optional tuple of values to replace "?" in SQL queries.
Defaults to None. Defaults to None.
""" """
if values is not None: if values is not None:
self._execute_queue.append([sql, values]) self._execute_queue.append((sql, values))
else: else:
self._execute_queue.append([sql]) self._execute_queue.append((sql,))
# Commit these executes if there are more than 25 queries # Commit these executes if there are more than 25 queries
if auto_commit and len(self._execute_queue) > 25: if auto_commit and len(self._execute_queue) > 25: # noqa: PLR2004
self.execute_commit() self.execute_commit()
def execute_commit(self, fetch: bool = False) -> Any: def execute_commit(self, fetch: bool = False) -> Any:
@ -584,14 +597,15 @@ class Database:
cur.execute("commit") cur.execute("commit")
if fetch: if fetch:
return cur.fetchall() return cur.fetchall()
return None
def execute(self, sql: str, values: Tuple[Any] = None, fetch: bool = False): def execute(self, sql: str, values: tuple[Any] | None = None, fetch: bool = False):
"""Execute the SQL query with the corresponding values, potentially returning a result. """Execute the SQL query with the corresponding values, potentially returning a result.
Args: Args:
sql (str): The SQL query to add, potentially with "?" for where sql (str): The SQL query to add, potentially with "?" for where
a value ought to be filled in. a value ought to be filled in.
values ([Tuple[Any]], optional): Optional tuple of values to replace "?" in SQL queries. values ([tuple[Any]], optional): Optional tuple of values to replace "?" in SQL queries.
Defaults to None. Defaults to None.
fetch (bool, optional): Whether to return the fetchall() of the SQL queries. fetch (bool, optional): Whether to return the fetchall() of the SQL queries.
Defaults to False. Defaults to False.
@ -608,6 +622,7 @@ class Database:
conn.commit() conn.commit()
if fetch: if fetch:
return cur.fetchall() return cur.fetchall()
return None
@staticmethod @staticmethod
def get_suffix(character: str) -> str: def get_suffix(character: str) -> str:
@ -636,7 +651,7 @@ class Database:
values=(username,), values=(username,),
) )
def check_whisper_ignore(self, username: str) -> List[Tuple[str]]: def check_whisper_ignore(self, username: str) -> list[tuple[str]]:
"""Returns a non-empty list only if `username` is in the WhisperIgnore table. """Returns a non-empty list only if `username` is in the WhisperIgnore table.
Otherwise, returns an empty list. Is used to ensure that a user who doesn't want to be Otherwise, returns an empty list. Is used to ensure that a user who doesn't want to be
@ -646,7 +661,7 @@ class Database:
username (str): The username of the user to check. username (str): The username of the user to check.
Returns: Returns:
List[Tuple[str]]: Either an empty list, or [('test_user',)]. list[tuple[str]]: Either an empty list, or [('test_user',)].
Allows the use of `if not check_whisper_ignore(user): whisper(user)` Allows the use of `if not check_whisper_ignore(user): whisper(user)`
""" """
return self.execute( return self.execute(
@ -685,14 +700,14 @@ class Database:
""" """
return items[0] * len(items) == items return items[0] * len(items) == items
def get_next(self, index: int, words: List[str]) -> Optional[str]: def get_next(self, index: int, words: list | tuple) -> str | None:
"""Generate the next word in the sentence using learned data, given the previous `key_length` words. """Generate the next word in the sentence using learned data, given the previous `key_length` words.
`key_length` is set to 2 by default, and cannot easily be changed. `key_length` is set to 2 by default, and cannot easily be changed.
Args: Args:
index (int): The index of this new word in the sentence. index (int): The index of this new word in the sentence.
words (List[str]): The previous 2 words. words (list[str]): The previous 2 words.
Returns: Returns:
Optional[str]: The next word in the sentence, generated given the learned data. Optional[str]: The next word in the sentence, generated given the learned data.
@ -701,14 +716,14 @@ class Database:
data = self.execute( data = self.execute(
f""" f"""
SELECT word3, count FROM MarkovGrammar{self.get_suffix(words[0][0])}{self.get_suffix(words[1][0])} SELECT word3, count FROM MarkovGrammar{self.get_suffix(words[0][0])}{self.get_suffix(words[1][0])}
WHERE word1 = ? AND word2 = ?;""", WHERE word1 = ? AND word2 = ?;""", # noqa: S608
values=words, values=words, # type: ignore[arg-type]
fetch=True, fetch=True,
) )
# Return a word picked from the data, using count as a weighting factor # Return a word picked from the data, using count as a weighting factor
return None if len(data) == 0 else self.pick_word(data, index) return None if len(data) == 0 else self.pick_word(data, index)
def get_next_initial(self, index: int, words) -> Optional[str]: def get_next_initial(self, index: int, words) -> str | None:
"""Generate the next word in the sentence using learned data, given the previous `key_length` words. """Generate the next word in the sentence using learned data, given the previous `key_length` words.
`key_length` is set to 2 by default, and cannot easily be changed. `key_length` is set to 2 by default, and cannot easily be changed.
@ -716,7 +731,7 @@ class Database:
Args: Args:
index (int): The index of this new word in the sentence. index (int): The index of this new word in the sentence.
words (List[str]): The previous 2 words. words (list[str]): The previous 2 words.
Returns: Returns:
Optional[str]: The next word in the sentence, generated given the learned data. Optional[str]: The next word in the sentence, generated given the learned data.
@ -725,14 +740,14 @@ class Database:
data = self.execute( data = self.execute(
f""" f"""
SELECT word3, count FROM MarkovGrammar{self.get_suffix(words[0][0])}{self.get_suffix(words[1][0])} SELECT word3, count FROM MarkovGrammar{self.get_suffix(words[0][0])}{self.get_suffix(words[1][0])}
WHERE word1 = ? AND word2 = ? AND word3 != '<END>';""", WHERE word1 = ? AND word2 = ? AND word3 != '<END>';""", # noqa: S608
values=words, values=words,
fetch=True, fetch=True,
) )
# Return a word picked from the data, using count as a weighting factor # Return a word picked from the data, using count as a weighting factor
return None if len(data) == 0 else self.pick_word(data, index) return None if len(data) == 0 else self.pick_word(data, index)
def get_next_single_initial(self, index: int, word: str) -> Optional[List[str]]: def get_next_single_initial(self, index: int, word: str) -> list[str] | None:
"""Generate the next word in the sentence using learned data, given the previous word. """Generate the next word in the sentence using learned data, given the previous word.
Randomly picks a start character for the second word by weighing all uppercase letters and "_" with their word frequency. Randomly picks a start character for the second word by weighing all uppercase letters and "_" with their word frequency.
@ -742,47 +757,48 @@ class Database:
word (str): The previous word. word (str): The previous word.
Returns: Returns:
Optional[List[str]]: The previous and newly generated word in the sentence as a list, generated given the learned data. Optional[list[str]]: The previous and newly generated word in the sentence as a list, generated given the learned data.
So, the previous word is taken directly the input of this method, and the second word is generated. So, the previous word is taken directly the input of this method, and the second word is generated.
""" """
# Randomly pick first character for the second word # Randomly pick first character for the second word
char_two = random.choices( char_two = random.choices( # noqa: S311
string.ascii_uppercase + "_", weights=self.word_frequency string.ascii_uppercase + "_",
weights=self.word_frequency,
)[0] )[0]
# Get all items # Get all items
data = self.execute( data = self.execute(
f""" f"""
SELECT word2, count FROM MarkovGrammar{self.get_suffix(word[0])}{char_two} SELECT word2, count FROM MarkovGrammar{self.get_suffix(word[0])}{char_two}
WHERE word1 = ? AND word2 != '<END>';""", WHERE word1 = ? AND word2 != '<END>';""", # noqa: S608
values=(word,), values=(word,),
fetch=True, fetch=True,
) )
# Return a word picked from the data, using count as a weighting factor # Return a word picked from the data, using count as a weighting factor
return None if len(data) == 0 else [word] + [self.pick_word(data, index)] return None if len(data) == 0 else [word, self.pick_word(data, index)]
def get_next_single_start(self, word: str) -> Optional[List[str]]: def get_next_single_start(self, word: str) -> list[str] | None:
"""Generate the second word in the sentence using learned data, given the very first word in the sentence. """Generate the second word in the sentence using learned data, given the very first word in the sentence.
Args: Args:
word (str): The first word in the sentence. word (str): The first word in the sentence.
Returns: Returns:
Optional[List[str]]: The first and second word in the sentence as a list, generated given the learned data. Optional[list[str]]: The first and second word in the sentence as a list, generated given the learned data.
So, the first word is taken directly the input of this method, and the second word is generated. So, the first word is taken directly the input of this method, and the second word is generated.
""" """
# Get all items # Get all items
data = self.execute( data = self.execute(
f""" f"""
SELECT word2, count FROM MarkovStart{self.get_suffix(word[0])} SELECT word2, count FROM MarkovStart{self.get_suffix(word[0])}
WHERE word1 = ?;""", WHERE word1 = ?;""", # noqa: S608
values=(word,), values=(word,),
fetch=True, fetch=True,
) )
# Return a word picked from the data, using count as a weighting factor # Return a word picked from the data, using count as a weighting factor
return None if len(data) == 0 else [word] + [self.pick_word(data)] return None if len(data) == 0 else [word, self.pick_word(data)]
@staticmethod @staticmethod
def pick_word(data: List[Tuple[str, int]], index: int = 0) -> str: def pick_word(data: list[tuple[str, int]], index: int = 0) -> str:
"""Randomly pick a word from `data` with word frequency as the weight. """Randomly pick a word from `data` with word frequency as the weight.
`index` is further used to decrease the weight of the <END> token for the first 15 words `index` is further used to decrease the weight of the <END> token for the first 15 words
@ -797,7 +813,7 @@ class Database:
Returns: Returns:
str: The pseudo-randomly picked word. str: The pseudo-randomly picked word.
""" """
return random.choices( return random.choices( # noqa: S311
data, data,
weights=[ weights=[
tup[-1] * ((index + 1) / 15) if tup[0] == "<END>" else tup[-1] tup[-1] * ((index + 1) / 15) if tup[0] == "<END>" else tup[-1]
@ -805,22 +821,24 @@ class Database:
], ],
)[0][0] )[0][0]
def get_start(self) -> List[str]: def get_start(self) -> list[str]:
"""Get a list of two words that mark as the start of a sentence. """Get a list of two words that mark as the start of a sentence.
This is randomly gathered from MarkovStart{character}. This is randomly gathered from MarkovStart{character}.
Returns: Returns:
List[str]: A list of two starting words, such as ["I", "am"]. list[str]: A list of two starting words, such as ["I", "am"].
""" """
# Find one character start from # Find one character start from
character = random.choices( character = random.choices( # noqa: S311
list(string.ascii_lowercase) + ["_"], weights=self.word_frequency, k=1 [*list(string.ascii_lowercase), "_"],
weights=self.word_frequency,
k=1,
)[0] )[0]
# Get all first word, second word, frequency triples, # Get all first word, second word, frequency triples,
# e.g. [("I", "am", 3), ("You", "are", 2), ...] # e.g. [("I", "am", 3), ("You", "are", 2), ...]
data = self.execute(f"SELECT * FROM MarkovStart{character};", fetch=True) data = self.execute(f"SELECT * FROM MarkovStart{character};", fetch=True) # noqa: S608
# If nothing has ever been said # If nothing has ever been said
if len(data) == 0: if len(data) == 0:
@ -828,10 +846,10 @@ class Database:
# Return a (weighted) randomly chosen 2-gram # Return a (weighted) randomly chosen 2-gram
return list( return list(
random.choices(data, weights=[tup[-1] for tup in data], k=1)[0][:-1] random.choices(data, weights=[tup[-1] for tup in data], k=1)[0][:-1], # noqa: S311
) )
def add_rule_queue(self, item: List[str]) -> None: def add_rule_queue(self, item: list[str]) -> None:
"""Adds a rule to the queue, ready to be entered into the knowledge base, given a 3-gram `item`. """Adds a rule to the queue, ready to be entered into the knowledge base, given a 3-gram `item`.
The rules on the queue are added with `self.add_execute_queue`, The rules on the queue are added with `self.add_execute_queue`,
@ -841,7 +859,7 @@ class Database:
we perform no learning. If we did, this could cause infinite recursion in generation. we perform no learning. If we did, this could cause infinite recursion in generation.
Args: Args:
item (List[str]): A 3-gram, e.g. ['How', 'are', 'you']. This is learned by placing this item (list[str]): A 3-gram, e.g. ['How', 'are', 'you']. This is learned by placing this
in the MarkovGrammarHA table, where it can be seen as: in the MarkovGrammarHA table, where it can be seen as:
*Given ["How", "are"], then "you" is a potential output* *Given ["How", "are"], then "you" is a potential output*
The frequency of this word as an output is then incremented, The frequency of this word as an output is then incremented,
@ -854,7 +872,7 @@ class Database:
"" in item "" in item
): # prevent adding invalid rules. Ideally this wouldn't trigger, but it seems to happen rarely. ): # prevent adding invalid rules. Ideally this wouldn't trigger, but it seems to happen rarely.
logger.warning( logger.warning(
f"Failed to add item to rules. Item contains empty string: {item!r}" f"Failed to add item to rules. Item contains empty string: {item!r}",
) )
return return
self.add_execute_queue( self.add_execute_queue(
@ -866,18 +884,18 @@ class Database:
WHERE word1 = ? COLLATE BINARY AND word2 = ? COLLATE BINARY AND word3 = ? COLLATE BINARY WHERE word1 = ? COLLATE BINARY AND word2 = ? COLLATE BINARY AND word3 = ? COLLATE BINARY
), ),
1) 1)
)""", )""", # noqa: S608
values=item + item, values=item + item,
) )
def add_start_queue(self, item: List[str]) -> None: def add_start_queue(self, item: list[str]) -> None:
"""Adds a rule to the queue, ready to be entered into the knowledge base, given a 2-gram `item`. """Adds a rule to the queue, ready to be entered into the knowledge base, given a 2-gram `item`.
The rules on the queue are added with `self.add_execute_queue`, The rules on the queue are added with `self.add_execute_queue`,
which automatically executes the queries in the queue when there are enough queries waiting. which automatically executes the queries in the queue when there are enough queries waiting.
Args: Args:
item (List[str]): A 2-gram, e.g. ['How', 'are']. This is learned by placing this item (list[str]): A 2-gram, e.g. ['How', 'are']. This is learned by placing this
in the MarkovStartH table, where it can be randomly (with frequency as weight) in the MarkovStartH table, where it can be randomly (with frequency as weight)
picked as a start of a sentence. picked as a start of a sentence.
""" """
@ -890,77 +908,6 @@ class Database:
WHERE word1 = ? COLLATE BINARY AND word2 = ? COLLATE BINARY WHERE word1 = ? COLLATE BINARY AND word2 = ? COLLATE BINARY
), ),
1) 1)
)""", )""", # noqa: S608
values=item + item, values=item + item,
) )
def unlearn(self, message: str) -> None:
    """Remove frequency of 3-grams from `message` from the knowledge base.

    Useful when a message is deleted - usually we want the bot to say those things less frequently.
    The frequency count for each of the 3-grams is reduced by 5, i.e. the message is unlearned by 5
    times the rate that a message is learned.
    If this means the frequency for the 3-gram becomes negative,
    we delete the 3-gram from the knowledge base entirely.

    Args:
        message (str): The message to unlearn.
    """
    # NOTE(review): split(" ") (unlike split()) keeps empty tokens for doubled
    # or leading spaces, in which case words[0][0] below raises IndexError —
    # confirm callers pass sanitized messages.
    words = message.split(" ")
    # Construct 3-grams
    tuples = [
        (words[i], words[i + 1], words[i + 2]) for i in range(0, len(words) - 2)
    ]
    # Unlearn start of sentence from MarkovStart
    if len(words) > 1:
        # Reduce "count" by 5
        self.add_execute_queue(
            f"""
            UPDATE MarkovStart{self.get_suffix(words[0][0])}
            SET count = count - 5
            WHERE word1 = ? AND word2 = ?;""",
            values=(
                words[0],
                words[1],
            ),
        )
        # Delete if count is now less than 0.
        self.add_execute_queue(
            f"""
            DELETE FROM MarkovStart{self.get_suffix(words[0][0])}
            WHERE word1 = ? AND word2 = ? AND count <= 0;""",
            values=(
                words[0],
                words[1],
            ),
        )
    # Unlearn all 3 word sections from Grammar
    for word1, word2, word3 in tuples:
        # Reduce "count" by 5
        self.add_execute_queue(
            f"""
            UPDATE MarkovGrammar{self.get_suffix(word1[0])}{self.get_suffix(word2[0])}
            SET count = count - 5
            WHERE word1 = ? AND word2 = ? AND word3 = ?;""",
            values=(
                word1,
                word2,
                word3,
            ),
        )
        # Delete if count is now less than 0.
        self.add_execute_queue(
            f"""
            DELETE FROM MarkovGrammar{self.get_suffix(word1[0])}{self.get_suffix(word2[0])}
            WHERE word1 = ? AND word2 = ? AND word3 = ? AND count <= 0;""",
            values=(
                word1,
                word2,
                word3,
            ),
        )
    # Flush the queued statements immediately so the unlearning takes effect now.
    self.execute_commit()

View file

@ -0,0 +1,453 @@
import string
import time
from enum import StrEnum
from loguru import logger
from nltk.tokenize import sent_tokenize
from TwitchWebsocket import Message, TwitchWebsocket
from src.markovbot_gui.libs.db import Database
from src.markovbot_gui.libs.settings import Settings
from src.markovbot_gui.libs.timer import LoopingTimer
from src.markovbot_gui.libs.tokenizer import detokenize, tokenize
class Commands(StrEnum):
    """Chat commands recognized by the bot.

    `message_handler` matches a message's first whitespace-separated token
    against these values.
    """

    SET_COOLDOWN = "!setcd"  # mod-only: change the generation cooldown
    GENERATE = "!g"  # ask the bot to generate a sentence
    BLACKLIST = "!blacklist"  # mod-only: add a user to the denied-users list
    GENERATE_HELP = "!ghelp"  # post a link explaining how generation works
class MarkovChain:
    """Twitch chat bot that learns 3-grams from chat messages and generates
    sentences from them, either on command ("!g") or on a periodic timer."""

    # Sentinel token stored in the database to mark the end of a sentence.
    end_tag = "<END>"
def __init__(self, settings: Settings | None = None):
    """Wire up settings, database, periodic timers and the Twitch websocket.

    Args:
        settings: Pre-loaded settings; when None, they are read from disk
            via `Settings.read()`.

    Raises:
        ValueError: If an enabled timer interval is below its minimum
            (300s for help messages, 30s for automatic generation).
    """
    self.s = settings or Settings.read()
    # Timestamp of the last generated message; 0.0 means "never", so the
    # first generation request is never blocked by the cooldown.
    self.prev_message_t = 0.0
    self._enabled = True
    self.db = Database(self.s.channel_name)
    # Set up Timer to send periodic help messages, if enabled.
    if self.s.help_message_timer > 0:
        if self.s.help_message_timer < 300:  # noqa: PLR2004
            raise ValueError(
                'Value for "HelpMessageTimer" in must be at least 300 seconds, or a negative number for no help messages.',  # noqa: EM101
            )
        t = LoopingTimer(self.s.help_message_timer, self._command_help)
        t.start()
    # Set up daemon Timer to send automatic generation messages
    if self.s.automatic_generation_timer > 0:
        if self.s.automatic_generation_timer < 30:  # noqa: PLR2004
            raise ValueError(
                'Value for "Automatic_generation_message" must be at least 30 seconds, or a negative number for no '  # noqa: EM101
                "automatic generations.",
            )
        t = LoopingTimer(
            self.s.automatic_generation_timer,
            self._command_automatic_generation,
        )
        t.start()
    # The websocket delivers every chat message to `message_handler`.
    self.ws = TwitchWebsocket(
        host=self.s.host,
        port=self.s.port,
        chan=self.s.channel_name,
        nick=self.s.nickname,
        auth=self.s.authentication,
        callback=self.message_handler,
        capability=["commands", "tags"],
        live=True,
    )
def run_bot(self):
    """Start the underlying Twitch websocket client."""
    self.ws.start_bot()
def stop_bot(self):
    """Leave the configured channel and stop the websocket client."""
    self.ws.leave_channel(self.s.channel_name)
    self.ws.stop()
def _command_help(self) -> None:
    """Send a Help message to the connected chat, as long as the bot wasn't disabled."""
    if not self._enabled:
        return
    logger.info("Help message sent.")
    try:
        self.ws.send_message(
            "Learn how this bot generates sentences here: https://github.com/CubieDev/TwitchMarkovChain#how-it-works",
        )
    except OSError as error:
        # A dropped connection shouldn't kill the timer thread.
        logger.warning(
            f"[OSError: {error}] upon sending help message. Ignoring.",
        )
def _command_set_cooldown(self, username: str, split_message: list[str]):
    """Handle "!setcd <seconds>": update and persist the generation cooldown.

    Silently ignores the command unless it has exactly one parameter.
    """
    if len(split_message) != 2:  # noqa: PLR2004
        return
    try:
        cooldown = int(split_message[1])
    except ValueError:
        self.ws.send_whisper(
            username,
            "The parameter must be an integer amount, eg: !setcd 30",
        )
        return
    self.s.cooldown = cooldown
    self.s.write()
    self.ws.send_whisper(
        username,
        f"The !generate cooldown has been set to {cooldown} seconds.",
    )
def _command_blacklist(self, username: str, split_message: list[str]):
    """Handle "!blacklist <user>": add a user to the denied list and persist it.

    Silently ignores the command unless it has exactly one parameter.
    `username` (the caller) is kept for signature parity with the other
    command handlers; it is not used here.
    """
    # The previous try/except ValueError around the indexing was dead code:
    # list indexing raises IndexError, not ValueError, and the length guard
    # already guarantees index 1 exists — so the whisper branch was unreachable.
    if len(split_message) != 2:  # noqa: PLR2004
        return
    self.s.denied_users.append(split_message[1])
    self.s.write()
def _command_generate(self, username: str, message: str):
    """Handle a generate request: produce a sentence and send it to chat.

    Enforces the configured cooldown: when it is hit, the user is
    (optionally) whispered and nothing is generated.
    """
    cur_time = time.time()
    if self.prev_message_t + self.s.cooldown >= cur_time:
        if not self.db.check_whisper_ignore(username):
            self.send_whisper(
                username,
                f"Cooldown hit: {self.prev_message_t + self.s.cooldown - cur_time:0.2f} out of {self.s.cooldown:.0f}s remaining. !nopm to stop these cooldown pm's.",
            )
        logger.info(
            f"Cooldown hit with {self.prev_message_t + self.s.cooldown - cur_time:0.2f}s remaining.",
        )
        # BUGFIX: execution previously fell through here, so the cooldown was
        # logged/whispered but a sentence was generated and sent anyway.
        return
    # Everything after the command token may seed the generation.
    # NOTE(review): [2:] assumes the command itself tokenizes into exactly two
    # tokens (e.g. "!", "g") — confirm against the tokenizer.
    params = tokenize(message)[2:] if self.s.allow_generate_params else None
    # Generate an actual sentence
    sentence, success = self.generate(params)
    if success:
        # Reset cooldown if a message was actually generated
        self.prev_message_t = time.time()
    # On failure `sentence` is an explanatory message; send it as feedback.
    logger.info(sentence)
    self.ws.send_message(sentence)
    # NOTE(review): this also learns the triggering message itself, including
    # the command token — confirm that is intended.
    self.store_sentence(message)
def _command_automatic_generation(self) -> None:
    """Send an automatic generation message to the connected chat.

    As long as the bot wasn't disabled, just like if someone typed "!g" in chat.
    """
    if not self._enabled:
        return
    logger.debug("Automatically generating message")
    sentence, success = self.generate()
    if not success:
        logger.info(
            "Attempted to output automatic generation message, but there is not enough learned information yet.",
        )
        return
    logger.info(
        f"Created '{sentence}'. Cooling down for {self.s.automatic_generation_timer} seconds before regenerating",
    )
    try:
        self.ws.send_message(sentence)
    except OSError as error:
        # A dropped connection shouldn't kill the timer thread.
        logger.warning(
            f"[OSError: {error}] upon sending automatic generation message. Ignoring.",
        )
def store_sentence(self, message: str):
    """Split `message` into sentences and learn each one into the database.

    For every sentence long enough (more than `key_length` tokens), the first
    `key_length` words are queued as a sentence start, and every sliding
    (key_length + 1)-gram — plus a final one ending in `end_tag` — is queued
    as a grammar rule.
    """
    logger.info(f"Processing {message} in order to store it")
    stripped_message = message.strip()
    try:
        sentences = sent_tokenize(stripped_message)
    except LookupError:
        # First run on a fresh machine: fetch the sentence tokenizer model.
        # NOTE(review): recent NLTK versions look up "punkt_tab" — confirm
        # "punkt" still satisfies sent_tokenize for the pinned nltk version.
        logger.debug("Downloading required punkt resource...")
        import nltk

        nltk.download("punkt")
        logger.debug("Downloaded required punkt resource.")
        sentences = sent_tokenize(stripped_message)
    for sentence in sentences:
        words = tokenize(sentence)
        # Double spaces will lead to invalid rules. We remove empty words here
        if "" in words:
            words = [word for word in words if word]
        # If the sentence is too short, ignore it and move on to the next.
        if len(words) <= self.s.key_length:
            continue
        # Add a new starting point for a sentence to the <START>.
        # BUGFIX: previously `words` itself was overwritten with this
        # truncated list, so the grammar loop below only ever saw the first
        # `key_length` words and every middle 3-gram of the sentence was lost.
        start_words = [words[x] for x in range(self.s.key_length)]
        logger.debug(f"Adding {start_words} to start queue")
        self.db.add_start_queue(start_words)
        # Create Key variable which will be used as a key in the Dictionary for the grammar
        key: list[str] = []
        for word in words:
            # Set up key for first use
            if len(key) < self.s.key_length:
                key.append(word)
                continue
            logger.debug(f"Adding {key}[{word}] to rule queue")
            self.db.add_rule_queue([*key, word])
            # Remove the first word, and add the current word,
            # so that the key is correct for the next word.
            key.pop(0)
            key.append(word)
        logger.debug(f"Adding {key} to rule queue")
        # Add <END> at the end of the sentence
        self.db.add_rule_queue([*key, self.end_tag])
def message_handler(self, message: Message):  # noqa: C901, PLR0912
    """Dispatch an incoming IRC message: run a command or learn the text.

    Called by TwitchWebsocket for every received message. Messages from
    denied users, empty messages and bit-cheer messages are ignored.
    A recognized first token (see `Commands`) runs the matching handler
    (mod-only for cooldown/blacklist); anything else is stored in the
    database as learning material. Exceptions are logged and swallowed so
    the websocket callback loop keeps running.
    """
    try:
        if not message.user or message.user in self.s.denied_users:
            logger.debug(f"User {message.user} can't send messages")
            return
        msgs = message.message.split()
        if not msgs:
            logger.debug("Message is empty")
            return
        # Ignore bit-cheer messages entirely.
        if "bits" in message.tags:
            return
        if "emotes" in message.tags:
            # Replace modified emotes with normal versions,
            # as the bot will never have the modified emotes unlocked at the time.
            for modifier in self.extract_modifiers(message.tags["emotes"]):
                message.message = message.message.replace(modifier, "")
        logger.debug(f"Received {msgs[0]} command from {message.user}")
        # Dispatch on the first whitespace-separated token.
        match msgs[0]:
            case Commands.GENERATE_HELP:
                logger.debug("Executing _command_help()")
                self._command_help()
            case Commands.SET_COOLDOWN:
                if self.is_mod(message.user, message.channel):
                    logger.debug(
                        f"User {message.user} is mod, executing _command_set_cooldown()",
                    )
                    self._command_set_cooldown(
                        split_message=msgs,
                        username=message.user,
                    )
            case Commands.BLACKLIST:
                if self.is_mod(message.user, message.channel):
                    logger.debug(
                        f"User {message.user} is a mod, executing _command_blacklist()",
                    )
                    self._command_blacklist(
                        split_message=msgs,
                        username=message.user,
                    )
            case Commands.GENERATE:
                if not self._enabled:
                    logger.info("Bot not enabled, skipping")
                    return
                # denied_users is re-checked here, though the guard at the top
                # already filtered those users out.
                if message.user not in self.s.denied_users:
                    logger.info(
                        f"User {message.user} allowed to generate, executing _command_generate()",
                    )
                    self._command_generate(
                        message=message.message,
                        username=message.user,
                    )
            case _:
                logger.debug(
                    f"Not a command: {msgs[0]}. Storing into db as a plain message",
                )
                # "366" (IRC end-of-NAMES) signals the channel join completed;
                # it carries no chat text worth learning.
                if message.type == "366":
                    logger.info(f"Successfully joined channel: #{message.channel}")
                    return
                self.store_sentence(message.message)
    except Exception:  # noqa: BLE001
        logger.exception(f"Could not process message {message}")
def generate(self, params: list[str] | None = None) -> tuple[str, bool]:  # noqa: C901, PLR0912
    """Given an input sentence, generate the remainder of the sentence using the learned data.

    Args:
        params (list[str]): A list of words to use as an input to use as the start of generating.

    Returns:
        tuple[str, bool]: A tuple of a sentence as the first value, and a boolean indicating
            whether the generation succeeded as the second value. On failure the first
            value is a human-readable explanation meant to be sent to chat.
    """
    params = params or []
    # List of sentences that will be generated. In some cases, multiple sentences will be generated,
    # e.g. when the first sentence has less words than self.min_sentence_length.
    sentences: list[list | list[str]] = [[]]
    # Check for commands or recursion, eg: !generate !generate
    if len(params) > 0 and self.is_command(params[0]):
        return "You can't make me do commands, you madman!", False
    # Get the starting key and starting sentence.
    # If there is more than 1 param, get the last 2 as the key.
    # Note that self.s.key_length is fixed to 2 in this implementation
    if len(params) > 1:
        key = params[-self.s.key_length :]
        # Copy the entire params for the sentence
        sentences[0] = params.copy()
    elif len(params) == 1:
        # First we try to find if this word was once used as the first word in a sentence:
        key = self.db.get_next_single_start(params[0])  # type: ignore[assignment]
        if key is None:
            # If this failed, we try to find the next word in the grammar as a whole
            key = self.db.get_next_single_initial(0, params[0])
            if key is None:
                # Return a message that this word hasn't been learned yet
                return f'I haven\'t extracted "{params[0]}" from chat yet.', False
        # Copy this for the sentence
        sentences[0] = key.copy()
    else:  # if there are no params
        # Get starting key
        key = self.db.get_start()
        if key:
            # Copy this for the sentence
            sentences[0] = key.copy()
        else:
            # If nothing's ever been said
            return "There is not enough learned information yet.", False
    # Counter to prevent infinite loops (i.e. constantly generating <END> while below the
    # minimum number of words to generate)
    i = 0
    while (
        self.get_sentence_length(sentences) < self.s.max_sentence_length
        and i < self.s.max_sentence_length * 2
    ):
        # Use key to get next word
        if i == 0:
            # Prevent fetching <END> on the first word
            word = self.db.get_next_initial(i, key)
        else:
            word = self.db.get_next(i, key)
        i += 1
        if word == "<END>" or word is None:
            # Break, unless we are before the min_sentence_length
            if i < self.s.min_sentence_length:
                key = self.db.get_start()
                # Ensure that the key can be generated. Otherwise, we still stop.
                if key:
                    # Start a new sentence
                    sentences.append([])
                    for entry in key:
                        sentences[-1].append(entry)
                    continue
            break
        # Otherwise add the word
        sentences[-1].append(word)
        # Shift the key so on the next iteration it gets the next item
        # (the key is a sliding window over the sentence being built).
        key.pop(0)
        key.append(word)
    # If there were params, but the sentence resulting is identical to the params
    # Then the params did not result in an actual sentence
    # If so, restart without params
    if len(params) > 0 and params == sentences[0]:
        return "I haven't learned what to do with \"" + detokenize(
            params[-self.s.key_length :],
        ) + '" yet.', False
    return self.s.sentence_separator.join(
        detokenize(sentence) for sentence in sentences
    ), True
@staticmethod
def get_sentence_length(sentences: list[list[str]]) -> int:
    """Count the words across `sentences`, skipping punctuation tokens.

    A token counts as a word unless it is a single punctuation character or
    starts with an apostrophe (contraction suffixes such as "'re").

    Args:
        sentences (list[list[str]]): Lists of tokens that make up sentences,
            where a token is a word or punctuation. For example:
            [['Hello', ',', 'you', "'re", 'Tom', '!'], ['Yes', ',', 'I', 'am', '.']]
            This would return 6.

    Returns:
        int: The number of words in the sentences.
    """
    return sum(
        1
        for tokens in sentences
        for token in tokens
        if token not in string.punctuation and token[0] != "'"
    )
@staticmethod
def extract_modifiers(emotes: str) -> list[str]:
    """Extract emote modifiers from emotes such as the horizontal flip.

    Scans the emotes tag for "_<modifier>:" segments (e.g. "_HZ" for a
    horizontally flipped emote) and collects each "_..." part.

    Args:
        emotes (str): String containing all emotes used in the message.

    Returns:
        list[str]: List of strings that show modifiers, such as "_HZ" for horizontal flip.
    """
    modifiers: list[str] = []
    remaining = emotes
    while remaining:
        try:
            start = remaining.index("_")
            end = remaining.index(":", start)
        except ValueError:
            # No further "_...:" segment — we're done.
            break
        modifiers.append(remaining[start:end])
        remaining = remaining[end:]
    return modifiers
def send_whisper(self, user: str, message: str) -> None:
    """Optionally send a whisper, only if "WhisperCooldown" is True.

    Args:
        user (str): The user to potentially whisper.
        message (str): The message to potentially whisper
    """
    if not self.s.whisper_cooldown:
        return
    self.ws.send_whisper(user, message)
@staticmethod
def is_command(message: str) -> bool:
    """True if the message is any command, except /me.

    Is used to avoid learning and generating commands.

    Args:
        message (str): The message to check.

    Returns:
        bool: True if the message is any potential command (starts with a '!', '/' or '.')
            except /me.
    """
    # Check command *prefixes* rather than membership in this bot's own
    # `Commands` enum (the previous implementation): learned chat can contain
    # other bots' commands (e.g. "!ban", "/timeout"), and those must not be
    # learned or generated either — which is exactly what the documented
    # contract above requires. "/me" is exempt as it is ordinary styled chat.
    return message.startswith(("!", "/", ".")) and not message.startswith("/me")
def is_mod(self, username: str, channel: str) -> bool:
    """True if the user is a moderator.

    Args:
        username (str): The name of the user to check
        channel (str): The name of the channel

    Returns:
        bool: True if the user is listed as a mod in the settings, or is the
            broadcaster (username equals the channel name).
    """
    if username == channel:
        return True
    return username in self.s.mods
if __name__ == "__main__":
    # BUGFIX: constructing MarkovChain only wires up settings, database,
    # timers and the websocket client; run_bot() is what actually starts the
    # bot, so instantiating alone left the script doing nothing.
    MarkovChain().run_bot()

View file

@ -0,0 +1,118 @@
import json
from pathlib import Path
from typing import Literal
import platformdirs
from loguru import logger
from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
    """Bot configuration backed by pydantic-settings.

    Every field carries a PascalCase ``alias``/``serialization_alias`` so the
    on-disk JSON uses keys like ``"Channel"`` while Python code uses the
    snake_case attribute names. By default the settings live in
    ``<user config dir>/markovbot_gui/settings.json`` (see `read`/`write`).
    """

    # --- IRC connection ---
    host: str = Field("irc.chat.twitch.tv", alias="Host", serialization_alias="Host")
    port: int = Field(6667, alias="Port", serialization_alias="Port")
    # Channel to join, possibly "#"-prefixed (see `channel_name`). Required.
    channel: str = Field(..., alias="Channel", serialization_alias="Channel")
    nickname: str = Field(..., alias="Nickname", serialization_alias="Nickname")
    # Token used to authenticate with Twitch IRC. Required.
    authentication: str = Field(
        ...,
        alias="Authentication",
        serialization_alias="Authentication",
    )
    # --- moderation / filtering ---
    # Users whose messages are ignored entirely (common chat bots by default).
    denied_users: list[str] = Field(
        [
            "StreamElements",
            "Nightbot",
            "Moobot",
            "Marbiebot",
        ],
        alias="DeniedUsers",
        serialization_alias="DeniedUsers",
    )
    banned_words: list[str] = Field(
        default_factory=list,
        alias="BannedWords",
        serialization_alias="BannedWords",
    )
    # Users treated as moderators in addition to the broadcaster.
    mods: list[str] = Field(
        default_factory=list,
        alias="Mods",
        serialization_alias="Mods",
    )
    # --- generation behaviour ---
    # Seconds between allowed chat-triggered generations.
    cooldown: int = Field(210, alias="Cooldown", serialization_alias="Cooldown")
    # Markov key size (number of words per grammar key).
    key_length: int = Field(2, alias="KeyLength", serialization_alias="KeyLength")
    max_sentence_length: int = Field(
        25,
        alias="MaxSentenceWordAmount",
        serialization_alias="MaxSentenceWordAmount",
    )
    # Negative value disables the minimum-length behaviour.
    min_sentence_length: int = Field(
        -1,
        alias="MinSentenceWordAmount",
        serialization_alias="MinSentenceWordAmount",
    )
    # Seconds between periodic help messages; non-positive disables them.
    help_message_timer: int = Field(
        60 * 60 * 5,
        alias="HelpMessageTimer",
        serialization_alias="HelpMessageTimer",
    )
    # Seconds between unprompted generations; non-positive disables them.
    automatic_generation_timer: int = Field(
        -1,
        alias="AutomaticGenerationTimer",
        serialization_alias="AutomaticGenerationTimer",
    )
    # Whether to whisper users when they hit the generation cooldown.
    whisper_cooldown: bool = Field(
        True,
        alias="WhisperCooldown",
        serialization_alias="WhisperCooldown",
    )
    enable_generate_command: bool = Field(
        True,
        alias="EnableGenerateCommand",
        serialization_alias="EnableGenerateCommand",
    )
    # Separator inserted between multiple generated sentences.
    sentence_separator: str = Field(
        " - ",
        alias="SentenceSeparator",
        serialization_alias="SentenceSeparator",
    )
    # Whether generation requests may include seed words as parameters.
    allow_generate_params: bool = Field(
        True,
        alias="AllowGenerateParams",
        serialization_alias="AllowGenerateParams",
    )
    log_level: Literal[
        "CRITICAL",
        "ERROR",
        "WARNING",
        "INFO",
        "DEBUG",
        "TRACE",
    ] = Field("DEBUG", alias="LogLevel")

    # Ignore unknown keys in settings.json instead of raising on load.
    model_config = SettingsConfigDict(extra="ignore")

    @property
    def channel_name(self) -> str:
        """The channel as a bare lowercase name, without the IRC '#' prefix."""
        return self.channel.replace("#", "").lower()

    @classmethod
    def read(cls, filepath: Path | None = None) -> "Settings":
        """Load settings from `filepath` (default: the per-user config dir).

        Raises FileNotFoundError when the file does not exist, and pydantic's
        ValidationError when required keys are missing or invalid.
        """
        if not filepath:
            filepath = (
                platformdirs.user_config_path("markovbot_gui", ensure_exists=True)
                / "settings.json"
            )
        with filepath.open("r") as f:
            data = json.load(f)
        return Settings(**data)

    def write(self, filepath: Path | None = None) -> None:
        """Persist the current settings as alias-keyed JSON to `filepath`."""
        if not filepath:
            filepath = (
                platformdirs.user_config_path("markovbot_gui", ensure_exists=True)
                / "settings.json"
            )
        with filepath.open("w") as f:
            logger.info(f"Writing current settings to {filepath}")
            json.dump(self.model_dump(by_alias=True), f, indent=4)

View file

@ -1,6 +1,6 @@
import threading
import logging import logging
from typing import Callable import threading
from collections.abc import Callable
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -12,7 +12,11 @@ class LoopingTimer(threading.Thread):
""" """
def __init__( def __init__(
self, interval: int, target: Callable[[], None], *args, **kwargs self,
interval: int,
target: Callable[[], None],
*args,
**kwargs,
) -> None: ) -> None:
threading.Thread.__init__(self) threading.Thread.__init__(self)
self.interval = interval self.interval = interval

View file

@ -1,26 +1,29 @@
import re import re
from typing import List from typing import ClassVar
from nltk.tokenize.destructive import NLTKWordTokenizer from nltk.tokenize.destructive import NLTKWordTokenizer
from nltk.tokenize.treebank import TreebankWordDetokenizer from nltk.tokenize.treebank import TreebankWordDetokenizer
class MarkovChainTokenizer(NLTKWordTokenizer): class MarkovChainTokenizer(NLTKWordTokenizer):
# Starting quotes. # Starting quotes.
STARTING_QUOTES = [ STARTING_QUOTES: ClassVar[list] = [
(re.compile("([«“‘„]|[`]+)", re.U), r" \1 "), (re.compile("([«“‘„]|[`]+)", re.UNICODE), r" \1 "), # noqa: RUF001
# (re.compile(r"^\""), r"``"), # Custom for MarkovChain: Don't use `` as starting quotes
(re.compile(r"(``)"), r" \1 "), (re.compile(r"(``)"), r" \1 "),
(re.compile(r"([ \(\[{<])(\"|\'{2})"), r"\1 '' "), (re.compile(r"([ \(\[{<])(\"|\'{2})"), r"\1 '' "),
(re.compile(r"(?i)(\')(?!re|ve|ll|m|t|s|d)(\w)\b", re.U), r"\1 \2"), (re.compile(r"(?i)(\')(?!re|ve|ll|m|t|s|d)(\w)\b", re.UNICODE), r"\1 \2"),
] ]
PUNCTUATION = [ PUNCTUATION: ClassVar[list] = [
(re.compile(r""), r"'"), (re.compile(r""), r"'"), # noqa: RUF001
(re.compile(r'([^\.])(\.)([\]\)}>"\'' "»”’ " r"]*)\s*$", re.U), r"\1 \2 \3 "), (
re.compile(r'([^\.])(\.)([\]\)}>"\'' "»”’ " r"]*)\s*$", re.UNICODE), # noqa: RUF001
r"\1 \2 \3 ",
),
(re.compile(r"([:,])([^\d])"), r" \1 \2"), (re.compile(r"([:,])([^\d])"), r" \1 \2"),
(re.compile(r"([:,])$"), r" \1 "), (re.compile(r"([:,])$"), r" \1 "),
# See https://github.com/nltk/nltk/pull/2322 # See https://github.com/nltk/nltk/pull/2322
(re.compile(r"\.{2,}", re.U), r" \g<0> "), (re.compile(r"\.{2,}", re.UNICODE), r" \g<0> "),
# Custom for MarkovChain: Removed the "@" # Custom for MarkovChain: Removed the "@"
(re.compile(r"[;#$%&]"), r" \g<0> "), (re.compile(r"[;#$%&]"), r" \g<0> "),
( (
@ -30,7 +33,7 @@ class MarkovChainTokenizer(NLTKWordTokenizer):
(re.compile(r"[?!]"), r" \g<0> "), (re.compile(r"[?!]"), r" \g<0> "),
(re.compile(r"([^'])' "), r"\1 ' "), (re.compile(r"([^'])' "), r"\1 ' "),
# See https://github.com/nltk/nltk/pull/2322 # See https://github.com/nltk/nltk/pull/2322
(re.compile(r"[*]", re.U), r" \g<0> "), (re.compile(r"[*]", re.UNICODE), r" \g<0> "),
] ]
@ -49,14 +52,14 @@ EMOTICON_RE = re.compile(
| |
<3 # heart <3 # heart
)""", )""",
re.VERBOSE | re.I | re.UNICODE, re.VERBOSE | re.IGNORECASE | re.UNICODE,
) )
_tokenize = MarkovChainTokenizer().tokenize _tokenize = MarkovChainTokenizer().tokenize
_detokenize = TreebankWordDetokenizer().tokenize _detokenize = TreebankWordDetokenizer().tokenize
def tokenize(sentence: str) -> List[str]: def tokenize(sentence: str) -> list[str]:
"""Word tokenize, separating commas, dots, apostrophes, etc. """Word tokenize, separating commas, dots, apostrophes, etc.
Uses nltk's `NLTKWordTokenizer`, but does not consider "@" to be punctuation. Uses nltk's `NLTKWordTokenizer`, but does not consider "@" to be punctuation.
@ -68,7 +71,7 @@ def tokenize(sentence: str) -> List[str]:
sentence (str): Input sentence. sentence (str): Input sentence.
Returns: Returns:
List[str]: Tokenized output of the sentence. list[str]: Tokenized output of the sentence.
""" """
output = [] output = []
@ -85,7 +88,7 @@ def tokenize(sentence: str) -> List[str]:
return output return output
def detokenize(tokenized: List[str]) -> str: def detokenize(tokenized: list[str]) -> str:
"""Detokenize a tokenized list of words and punctuation. """Detokenize a tokenized list of words and punctuation.
Converted in a less naïve way than `" ".join(tokenized)` Converted in a less naïve way than `" ".join(tokenized)`
@ -107,7 +110,7 @@ def detokenize(tokenized: List[str]) -> str:
index for index, token in enumerate(tokenized) if token in ("''", "'", '"') index for index, token in enumerate(tokenized) if token in ("''", "'", '"')
] ]
# Replace '' with ", works better with more recent NLTK versions # Replace '' with ", works better with more recent NLTK versions
tokenized_copy = [token if token != "''" else '"' for token in tokenized] tokenized_copy = [token if token != "''" else '"' for token in tokenized] # noqa: S105
# We get the reverse of the enumerate, as we modify the list we took the indices from # We get the reverse of the enumerate, as we modify the list we took the indices from
enumerated = list(enumerate(indices)) enumerated = list(enumerate(indices))
@ -117,15 +120,13 @@ def detokenize(tokenized: List[str]) -> str:
# If there is another word, merge with that word and prepend a space # If there is another word, merge with that word and prepend a space
if len(tokenized) > index + 1: if len(tokenized) > index + 1:
tokenized_copy[index : index + 2] = [ tokenized_copy[index : index + 2] = [
"".join(tokenized_copy[index : index + 2]) "".join(tokenized_copy[index : index + 2]),
] ]
# Closing quote # Closing quote
else: elif index > 0:
# If there is a previous word, merge with that word and append a space tokenized_copy[index - 1 : index + 1] = [
if index > 0: "".join(tokenized_copy[index - 1 : index + 1]),
tokenized_copy[index - 1 : index + 1] = [ ]
"".join(tokenized_copy[index - 1 : index + 1])
]
return _detokenize(tokenized_copy).strip() return _detokenize(tokenized_copy).strip()

View file

@ -18,7 +18,7 @@ class BotApp(App):
) )
def run_bot(self, instance): def run_bot(self, instance):
bot_runner = BotRunner(config_path=self.config_path) bot_runner = BotRunner(settings_path=self.config_path)
popup = Popup( popup = Popup(
title="Bot Running", title="Bot Running",
content=bot_runner, content=bot_runner,
@ -34,7 +34,7 @@ class BotApp(App):
title=f"Bot Configuration, available at {self.config_path}", title=f"Bot Configuration, available at {self.config_path}",
content=config_window, content=config_window,
size_hint=(None, None), size_hint=(None, None),
size=(dp(400), dp(300)), size=(dp(400), dp(400)),
auto_dismiss=False, auto_dismiss=False,
) )

204
uv.lock generated
View file

@ -1,5 +1,9 @@
version = 1 version = 1
requires-python = ">=3.11" requires-python = ">=3.11"
resolution-markers = [
"python_full_version < '3.13'",
"python_full_version >= '3.13'",
]
[[package]] [[package]]
name = "altgraph" name = "altgraph"
@ -10,6 +14,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/4d/3f/3bc3f1d83f6e4a7fcb834d3720544ca597590425be5ba9db032b2bf322a2/altgraph-0.17.4-py2.py3-none-any.whl", hash = "sha256:642743b4750de17e655e6711601b077bc6598dbfa3ba5fa2b2a35ce12b508dff", size = 21212 }, { url = "https://files.pythonhosted.org/packages/4d/3f/3bc3f1d83f6e4a7fcb834d3720544ca597590425be5ba9db032b2bf322a2/altgraph-0.17.4-py2.py3-none-any.whl", hash = "sha256:642743b4750de17e655e6711601b077bc6598dbfa3ba5fa2b2a35ce12b508dff", size = 21212 },
] ]
[[package]]
name = "annotated-types"
version = "0.7.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 },
]
[[package]] [[package]]
name = "certifi" name = "certifi"
version = "2024.8.30" version = "2024.8.30"
@ -206,6 +219,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/6a/55/cd1555bde62f809219cbc5d8a0836b0293399da2f4ba4e8ee84b6a7cc393/Kivy_Garden-0.1.5-py3-none-any.whl", hash = "sha256:ef50f44b96358cf10ac5665f27a4751bb34ef54051c54b93af891f80afe42929", size = 4623 }, { url = "https://files.pythonhosted.org/packages/6a/55/cd1555bde62f809219cbc5d8a0836b0293399da2f4ba4e8ee84b6a7cc393/Kivy_Garden-0.1.5-py3-none-any.whl", hash = "sha256:ef50f44b96358cf10ac5665f27a4751bb34ef54051c54b93af891f80afe42929", size = 4623 },
] ]
[[package]]
name = "loguru"
version = "0.7.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
{ name = "win32-setctime", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/9e/30/d87a423766b24db416a46e9335b9602b054a72b96a88a241f2b09b560fa8/loguru-0.7.2.tar.gz", hash = "sha256:e671a53522515f34fd406340ee968cb9ecafbc4b36c679da03c18fd8d0bd51ac", size = 145103 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/03/0a/4f6fed21aa246c6b49b561ca55facacc2a44b87d65b8b92362a8e99ba202/loguru-0.7.2-py3-none-any.whl", hash = "sha256:003d71e3d3ed35f0f8984898359d65b79e5b21943f78af86aa5491210429b8eb", size = 62549 },
]
[[package]] [[package]]
name = "macholib" name = "macholib"
version = "1.16.3" version = "1.16.3"
@ -224,30 +250,79 @@ version = "0.1.0"
source = { virtual = "." } source = { virtual = "." }
dependencies = [ dependencies = [
{ name = "kivy", extra = ["base"] }, { name = "kivy", extra = ["base"] },
{ name = "loguru" },
{ name = "nltk" }, { name = "nltk" },
{ name = "pillow" }, { name = "pillow" },
{ name = "platformdirs" }, { name = "platformdirs" },
{ name = "pydantic" },
{ name = "pydantic-settings" },
{ name = "pyinstaller" }, { name = "pyinstaller" },
{ name = "twitchwebsocket" }, { name = "twitchwebsocket" },
] ]
[package.dev-dependencies] [package.dependency-groups]
dev = [ dev = [
{ name = "mypy" },
{ name = "pyright" },
{ name = "ruff" }, { name = "ruff" },
] ]
[package.metadata] [package.metadata]
requires-dist = [ requires-dist = [
{ name = "kivy", extras = ["base"], specifier = ">=2.3.0" }, { name = "kivy", extras = ["base"], specifier = ">=2.3.0" },
{ name = "loguru", specifier = ">=0.7.2" },
{ name = "nltk", specifier = ">=3.9.1" }, { name = "nltk", specifier = ">=3.9.1" },
{ name = "pillow", specifier = ">=10.4.0" }, { name = "pillow", specifier = ">=10.4.0" },
{ name = "platformdirs", specifier = ">=4.3.6" }, { name = "platformdirs", specifier = ">=4.3.6" },
{ name = "pydantic", specifier = ">=2.9.2" },
{ name = "pydantic-settings", specifier = ">=2.6.0" },
{ name = "pyinstaller", specifier = ">=6.11.0" }, { name = "pyinstaller", specifier = ">=6.11.0" },
{ name = "twitchwebsocket", specifier = ">=1.2.1" }, { name = "twitchwebsocket", specifier = ">=1.2.1" },
] ]
[package.metadata.requires-dev] [package.metadata.dependency-groups]
dev = [{ name = "ruff", specifier = ">=0.7.0" }] dev = [
{ name = "mypy", specifier = ">=1.13.0" },
{ name = "pyright", specifier = ">=1.1.387" },
{ name = "ruff", specifier = ">=0.7.0" },
]
[[package]]
name = "mypy"
version = "1.13.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "mypy-extensions" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/e8/21/7e9e523537991d145ab8a0a2fd98548d67646dc2aaaf6091c31ad883e7c1/mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e", size = 3152532 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d0/19/de0822609e5b93d02579075248c7aa6ceaddcea92f00bf4ea8e4c22e3598/mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d", size = 10939027 },
{ url = "https://files.pythonhosted.org/packages/c8/71/6950fcc6ca84179137e4cbf7cf41e6b68b4a339a1f5d3e954f8c34e02d66/mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d", size = 10108699 },
{ url = "https://files.pythonhosted.org/packages/26/50/29d3e7dd166e74dc13d46050b23f7d6d7533acf48f5217663a3719db024e/mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b", size = 12506263 },
{ url = "https://files.pythonhosted.org/packages/3f/1d/676e76f07f7d5ddcd4227af3938a9c9640f293b7d8a44dd4ff41d4db25c1/mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73", size = 12984688 },
{ url = "https://files.pythonhosted.org/packages/9c/03/5a85a30ae5407b1d28fab51bd3e2103e52ad0918d1e68f02a7778669a307/mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca", size = 9626811 },
{ url = "https://files.pythonhosted.org/packages/fb/31/c526a7bd2e5c710ae47717c7a5f53f616db6d9097caf48ad650581e81748/mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5", size = 11077900 },
{ url = "https://files.pythonhosted.org/packages/83/67/b7419c6b503679d10bd26fc67529bc6a1f7a5f220bbb9f292dc10d33352f/mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e", size = 10074818 },
{ url = "https://files.pythonhosted.org/packages/ba/07/37d67048786ae84e6612575e173d713c9a05d0ae495dde1e68d972207d98/mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2", size = 12589275 },
{ url = "https://files.pythonhosted.org/packages/1f/17/b1018c6bb3e9f1ce3956722b3bf91bff86c1cefccca71cec05eae49d6d41/mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0", size = 13037783 },
{ url = "https://files.pythonhosted.org/packages/cb/32/cd540755579e54a88099aee0287086d996f5a24281a673f78a0e14dba150/mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2", size = 9726197 },
{ url = "https://files.pythonhosted.org/packages/11/bb/ab4cfdc562cad80418f077d8be9b4491ee4fb257440da951b85cbb0a639e/mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7", size = 11069721 },
{ url = "https://files.pythonhosted.org/packages/59/3b/a393b1607cb749ea2c621def5ba8c58308ff05e30d9dbdc7c15028bca111/mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62", size = 10063996 },
{ url = "https://files.pythonhosted.org/packages/d1/1f/6b76be289a5a521bb1caedc1f08e76ff17ab59061007f201a8a18cc514d1/mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8", size = 12584043 },
{ url = "https://files.pythonhosted.org/packages/a6/83/5a85c9a5976c6f96e3a5a7591aa28b4a6ca3a07e9e5ba0cec090c8b596d6/mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7", size = 13036996 },
{ url = "https://files.pythonhosted.org/packages/b4/59/c39a6f752f1f893fccbcf1bdd2aca67c79c842402b5283563d006a67cf76/mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc", size = 9737709 },
{ url = "https://files.pythonhosted.org/packages/3b/86/72ce7f57431d87a7ff17d442f521146a6585019eb8f4f31b7c02801f78ad/mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a", size = 2647043 },
]
[[package]]
name = "mypy-extensions"
version = "1.0.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/98/a4/1ab47638b92648243faf97a5aeb6ea83059cc3624972ab6b8d2316078d3f/mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782", size = 4433 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/2a/e2/5d3f6ada4297caebe1a2add3b126fe800c96f56dbe5d1988a2cbe0b267aa/mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", size = 4695 },
]
[[package]] [[package]]
name = "nltk" name = "nltk"
@ -264,6 +339,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/4d/66/7d9e26593edda06e8cb531874633f7c2372279c3b0f46235539fe546df8b/nltk-3.9.1-py3-none-any.whl", hash = "sha256:4fa26829c5b00715afe3061398a8989dc643b92ce7dd93fb4585a70930d168a1", size = 1505442 }, { url = "https://files.pythonhosted.org/packages/4d/66/7d9e26593edda06e8cb531874633f7c2372279c3b0f46235539fe546df8b/nltk-3.9.1-py3-none-any.whl", hash = "sha256:4fa26829c5b00715afe3061398a8989dc643b92ce7dd93fb4585a70930d168a1", size = 1505442 },
] ]
[[package]]
name = "nodeenv"
version = "1.9.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314 },
]
[[package]] [[package]]
name = "packaging" name = "packaging"
version = "24.1" version = "24.1"
@ -332,6 +416,80 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/3c/a6/bc1012356d8ece4d66dd75c4b9fc6c1f6650ddd5991e421177d9f8f671be/platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb", size = 18439 }, { url = "https://files.pythonhosted.org/packages/3c/a6/bc1012356d8ece4d66dd75c4b9fc6c1f6650ddd5991e421177d9f8f671be/platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb", size = 18439 },
] ]
[[package]]
name = "pydantic"
version = "2.9.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "annotated-types" },
{ name = "pydantic-core" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/a9/b7/d9e3f12af310e1120c21603644a1cd86f59060e040ec5c3a80b8f05fae30/pydantic-2.9.2.tar.gz", hash = "sha256:d155cef71265d1e9807ed1c32b4c8deec042a44a50a4188b25ac67ecd81a9c0f", size = 769917 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/df/e4/ba44652d562cbf0bf320e0f3810206149c8a4e99cdbf66da82e97ab53a15/pydantic-2.9.2-py3-none-any.whl", hash = "sha256:f048cec7b26778210e28a0459867920654d48e5e62db0958433636cde4254f12", size = 434928 },
]
[[package]]
name = "pydantic-core"
version = "2.23.4"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/e2/aa/6b6a9b9f8537b872f552ddd46dd3da230367754b6f707b8e1e963f515ea3/pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863", size = 402156 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/5d/30/890a583cd3f2be27ecf32b479d5d615710bb926d92da03e3f7838ff3e58b/pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8", size = 1865160 },
{ url = "https://files.pythonhosted.org/packages/1d/9a/b634442e1253bc6889c87afe8bb59447f106ee042140bd57680b3b113ec7/pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d", size = 1776777 },
{ url = "https://files.pythonhosted.org/packages/75/9a/7816295124a6b08c24c96f9ce73085032d8bcbaf7e5a781cd41aa910c891/pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e", size = 1799244 },
{ url = "https://files.pythonhosted.org/packages/a9/8f/89c1405176903e567c5f99ec53387449e62f1121894aa9fc2c4fdc51a59b/pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff02b6d461a6de369f07ec15e465a88895f3223eb75073ffea56b84d9331f607", size = 1805307 },
{ url = "https://files.pythonhosted.org/packages/d5/a5/1a194447d0da1ef492e3470680c66048fef56fc1f1a25cafbea4bc1d1c48/pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996a38a83508c54c78a5f41456b0103c30508fed9abcad0a59b876d7398f25fd", size = 2000663 },
{ url = "https://files.pythonhosted.org/packages/13/a5/1df8541651de4455e7d587cf556201b4f7997191e110bca3b589218745a5/pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d97683ddee4723ae8c95d1eddac7c192e8c552da0c73a925a89fa8649bf13eea", size = 2655941 },
{ url = "https://files.pythonhosted.org/packages/44/31/a3899b5ce02c4316865e390107f145089876dff7e1dfc770a231d836aed8/pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:216f9b2d7713eb98cb83c80b9c794de1f6b7e3145eef40400c62e86cee5f4e1e", size = 2052105 },
{ url = "https://files.pythonhosted.org/packages/1b/aa/98e190f8745d5ec831f6d5449344c48c0627ac5fed4e5340a44b74878f8e/pydantic_core-2.23.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6f783e0ec4803c787bcea93e13e9932edab72068f68ecffdf86a99fd5918878b", size = 1919967 },
{ url = "https://files.pythonhosted.org/packages/ae/35/b6e00b6abb2acfee3e8f85558c02a0822e9a8b2f2d812ea8b9079b118ba0/pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d0776dea117cf5272382634bd2a5c1b6eb16767c223c6a5317cd3e2a757c61a0", size = 1964291 },
{ url = "https://files.pythonhosted.org/packages/13/46/7bee6d32b69191cd649bbbd2361af79c472d72cb29bb2024f0b6e350ba06/pydantic_core-2.23.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5f7a395a8cf1621939692dba2a6b6a830efa6b3cee787d82c7de1ad2930de64", size = 2109666 },
{ url = "https://files.pythonhosted.org/packages/39/ef/7b34f1b122a81b68ed0a7d0e564da9ccdc9a2924c8d6c6b5b11fa3a56970/pydantic_core-2.23.4-cp311-none-win32.whl", hash = "sha256:74b9127ffea03643e998e0c5ad9bd3811d3dac8c676e47db17b0ee7c3c3bf35f", size = 1732940 },
{ url = "https://files.pythonhosted.org/packages/2f/76/37b7e76c645843ff46c1d73e046207311ef298d3f7b2f7d8f6ac60113071/pydantic_core-2.23.4-cp311-none-win_amd64.whl", hash = "sha256:98d134c954828488b153d88ba1f34e14259284f256180ce659e8d83e9c05eaa3", size = 1916804 },
{ url = "https://files.pythonhosted.org/packages/74/7b/8e315f80666194b354966ec84b7d567da77ad927ed6323db4006cf915f3f/pydantic_core-2.23.4-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f3e0da4ebaef65158d4dfd7d3678aad692f7666877df0002b8a522cdf088f231", size = 1856459 },
{ url = "https://files.pythonhosted.org/packages/14/de/866bdce10ed808323d437612aca1ec9971b981e1c52e5e42ad9b8e17a6f6/pydantic_core-2.23.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f69a8e0b033b747bb3e36a44e7732f0c99f7edd5cea723d45bc0d6e95377ffee", size = 1770007 },
{ url = "https://files.pythonhosted.org/packages/dc/69/8edd5c3cd48bb833a3f7ef9b81d7666ccddd3c9a635225214e044b6e8281/pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723314c1d51722ab28bfcd5240d858512ffd3116449c557a1336cbe3919beb87", size = 1790245 },
{ url = "https://files.pythonhosted.org/packages/80/33/9c24334e3af796ce80d2274940aae38dd4e5676298b4398eff103a79e02d/pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb2802e667b7051a1bebbfe93684841cc9351004e2badbd6411bf357ab8d5ac8", size = 1801260 },
{ url = "https://files.pythonhosted.org/packages/a5/6f/e9567fd90104b79b101ca9d120219644d3314962caa7948dd8b965e9f83e/pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d18ca8148bebe1b0a382a27a8ee60350091a6ddaf475fa05ef50dc35b5df6327", size = 1996872 },
{ url = "https://files.pythonhosted.org/packages/2d/ad/b5f0fe9e6cfee915dd144edbd10b6e9c9c9c9d7a56b69256d124b8ac682e/pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33e3d65a85a2a4a0dc3b092b938a4062b1a05f3a9abde65ea93b233bca0e03f2", size = 2661617 },
{ url = "https://files.pythonhosted.org/packages/06/c8/7d4b708f8d05a5cbfda3243aad468052c6e99de7d0937c9146c24d9f12e9/pydantic_core-2.23.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:128585782e5bfa515c590ccee4b727fb76925dd04a98864182b22e89a4e6ed36", size = 2071831 },
{ url = "https://files.pythonhosted.org/packages/89/4d/3079d00c47f22c9a9a8220db088b309ad6e600a73d7a69473e3a8e5e3ea3/pydantic_core-2.23.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:68665f4c17edcceecc112dfed5dbe6f92261fb9d6054b47d01bf6371a6196126", size = 1917453 },
{ url = "https://files.pythonhosted.org/packages/e9/88/9df5b7ce880a4703fcc2d76c8c2d8eb9f861f79d0c56f4b8f5f2607ccec8/pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20152074317d9bed6b7a95ade3b7d6054845d70584216160860425f4fbd5ee9e", size = 1968793 },
{ url = "https://files.pythonhosted.org/packages/e3/b9/41f7efe80f6ce2ed3ee3c2dcfe10ab7adc1172f778cc9659509a79518c43/pydantic_core-2.23.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9261d3ce84fa1d38ed649c3638feefeae23d32ba9182963e465d58d62203bd24", size = 2116872 },
{ url = "https://files.pythonhosted.org/packages/63/08/b59b7a92e03dd25554b0436554bf23e7c29abae7cce4b1c459cd92746811/pydantic_core-2.23.4-cp312-none-win32.whl", hash = "sha256:4ba762ed58e8d68657fc1281e9bb72e1c3e79cc5d464be146e260c541ec12d84", size = 1738535 },
{ url = "https://files.pythonhosted.org/packages/88/8d/479293e4d39ab409747926eec4329de5b7129beaedc3786eca070605d07f/pydantic_core-2.23.4-cp312-none-win_amd64.whl", hash = "sha256:97df63000f4fea395b2824da80e169731088656d1818a11b95f3b173747b6cd9", size = 1917992 },
{ url = "https://files.pythonhosted.org/packages/ad/ef/16ee2df472bf0e419b6bc68c05bf0145c49247a1095e85cee1463c6a44a1/pydantic_core-2.23.4-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7530e201d10d7d14abce4fb54cfe5b94a0aefc87da539d0346a484ead376c3cc", size = 1856143 },
{ url = "https://files.pythonhosted.org/packages/da/fa/bc3dbb83605669a34a93308e297ab22be82dfb9dcf88c6cf4b4f264e0a42/pydantic_core-2.23.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:df933278128ea1cd77772673c73954e53a1c95a4fdf41eef97c2b779271bd0bd", size = 1770063 },
{ url = "https://files.pythonhosted.org/packages/4e/48/e813f3bbd257a712303ebdf55c8dc46f9589ec74b384c9f652597df3288d/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cb3da3fd1b6a5d0279a01877713dbda118a2a4fc6f0d821a57da2e464793f05", size = 1790013 },
{ url = "https://files.pythonhosted.org/packages/b4/e0/56eda3a37929a1d297fcab1966db8c339023bcca0b64c5a84896db3fcc5c/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c6dcb030aefb668a2b7009c85b27f90e51e6a3b4d5c9bc4c57631292015b0d", size = 1801077 },
{ url = "https://files.pythonhosted.org/packages/04/be/5e49376769bfbf82486da6c5c1683b891809365c20d7c7e52792ce4c71f3/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:696dd8d674d6ce621ab9d45b205df149399e4bb9aa34102c970b721554828510", size = 1996782 },
{ url = "https://files.pythonhosted.org/packages/bc/24/e3ee6c04f1d58cc15f37bcc62f32c7478ff55142b7b3e6d42ea374ea427c/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2971bb5ffe72cc0f555c13e19b23c85b654dd2a8f7ab493c262071377bfce9f6", size = 2661375 },
{ url = "https://files.pythonhosted.org/packages/c1/f8/11a9006de4e89d016b8de74ebb1db727dc100608bb1e6bbe9d56a3cbbcce/pydantic_core-2.23.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8394d940e5d400d04cad4f75c0598665cbb81aecefaca82ca85bd28264af7f9b", size = 2071635 },
{ url = "https://files.pythonhosted.org/packages/7c/45/bdce5779b59f468bdf262a5bc9eecbae87f271c51aef628d8c073b4b4b4c/pydantic_core-2.23.4-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0dff76e0602ca7d4cdaacc1ac4c005e0ce0dcfe095d5b5259163a80d3a10d327", size = 1916994 },
{ url = "https://files.pythonhosted.org/packages/d8/fa/c648308fe711ee1f88192cad6026ab4f925396d1293e8356de7e55be89b5/pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7d32706badfe136888bdea71c0def994644e09fff0bfe47441deaed8e96fdbc6", size = 1968877 },
{ url = "https://files.pythonhosted.org/packages/16/16/b805c74b35607d24d37103007f899abc4880923b04929547ae68d478b7f4/pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f", size = 2116814 },
{ url = "https://files.pythonhosted.org/packages/d1/58/5305e723d9fcdf1c5a655e6a4cc2a07128bf644ff4b1d98daf7a9dbf57da/pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769", size = 1738360 },
{ url = "https://files.pythonhosted.org/packages/a5/ae/e14b0ff8b3f48e02394d8acd911376b7b66e164535687ef7dc24ea03072f/pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5", size = 1919411 },
]
[[package]]
name = "pydantic-settings"
version = "2.6.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pydantic" },
{ name = "python-dotenv" },
]
sdist = { url = "https://files.pythonhosted.org/packages/6c/66/5f1a9da10675bfb3b9da52f5b689c77e0a5612263fcce510cfac3e99a168/pydantic_settings-2.6.0.tar.gz", hash = "sha256:44a1804abffac9e6a30372bb45f6cafab945ef5af25e66b1c634c01dd39e0188", size = 75232 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/34/19/26bb6bdb9fdad5f0dfce538780814084fb667b4bc37fcb28459c14b8d3b5/pydantic_settings-2.6.0-py3-none-any.whl", hash = "sha256:4a819166f119b74d7f8c765196b165f95cc7487ce58ea27dec8a5a26be0970e0", size = 28578 },
]
[[package]] [[package]]
name = "pygments" name = "pygments"
version = "2.18.0" version = "2.18.0"
@ -394,6 +552,28 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/d0/1b/2f292bbd742e369a100c91faa0483172cd91a1a422a6692055ac920946c5/pypiwin32-223-py3-none-any.whl", hash = "sha256:67adf399debc1d5d14dffc1ab5acacb800da569754fafdc576b2a039485aa775", size = 1674 }, { url = "https://files.pythonhosted.org/packages/d0/1b/2f292bbd742e369a100c91faa0483172cd91a1a422a6692055ac920946c5/pypiwin32-223-py3-none-any.whl", hash = "sha256:67adf399debc1d5d14dffc1ab5acacb800da569754fafdc576b2a039485aa775", size = 1674 },
] ]
[[package]]
name = "pyright"
version = "1.1.387"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "nodeenv" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c2/32/e7187478d3105d6d7edc9b754d56472ee06557c25cc404911288fee1796a/pyright-1.1.387.tar.gz", hash = "sha256:577de60224f7fe36505d5b181231e3a395d427b7873be0bbcaa962a29ea93a60", size = 21939 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a0/18/c497df36641b0572f5bd59ae147b08ccaa6b8086397d50e1af97cc2ddcf6/pyright-1.1.387-py3-none-any.whl", hash = "sha256:6a1f495a261a72e12ad17e20d1ae3df4511223c773b19407cfa006229b1b08a5", size = 18577 },
]
[[package]]
name = "python-dotenv"
version = "1.0.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/bc/57/e84d88dfe0aec03b7a2d4327012c1627ab5f03652216c63d49846d7a6c58/python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca", size = 39115 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/6a/3e/b68c118422ec867fa7ab88444e1274aa40681c606d59ac27de5a5588f082/python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a", size = 19863 },
]
[[package]] [[package]]
name = "pywin32" name = "pywin32"
version = "308" version = "308"
@ -542,6 +722,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/fe/d8/4dcd312dd333f1e0664afb9a91672a684d188eb2dc18c1e6deb4901364d7/TwitchWebsocket-1.2.1-py2.py3-none-any.whl", hash = "sha256:f24a12b7bf68d9e348abeb317b63710813b44e8aadbebacdfd1077a8e5bcdfbd", size = 11897 }, { url = "https://files.pythonhosted.org/packages/fe/d8/4dcd312dd333f1e0664afb9a91672a684d188eb2dc18c1e6deb4901364d7/TwitchWebsocket-1.2.1-py2.py3-none-any.whl", hash = "sha256:f24a12b7bf68d9e348abeb317b63710813b44e8aadbebacdfd1077a8e5bcdfbd", size = 11897 },
] ]
[[package]]
name = "typing-extensions"
version = "4.12.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438 },
]
[[package]] [[package]]
name = "urllib3" name = "urllib3"
version = "2.2.3" version = "2.2.3"
@ -550,3 +739,12 @@ sdist = { url = "https://files.pythonhosted.org/packages/ed/63/22ba4ebfe7430b763
wheels = [ wheels = [
{ url = "https://files.pythonhosted.org/packages/ce/d9/5f4c13cecde62396b0d3fe530a50ccea91e7dfc1ccf0e09c228841bb5ba8/urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", size = 126338 }, { url = "https://files.pythonhosted.org/packages/ce/d9/5f4c13cecde62396b0d3fe530a50ccea91e7dfc1ccf0e09c228841bb5ba8/urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", size = 126338 },
] ]
[[package]]
name = "win32-setctime"
version = "1.1.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/6b/dd/f95a13d2b235a28d613ba23ebad55191514550debb968b46aab99f2e3a30/win32_setctime-1.1.0.tar.gz", hash = "sha256:15cf5750465118d6929ae4de4eb46e8edae9a5634350c01ba582df868e932cb2", size = 3676 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/0a/e6/a7d828fef907843b2a5773ebff47fb79ac0c1c88d60c0ca9530ee941e248/win32_setctime-1.1.0-py3-none-any.whl", hash = "sha256:231db239e959c2fe7eb1d7dc129f11172354f98361c4fa2d6d2d7e278baa8aad", size = 3604 },
]