feat: initial commit

This commit is contained in:
cătălin 2024-10-22 22:57:43 +02:00
commit beab2ff3a5
No known key found for this signature in database
24 changed files with 3506 additions and 0 deletions

112
.gitignore vendored Normal file
View file

@ -0,0 +1,112 @@
.idea/
*~
.fuse_hidden*
.directory
.Trash-*
.nfs*
.Python
[Bb]in
[Ii]nclude
[Ll]ib
[Ll]ib64
[Ll]ocal
[Ss]cripts
pyvenv.cfg
.venv
pip-selfcheck.json
*.patch
*.diff
*.bak
*.gho
*.ori
*.orig
*.tmp
[._]*.s[a-v][a-z]
# comment out the following line if you don't need vector files
!*.svg
[._]*.sw[a-p]
[._]s[a-rt-v][a-z]
[._]ss[a-gi-z]
[._]sw[a-p]
Session.vim
Sessionx.vim
.netrwhist
tags
[._]*.un~
__pycache__/
*.py[cod]
*$py.class
*.so
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
*.manifest
pip-log.txt
pip-delete-this-directory.txt
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
*.mo
*.pot
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
instance/
.webassets-cache
.scrapy
docs/_build/
.pybuilder/
target/
.ipynb_checkpoints
profile_default/
ipython_config.py
__pypackages__/
celerybeat-schedule
celerybeat.pid
*.sage.py
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
.spyderproject
.spyproject
.ropeproject
/site
.mypy_cache/
.dmypy.json
dmypy.json
.pyre/
.pytype/
cython_debug/
report.xml
.pdm-python
reportlog.json
.ruff_cache/
.pdm.toml
requirements.txt

3
.gitmodules vendored Normal file
View file

@ -0,0 +1,3 @@
[submodule "TwitchMarkovChain"]
path = TwitchMarkovChain
url = https://github.com/tomaarsen/TwitchMarkovChain.git

1
.python-version Normal file
View file

@ -0,0 +1 @@
3.11

0
README.md Normal file
View file

44
__init__.spec Normal file
View file

@ -0,0 +1,44 @@
# -*- mode: python ; coding: utf-8 -*-
# PyInstaller spec file. The names Analysis, PYZ, EXE and COLLECT are injected
# into the namespace by PyInstaller when it executes this file; they are not
# ordinary imports.

# Collect the application entry point plus every module of the package.
a = Analysis(
    ['src/__init__.py', 'src/markovbot_gui/__init__.py', 'src/markovbot_gui/bot_runner.py', 'src/markovbot_gui/config_window.py', 'src/markovbot_gui/lib/__init__.py', 'src/markovbot_gui/lib/Database.py', 'src/markovbot_gui/lib/Log.py', 'src/markovbot_gui/lib/MarkovChainBot.py', 'src/markovbot_gui/lib/Settings.py', 'src/markovbot_gui/lib/Timer.py', 'src/markovbot_gui/lib/Tokenizer.py', 'src/markovbot_gui/log_handler.py', 'src/markovbot_gui/main.py'],
    pathex=[],
    binaries=[],
    datas=[],
    hiddenimports=[],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    noarchive=False,
    optimize=0,
)
# Bundle the pure-Python modules into a PYZ archive.
pyz = PYZ(a.pure)
# Build a console executable; binaries are kept out of the exe (one-dir mode).
exe = EXE(
    pyz,
    a.scripts,
    [],
    exclude_binaries=True,
    name='__init__',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)
# Assemble the distribution folder with the exe, binaries and data files.
coll = COLLECT(
    exe,
    a.binaries,
    a.datas,
    strip=False,
    upx=True,
    upx_exclude=[],
    name='__init__',
)

46
markovbot.spec Normal file
View file

@ -0,0 +1,46 @@
# -*- mode: python ; coding: utf-8 -*-
# PyInstaller spec file for the "markovbot" GUI executable. Analysis, PYZ,
# EXE, COLLECT and Tree are injected by PyInstaller when it runs this file.

# Kivy's Windows runtime dependencies (SDL2 / GLEW DLL directories).
from kivy.deps import sdl2, glew
a = Analysis(
    ['src/markovbot_gui/main.py'],
    pathex=[],
    binaries=[],
    datas=[],
    hiddenimports=[],
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    noarchive=False,
    optimize=0,
)
pyz = PYZ(a.pure)
exe = EXE(
    pyz,
    a.scripts,
    [],
    exclude_binaries=True,
    name='markovbot',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
)
coll = COLLECT(
    exe,
    a.binaries,
    a.datas,
    # Ship the SDL2/GLEW binary trees alongside the exe so Kivy can load them.
    *[Tree(p) for p in (sdl2.dep_bins + glew.dep_bins)],
    strip=False,
    upx=True,
    upx_exclude=[],
    name='markovbot',
)

19
pyproject.toml Normal file
View file

@ -0,0 +1,19 @@
[project]
name = "markovbot-gui"
version = "0.1.0"
description = "Kivy GUI wrapper around the TwitchMarkovChain Twitch chat bot"
readme = "README.md"
requires-python = ">=3.11"
dependencies = [
"kivy[base]>=2.3.0",
"nltk>=3.9.1",
"pillow>=10.4.0",
"platformdirs>=4.3.6",
"pyinstaller>=6.11.0",
"twitchwebsocket>=1.2.1",
]
[tool.uv]
dev-dependencies = [
"ruff>=0.7.0",
]

0
src/__init__.py Normal file
View file

View file

View file

@ -0,0 +1,2 @@
<start>
<end>

View file

@ -0,0 +1,132 @@
import json
import logging
import queue
import threading
from pathlib import Path

from kivy.clock import Clock
from kivy.metrics import dp
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.textinput import TextInput

# Fix: the package directory is "lib" (both .spec files list
# src/markovbot_gui/lib/MarkovChainBot.py), not "libs" — the previous import
# raised ModuleNotFoundError at startup.
from src.markovbot_gui.lib.MarkovChainBot import MarkovChain
from src.markovbot_gui.log_handler import LogHandler
class BotRunner(BoxLayout):
    """Kivy panel that runs the Markov chain bot on a background thread.

    Shows a read-only log view fed by a queue-backed logging handler, plus
    Start/Stop buttons whose enabled state mirrors the bot lifecycle.
    """

    def __init__(self, config_path: Path, **kwargs):
        """Build the widget tree and wire the root logger into the log view.

        Args:
            config_path (Path): Location of the bot's JSON configuration file.
        """
        super().__init__(**kwargs)
        self.config_path = config_path
        self.orientation = "vertical"
        self.spacing = dp(10)
        self.padding = dp(20)
        self.bot_thread = None
        # Fix: run_bot_thread stores the running MarkovChain on `self.bot`,
        # so initialize it here — previously only `bot_instance` was set and
        # stop_bot() could raise AttributeError if the bot never started.
        self.bot = None
        self.bot_instance = None
        self.log_queue = queue.Queue()
        # Create log display
        self.log_display = TextInput(multiline=True, readonly=True, size_hint=(1, 1))
        self.add_widget(self.log_display)
        # Create button layout
        button_layout = BoxLayout(
            orientation="horizontal", size_hint=(1, None), height=dp(40), spacing=dp(10)
        )
        # Create start button
        self.start_button = Button(
            text="Start Bot",
            size_hint=(None, None),
            size=(dp(100), dp(40)),
        )
        self.start_button.bind(on_release=self.start_bot)
        button_layout.add_widget(self.start_button)
        # Create stop button
        self.stop_button = Button(
            text="Stop Bot",
            size_hint=(None, None),
            size=(dp(100), dp(40)),
            disabled=True,  # Initially disabled as bot isn't running
        )
        self.stop_button.bind(on_release=self.stop_bot)
        button_layout.add_widget(self.stop_button)
        self.add_widget(button_layout)
        # Poll the log queue on the Kivy main thread; widgets must not be
        # touched from the bot thread directly.
        Clock.schedule_interval(self.update_log, 0.1)
        # Configure logging
        self.setup_logging()

    def setup_logging(self):
        """Attach a queue-backed handler to the root logger so every log
        record (including the bot's) ends up in the on-screen log view."""
        handler = LogHandler(self.log_queue)
        formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
        handler.setFormatter(formatter)
        # Get root logger and add our handler
        root_logger = logging.getLogger()
        root_logger.addHandler(handler)
        root_logger.setLevel(logging.INFO)

    def start_bot(self, instance=None):
        """Validate the config file and launch the bot on a daemon thread.

        Args:
            instance: The Button that triggered the event (unused).
        """
        try:
            # Load configuration
            if not self.config_path.exists():
                raise FileNotFoundError(
                    f"Configuration file not found at {self.config_path}"
                )
            with open(self.config_path) as f:
                config = json.load(f)
            # Create and start bot thread
            self.bot_thread = threading.Thread(
                target=self.run_bot_thread, args=(config,), daemon=True
            )
            self.bot_thread.start()
            # Update button states
            self.start_button.disabled = True
            self.stop_button.disabled = False
            logging.info("Starting bot...")
        except Exception as e:
            logging.error(f"Failed to start bot: {e}")

    def run_bot_thread(self, config):
        """Thread target: construct the bot and block until it stops.

        Args:
            config: Parsed configuration dict. NOTE(review): currently unused —
                MarkovChain re-reads the file from self.config_path itself.
        """
        try:
            self.bot = MarkovChain(self.config_path)
            self.bot.run_bot()
        except Exception as e:
            logging.error(f"Bot error: {e}")
        finally:
            # Always re-enable start button and disable stop button when bot stops
            Clock.schedule_once(lambda dt: self.reset_button_states(), 0)

    def stop_bot(self, _=None):
        """Ask the bot to shut down and wait briefly for its thread to exit."""
        # Fix: guard against the bot never having been created (start failed,
        # or stop raced ahead of run_bot_thread's assignment).
        if self.bot is not None:
            self.bot.stop_bot()
        # Wait for thread to finish
        if self.bot_thread and self.bot_thread.is_alive():
            self.bot_thread.join(timeout=1.0)
        logging.info("Bot stopped")
        self.reset_button_states()

    def reset_button_states(self):
        """Return the buttons to the idle state (start enabled, stop disabled)."""
        self.start_button.disabled = False
        self.stop_button.disabled = True

    def update_log(self, dt):
        """Clock callback: drain queued log messages into the text display.

        Args:
            dt: Time delta supplied by Kivy's Clock (unused).
        """
        # Get all new log messages
        while not self.log_queue.empty():
            message = self.log_queue.get()
            self.log_display.text += message + "\n"
        # Auto-scroll to bottom
        self.log_display.cursor = (0, len(self.log_display.text))

View file

@ -0,0 +1,140 @@
import json
import logging
from pathlib import Path
from kivy.clock import Clock
from kivy.metrics import dp
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.textinput import TextInput
class ConfigWindow(BoxLayout):
    """Kivy form for editing the bot's connection settings.

    Loads existing values from `config_path` (falling back to built-in
    defaults), presents labelled inputs for channel, nickname and OAuth
    token, and writes the merged configuration back as JSON on save.
    """

    def __init__(self, config_path: Path, **kwargs):
        """Build the form and pre-populate it from the saved configuration.

        Args:
            config_path (Path): Location of the JSON configuration file.
        """
        super().__init__(**kwargs)
        self.config_path = config_path
        self.orientation = "vertical"
        self.spacing = dp(10)
        self.padding = dp(20)
        # Load existing configuration.
        # Defaults mirror the TwitchMarkovChain settings schema; saved values
        # from disk override these below.
        self.default_config = {
            "Host": "irc.chat.twitch.tv",
            "Port": 6667,
            "Channel": "#<channel>",
            "Nickname": "<name>",
            "Authentication": "oauth:<auth>",
            "DeniedUsers": ["StreamElements", "Nightbot", "Moobot", "Marbiebot"],
            "AllowedUsers": [],
            "Cooldown": 20,
            "KeyLength": 2,
            "MaxSentenceWordAmount": 25,
            "MinSentenceWordAmount": -1,
            "HelpMessageTimer": 60 * 60 * 5,  # 18000 seconds, 5 hours
            "AutomaticGenerationTimer": -1,
            "WhisperCooldown": True,
            "EnableGenerateCommand": True,
            "SentenceSeparator": " - ",
            "AllowGenerateParams": True,
            "GenerateCommands": ["!generate", "!g"],
        }
        try:
            if config_path.exists():
                with open(config_path) as f:
                    saved_config = json.load(f)
                # Update self.default_config with saved values
                self.default_config.update(saved_config)
        except json.JSONDecodeError:
            # Corrupt config: keep defaults and let the user re-save.
            logging.error(f"Failed to parse config file at {config_path}")
        except Exception as e:
            logging.error(f"Error loading config file: {e}")
        # Create widgets
        # Channel input
        channel_layout = BoxLayout(
            orientation="horizontal", size_hint_y=None, height=dp(40)
        )
        channel_label = Label(text="Channel:", size_hint_x=0.3)
        self.channel_input = TextInput(
            multiline=False, size_hint_x=0.7, text=self.default_config["Channel"]
        )
        channel_layout.add_widget(channel_label)
        channel_layout.add_widget(self.channel_input)
        # Nickname input
        nickname_layout = BoxLayout(
            orientation="horizontal", size_hint_y=None, height=dp(40)
        )
        nickname_label = Label(text="Nickname:", size_hint_x=0.3)
        self.nickname_input = TextInput(
            multiline=False, size_hint_x=0.7, text=self.default_config["Nickname"]
        )
        nickname_layout.add_widget(nickname_label)
        nickname_layout.add_widget(self.nickname_input)
        # Authentication input (masked, since it holds the OAuth token)
        auth_layout = BoxLayout(
            orientation="horizontal", size_hint_y=None, height=dp(40)
        )
        auth_label = Label(text="Auth:", size_hint_x=0.3)
        self.auth_input = TextInput(
            multiline=False,
            size_hint_x=0.7,
            password=True,
            text=self.default_config["Authentication"],
        )
        auth_layout.add_widget(auth_label)
        auth_layout.add_widget(self.auth_input)
        # Save button
        save_button = Button(
            text="Save",
            size_hint=(None, None),
            size=(dp(100), dp(40)),
            pos_hint={"center_x": 0.5},
        )
        save_button.bind(on_release=self.save_config)
        # Add all widgets to the layout
        self.add_widget(channel_layout)
        self.add_widget(nickname_layout)
        self.add_widget(auth_layout)
        self.add_widget(save_button)

    def save_config(self, instance):
        """Persist the form values to `self.config_path` as indented JSON.

        Shows a transient success popup, or an error popup if writing fails.

        Args:
            instance: The Button that triggered the save (unused).
        """
        # Get values from inputs
        self.default_config["Channel"] = self.channel_input.text.strip()
        self.default_config["Nickname"] = self.nickname_input.text.strip()
        self.default_config["Authentication"] = self.auth_input.text.strip()
        try:
            # Create directory if it doesn't exist
            self.config_path.parent.mkdir(parents=True, exist_ok=True)
            # Save configuration
            with self.config_path.open("w") as f:
                json.dump(self.default_config, f, indent=4)
            # Show success message
            success_popup = Popup(
                title="Success",
                content=Label(text="Configuration saved successfully"),
                size_hint=(None, None),
                size=(dp(250), dp(100)),
            )
            success_popup.open()
            # Auto-dismiss the success popup after one second.
            Clock.schedule_once(success_popup.dismiss, 1)
        except Exception as e:
            # Show error message if saving fails
            error_popup = Popup(
                title="Error",
                content=Label(text=f"Failed to save configuration:\n{str(e)}"),
                size_hint=(None, None),
                size=(dp(400), dp(150)),
            )
            error_popup.open()

View file

@ -0,0 +1,966 @@
import sqlite3
import logging
import random
import string
from typing import Any, List, Optional, Tuple
import platformdirs
logger = logging.getLogger(__name__)
class Database:
"""
The database created is called `MarkovChain_{channel}.db`,
and populated with 27 + 27^2 = 756 tables. Firstly, 27 tables with the structure of
"MarkovStart{char}", i.e. called:
> MarkovStartA
> MarkovStartB
> ...
> MarkovStartZ
> MarkovStart_
These tables store the first two words of a sentence, alongside a "count" frequency.
The suffix of the table name is the first character of the first word in the entry.
For example, from a sentence "I am the developer of this bot", "I am" is learned by creating
or updating an entry in MarkovStartI where the first word is "I", the second word is "am",
and the "count" value increments every time the sequence "I am" was learned.
If instead we learn, "[he said hello]", then "[he said" is learned by creating or updating
an entry in MarkovStart_.
Alongside the MarkovStart... tables, there are 729 tables called "MarkovGrammar{char}{char}",
i.e. called:
> MarkovGrammarAA
> MarkovGrammarAB
> ...
> MarkovGrammarAZ
> MarkovGrammarA_
> MarkovGrammarBA
> MarkovGrammarBB
> ...
> MarkovGrammar_Z
> MarkovGrammar__
These tables store 3-grams, alongside a "count" frequency of this 3-gram. The suffix of the
table name is the first character of the first word in the 3-gram, with the first character
of the second word in the 3-gram.
If we revisit the example of "I am the developer of this bot", we learn the following 3-grams:
> "I am the"
> "am the developer"
> "the developer of"
> "developer of this"
> "of this bot"
> "this bot <END>"
The 3-gram "am the developer" will be placed in MarkovGrammarAT, by creating or updating an entry
where the first word is "am", the second is "the", and the third "developer", while the "count"
frequency is incremented every time the 3-gram "am the developer" is learned.
The core of the knowledge base are the MarkovGrammar tables, which can be used to create
functions that take a certain number of words as input, and then generate a new word. For example:
Given "I am", we can use the MarkovGrammarIA table to look for entries that have "I" as the first word,
and "am" as the second word. If there are multiple options, we can use the "count" frequency as
weights to pick an appropriate "next word".
Important notes:
- Learning is *case sensitive*. The 3-gram "YOU ARE A" will become a different entry than "you are a".
This is most important when learning emotes, where the distinction between "Kappa" and "kappa" truly is important.
- Generating is *case insensitive*. Generating when using "YOU ARE" as the previous words to use in e.g. self.get_next()
will get the same results as generating using "you are".
- Learning and generating is *punctuation insensitive*. Each sentence is tokenized to split commas, dots, apostrophes, etc.
As a result, the sentence "Hello, I'm Tom!" is tokenized to: ["Hello", ",", "I", "'m", "Tom", "!"]. Then, 3-grams of this
is learned.
- Both learning and generating is *punctuation sensitive*. "Hello, how are" will learn and generate differently than
"Hello how are", as the first word is taken as "Hello,", which differs from "Hello".
A solution is to completely remove punctuation. Before learning, before generating, etc.
Essentially ignore that it exists.
However, this is not entirely desirable. In a perfect world, we would like to learn "hello,"
and "hello" differently, just like "HELLO" and "hello", but allow generating from "hello"
to both get results from "hello" and "hello,".
"""
def __init__(self, channel: str):
    """Open (creating or migrating as needed) the channel's SQLite database.

    Args:
        channel (str): The Twitch channel name, e.g. "#somechannel". The
            leading "#" is stripped and the name lowercased for the filename.
    """
    # Per-user data directory (e.g. ~/.local/share/markovbot_gui on Linux).
    self.user_data_path = platformdirs.user_data_path(
        "markovbot_gui", ensure_exists=True
    )
    self.db_path = (
        self.user_data_path / f"MarkovChain_{channel.replace('#', '').lower()}.db"
    )
    # Statements queued here are executed in one transaction by execute_commit().
    self._execute_queue = []
    if self.db_path.is_file():
        # Ensure the database is updated to the newest version
        self.update_v1(channel)
        self.update_v2()
        self.update_v3(channel)
    # Create database tables: 27 MarkovStart{char} tables and
    # 27*27 MarkovGrammar{char}{char} tables (A-Z plus "_").
    for first_char in list(string.ascii_uppercase) + ["_"]:
        self.add_execute_queue(
            f"""
        CREATE TABLE IF NOT EXISTS MarkovStart{first_char} (
            word1 TEXT COLLATE NOCASE,
            word2 TEXT COLLATE NOCASE,
            count INTEGER,
            PRIMARY KEY (word1 COLLATE BINARY, word2 COLLATE BINARY)
        );
        """,
            auto_commit=False,
        )
        for second_char in list(string.ascii_uppercase) + ["_"]:
            self.add_execute_queue(
                f"""
            CREATE TABLE IF NOT EXISTS MarkovGrammar{first_char}{second_char} (
                word1 TEXT COLLATE NOCASE,
                word2 TEXT COLLATE NOCASE,
                word3 TEXT COLLATE NOCASE,
                count INTEGER,
                PRIMARY KEY (word1 COLLATE BINARY, word2 COLLATE BINARY, word3 COLLATE BINARY)
            );
            """,
                auto_commit=False,
            )
    # Opt-out list for users who don't want to receive whispers.
    sql = """
    CREATE TABLE IF NOT EXISTS WhisperIgnore (
        username TEXT,
        PRIMARY KEY (username)
    );
    """
    self.add_execute_queue(sql)
    # Add a version entry
    sql = """
    CREATE TABLE IF NOT EXISTS Version (
        version INTEGER
    );
    """
    self.add_execute_queue(sql)
    self.add_execute_queue("DELETE FROM Version;")
    self.add_execute_queue("INSERT INTO Version (version) VALUES (3);")
    self.execute_commit()
    # Used for randomly picking a Markov Grammar if only one word is given
    # Index 0 is for "A", 1 for "B", etc. Then, 26 is for "_"
    # (English letter-frequency percentages used as sampling weights.)
    self.word_frequency = [
        11.6,
        4.4,
        5.2,
        3.1,
        2.8,
        4,
        1.6,
        4.2,
        7.3,
        0.5,
        0.8,
        2.4,
        3.8,
        2.2,
        7.6,
        4.3,
        0.2,
        2.8,
        6.6,
        15.9,
        1.1,
        0.8,
        5.5,
        0.1,
        0.7,
        0.1,
        0.5,
    ]
def update_v1(self, channel: str):
    """Update the Database structure from a deprecated version to a newer one.

    Migrates the original layout (single-letter MarkovGrammar tables, digit
    tables and "...Other" tables) into the two-letter layout with a "_"
    bucket. A backup copy of the database file is made first.

    Args:
        channel (str): The name of the Twitch channel on which the bot is running.
    """
    # If an old version of the Database is used, update the database
    if ("MarkovGrammarA",) in self.execute(
        "SELECT name FROM sqlite_master WHERE type='table';", fetch=True
    ):
        logger.info("Creating backup before updating Database...")

        # Connect to both the new and backup, backup, and close both
        def progress(status, remaining, total):
            # NOTE(review): uses the root `logging` module here, unlike the
            # module-level `logger` used elsewhere — presumably unintentional.
            logging.debug(f"Copied {total-remaining} of {total} pages...")

        # NOTE(review): these connect() calls use CWD-relative filenames,
        # while self.db_path lives under user_data_path — confirm both
        # resolve to the same file.
        conn = sqlite3.connect(f"MarkovChain_{channel.replace('#', '').lower()}.db")
        back_conn = sqlite3.connect(
            f"MarkovChain_{channel.replace('#', '').lower()}_backup.db"
        )
        with back_conn:
            conn.backup(back_conn, pages=1000, progress=progress)
        conn.close()
        back_conn.close()
        logger.info("Created backup before updating Database...")
        logger.info("Updating Database to new version for improved efficiency...")
        # Rename ...Other to ..._
        self.add_execute_queue("""
        CREATE TABLE IF NOT EXISTS MarkovStart_ (
            word1 TEXT COLLATE NOCASE,
            word2 TEXT COLLATE NOCASE,
            occurances INTEGER,
            PRIMARY KEY (word1 COLLATE BINARY, word2 COLLATE BINARY)
        );
        """)
        self.add_execute_queue("""
        CREATE TABLE IF NOT EXISTS MarkovGrammar_ (
            word1 TEXT COLLATE NOCASE,
            word2 TEXT COLLATE NOCASE,
            word3 TEXT COLLATE NOCASE,
            occurances INTEGER,
            PRIMARY KEY (word1 COLLATE BINARY, word2 COLLATE BINARY, word3 COLLATE BINARY)
        );
        """)
        self.execute_commit()
        # Copy data from Other to _ and remove Other
        self.add_execute_queue(
            "INSERT INTO MarkovGrammar_ SELECT * FROM MarkovGrammarOther;"
        )
        self.add_execute_queue(
            "INSERT INTO MarkovStart_ SELECT * FROM MarkovStartOther;"
        )
        self.add_execute_queue("DROP TABLE MarkovGrammarOther")
        self.add_execute_queue("DROP TABLE MarkovStartOther")
        self.execute_commit()
        # Copy all data from MarkovGrammarx where x is some digit to MarkovGrammar_,
        # Same with MarkovStart.
        for character in list(string.digits):
            self.add_execute_queue(
                f"INSERT INTO MarkovGrammar_ SELECT * FROM MarkovGrammar{character}"
            )
            self.add_execute_queue(f"DROP TABLE MarkovGrammar{character}")
            self.add_execute_queue(
                f"INSERT INTO MarkovStart_ SELECT * FROM MarkovStart{character}"
            )
            self.add_execute_queue(f"DROP TABLE MarkovStart{character}")
        self.execute_commit()
        # Split up MarkovGrammarA into MarkovGrammarAA, MarkovGrammarAB, etc.
        for first_char in list(string.ascii_uppercase) + ["_"]:
            for second_char in list(string.ascii_uppercase):
                self.add_execute_queue(f"""
                CREATE TABLE IF NOT EXISTS MarkovGrammar{first_char}{second_char} (
                    word1 TEXT COLLATE NOCASE,
                    word2 TEXT COLLATE NOCASE,
                    word3 TEXT COLLATE NOCASE,
                    occurances INTEGER,
                    PRIMARY KEY (word1 COLLATE BINARY, word2 COLLATE BINARY, word3 COLLATE BINARY)
                );
                """)
                # Move rows whose second word starts with `second_char` into
                # the dedicated two-letter table.
                self.add_execute_queue(
                    f'INSERT INTO MarkovGrammar{first_char}{second_char} SELECT * FROM MarkovGrammar{first_char} WHERE word2 LIKE "{second_char}%";'
                )
                self.add_execute_queue(
                    f'DELETE FROM MarkovGrammar{first_char} WHERE word2 LIKE "{second_char}%";'
                )
            # Whatever remains (second word not starting with A-Z) goes to "_".
            self.add_execute_queue(f"""
            CREATE TABLE IF NOT EXISTS MarkovGrammar{first_char}_ (
                word1 TEXT COLLATE NOCASE,
                word2 TEXT COLLATE NOCASE,
                word3 TEXT COLLATE NOCASE,
                occurances INTEGER,
                PRIMARY KEY (word1 COLLATE BINARY, word2 COLLATE BINARY, word3 COLLATE BINARY)
            );
            """)
            self.add_execute_queue(
                f"INSERT INTO MarkovGrammar{first_char}_ SELECT * FROM MarkovGrammar{first_char};"
            )
            self.add_execute_queue(f"DROP TABLE MarkovGrammar{first_char}")
        self.execute_commit()
        logger.info("Finished Updating Database to new version.")
def update_v2(self):
    """Update the Database structure from a deprecated version to a newer one.

    This update fixes a typo: the frequency column was originally named
    "occurances"; it is renamed to "count" in every Start and Grammar table.
    The presence of the typo in MarkovGrammarAA is used as the migration
    trigger.
    """
    # Resolve typo in Database
    if self.execute(
        "SELECT * FROM PRAGMA_TABLE_INFO('MarkovGrammarAA') WHERE name='occurances';",
        fetch=True,
    ):
        logger.info("Updating Database to new version...")
        for first_char in list(string.ascii_uppercase) + ["_"]:
            for second_char in list(string.ascii_uppercase) + ["_"]:
                self.execute(
                    f"ALTER TABLE MarkovGrammar{first_char}{second_char} RENAME COLUMN occurances TO count;"
                )
            self.execute(
                f"ALTER TABLE MarkovStart{first_char} RENAME COLUMN occurances TO count;"
            )
        logger.info("Finished Updating Database to new version.")
def update_v3(self, channel: str) -> None:
    """Update the Database structure to mark punctuation as a separate word.

    Previously, "Hello," was a valid single word. Now, it would be split as "Hello" and ",".
    This allows people to generate "!g hello", and have the bot generate "hello, how are you?",
    or have "!g it" result in "it's a wonderful day".

    This first copies `MarkovChain_{channel}.db` to `MarkovChain_{channel}_modified.db`.
    This new copy is then modified. The original is never changed, to avoid issues when the
    update is interrupted. As a result, running the program again will just re-attempt the
    update.

    Upon completing the update, the original database is renamed to
    `MarkovChain_{channel}_backup.db`, while the newly modified `MarkovChain_{channel}_modified.db`
    is renamed to `MarkovChain_{channel}.db`.
    *This `MarkovChain_{channel}_backup.db` file can safely be deleted, as it is NOT used*

    This function also adds a `Version` table, and sets the version to 3.

    Args:
        channel (str): The name of the Twitch channel on which the bot is running.
    """
    # Get Database version. Throws OperationalError if the Version table does not exist,
    # in which case we definitely want to upgrade.
    try:
        version = self.execute(
            "SELECT version FROM Version ORDER BY version DESC LIMIT 1;", fetch=True
        )
    except sqlite3.OperationalError:
        version = []
    # Whether to upgrade
    if not version or version[0][0] < 3:
        logger.info(
            "Updating Database to new version - supports better punctuation handling."
        )
        # Local imports: only needed during this one-off migration.
        from shutil import copyfile
        import os
        from Tokenizer import tokenize
        from nltk import ngrams

        channel = channel.replace("#", "").lower()
        # NOTE(review): copyfile/os.rename below use CWD-relative filenames,
        # while self.db_path lives under user_data_path — confirm both
        # resolve to the same directory.
        copyfile(f"MarkovChain_{channel}.db", f"MarkovChain_{channel}_modified.db")
        logger.info(
            f'Created a copy of the database called "MarkovChain_{channel}_modified.db". The update will modify this file.'
        )
        # Temporarily set self.db_name to the modified one
        self.db_path = (
            self.user_data_path
            / f"MarkovChain_{channel.replace('#', '').lower()}_modified.db"
        )
        # Create database tables.
        for first_char in list(string.ascii_uppercase) + ["_"]:
            table = f"MarkovStart{first_char}"
            self.add_execute_queue(
                f"""
            CREATE TABLE IF NOT EXISTS {table}_modified (
                word1 TEXT COLLATE NOCASE,
                word2 TEXT COLLATE NOCASE,
                count INTEGER,
                PRIMARY KEY (word1 COLLATE BINARY, word2 COLLATE BINARY)
            );
            """,
                auto_commit=False,
            )
            for second_char in list(string.ascii_uppercase) + ["_"]:
                table = f"MarkovGrammar{first_char}{second_char}"
                self.add_execute_queue(
                    f"""
                CREATE TABLE IF NOT EXISTS {table}_modified (
                    word1 TEXT COLLATE NOCASE,
                    word2 TEXT COLLATE NOCASE,
                    word3 TEXT COLLATE NOCASE,
                    count INTEGER,
                    PRIMARY KEY (word1 COLLATE BINARY, word2 COLLATE BINARY, word3 COLLATE BINARY)
                );
                """,
                    auto_commit=False,
                )
        self.execute_commit()

        def modify_start(table: str) -> None:
            """Read all data from `table`, re-tokenize it, distribute the new first 2 tokens to _modified tables, and drop `table`.

            Args:
                table (str): The name of the table to work on.
            """
            data = self.execute(f"SELECT * FROM {table};", fetch=True)
            for tup in data:
                # Remove "count" from tup for now
                count = tup[-1]
                tup = tup[:-1]
                raw_string = " ".join(tup)
                tokenized = tokenize(raw_string)
                two_gram = tokenized[:2]
                # In case there was some issue in the previous Database
                if len(two_gram) < 2:
                    continue
                # Insert, adding the old count if the 2-gram already exists.
                self.add_execute_queue(
                    f"""
                INSERT OR REPLACE INTO MarkovStart{self.get_suffix(two_gram[0][0])}_modified (word1, word2, count)
                VALUES (?, ?, coalesce (
                    (
                        SELECT count + {count} FROM MarkovStart{self.get_suffix(two_gram[0][0])}_modified
                        WHERE word1 = ? COLLATE BINARY
                        AND word2 = ? COLLATE BINARY
                    ),
                    1
                )
                )""",
                    values=two_gram + two_gram,
                    auto_commit=False,
                )
            self.execute(f"DROP TABLE {table};")

        def modify_grammar(table: str) -> None:
            """Read all data from `table`, re-tokenize it, distribute the new 3-grams to _modified tables, and drop `table`.

            Args:
                table (str): The name of the table to work on.
            """
            data = self.execute(f"SELECT * FROM {table};", fetch=True)
            for tup in data:
                # Remove "count" from tup for now
                count = tup[-1]
                tup = tup[:-1]
                # If ends on "<END>", ignore that in in the tuple, as we don't want it to get
                # tokenized.
                end = False
                if tup[-1] == "<END>":
                    end = True
                    tup = tup[:-1]
                raw_string = " ".join(tup)
                tokenized = tokenize(raw_string)
                # Re-add "<END>"
                if end:
                    tokenized.append("<END>")
                for ngram in ngrams(tokenized, 3):
                    # Filter out recursive case.
                    if self.check_equal(ngram):
                        continue
                    self.add_execute_queue(
                        f"""
                    INSERT OR REPLACE INTO MarkovGrammar{self.get_suffix(ngram[0][0])}{self.get_suffix(ngram[1][0])}_modified (word1, word2, word3, count)
                    VALUES (?, ?, ?, coalesce (
                        (
                            SELECT count + {count} FROM MarkovGrammar{self.get_suffix(ngram[0][0])}{self.get_suffix(ngram[1][0])}_modified
                            WHERE word1 = ? COLLATE BINARY
                            AND word2 = ? COLLATE BINARY
                            AND word3 = ? COLLATE BINARY
                        ),
                        1
                    )
                    )""",
                        values=ngram + ngram,
                        auto_commit=False,
                    )
            self.execute(f"DROP TABLE {table};")

        # Modify all tables
        i = 0
        total = 27 * 27 + 27  # The number of tables to convert
        for first_char in list(string.ascii_uppercase) + ["_"]:
            table = f"MarkovStart{first_char}"
            modify_start(table)
            i += 1
            for second_char in list(string.ascii_uppercase) + ["_"]:
                table = f"MarkovGrammar{first_char}{second_char}"
                modify_grammar(table)
                i += 1
            logger.debug(
                f"[{i / total * 100:.2f}%] Scheduled updates for the tables for words starting in {first_char}."
            )
        logger.info("Starting executing table update...")
        self.execute_commit()
        logger.info("Finished executing table update.")
        # Rename the _modified tables to normal tables again
        for first_char in list(string.ascii_uppercase) + ["_"]:
            table = f"MarkovStart{first_char}"
            self.add_execute_queue(
                f"ALTER TABLE {table}_modified RENAME TO {table};",
                auto_commit=False,
            )
            for second_char in list(string.ascii_uppercase) + ["_"]:
                table = f"MarkovGrammar{first_char}{second_char}"
                self.add_execute_queue(
                    f"ALTER TABLE {table}_modified RENAME TO {table};",
                    auto_commit=False,
                )
        self.execute_commit()
        # Turn the non-modified, old version of the Database into a "_backup.db" file,
        # and turn the modified file into the new main file.
        os.rename(f"MarkovChain_{channel}.db", f"MarkovChain_{channel}_backup.db")
        os.rename(f"MarkovChain_{channel}_modified.db", f"MarkovChain_{channel}.db")
        # Revert to using .db instead of _modified.db
        self.db_path = (
            self.user_data_path
            / f"MarkovChain_{channel.replace('#', '').lower()}.db"
        )
        # Add a version entry
        self.execute("""CREATE TABLE IF NOT EXISTS Version (
        version INTEGER
        );""")
        self.execute("DELETE FROM Version;")
        self.execute("INSERT INTO Version (version) VALUES (3);")
        logger.info(
            f'Renamed original database file "MarkovChain_{channel}.db" to "MarkovChain_{channel}_backup.db". This file is *not* used, and can safely be deleted.'
        )
        logger.info(
            f'Renamed updated database file "MarkovChain_{channel}_modified.db" to "MarkovChain_{channel}.db".'
        )
        logger.info(
            f'This updated "MarkovChain_{channel}.db" will be used to drive the Twitch bot.'
        )
def add_execute_queue(
self, sql: str, values: Tuple[Any] = None, auto_commit: bool = True
) -> None:
"""Add query and corresponding values to a queue, to be executed all at once.
This entire queue can be executed with `self.execute_commit`,
and the queue is automatically executed if there are more than 25 waiting queries.
Args:
sql (str): The SQL query to add, potentially with "?" for where
a value ought to be filled in.
values ([Tuple[Any]], optional): Optional tuple of values to replace "?" in SQL queries.
Defaults to None.
"""
if values is not None:
self._execute_queue.append([sql, values])
else:
self._execute_queue.append([sql])
# Commit these executes if there are more than 25 queries
if auto_commit and len(self._execute_queue) > 25:
self.execute_commit()
def execute_commit(self, fetch: bool = False) -> Any:
"""Execute the SQL queries added to the queue with `self.add_execute_queue`.
Args:
fetch (bool, optional): Whether to return the fetchall() of the SQL queries.
Defaults to False.
Returns:
Any: The returned values from the SQL queries if `fetch` is true, otherwise None.
"""
if self._execute_queue:
with sqlite3.connect(self.db_path) as conn:
cur = conn.cursor()
cur.execute("begin")
for sql in self._execute_queue:
cur.execute(*sql)
self._execute_queue.clear()
cur.execute("commit")
if fetch:
return cur.fetchall()
def execute(self, sql: str, values: Tuple[Any] = None, fetch: bool = False):
"""Execute the SQL query with the corresponding values, potentially returning a result.
Args:
sql (str): The SQL query to add, potentially with "?" for where
a value ought to be filled in.
values ([Tuple[Any]], optional): Optional tuple of values to replace "?" in SQL queries.
Defaults to None.
fetch (bool, optional): Whether to return the fetchall() of the SQL queries.
Defaults to False.
Returns:
Any: The returned values from the SQL queries if `fetch` is true, otherwise None.
"""
with sqlite3.connect(self.db_path) as conn:
cur = conn.cursor()
if values is None:
cur.execute(sql)
else:
cur.execute(sql, values)
conn.commit()
if fetch:
return cur.fetchall()
@staticmethod
def get_suffix(character: str) -> str:
"""Transform a character into a member of string.ascii_lowercase or "_".
Args:
character (str): The character to normalize.
Returns:
str: The normalized character
"""
if character.lower() in string.ascii_lowercase:
return character.upper()
return "_"
def add_whisper_ignore(self, username: str) -> None:
"""Add `username` to the WhisperIgnore table, indicating that they do not wish to be whispered.
Args:
username (str): The username of the user who no longer wants to be whispered.
"""
self.execute(
"""
INSERT OR IGNORE INTO WhisperIgnore(username)
SELECT ?;""",
values=(username,),
)
def check_whisper_ignore(self, username: str) -> List[Tuple[str]]:
"""Returns a non-empty list only if `username` is in the WhisperIgnore table.
Otherwise, returns an empty list. Is used to ensure that a user who doesn't want to be
whispered is never whispered.
Args:
username (str): The username of the user to check.
Returns:
List[Tuple[str]]: Either an empty list, or [('test_user',)].
Allows the use of `if not check_whisper_ignore(user): whisper(user)`
"""
return self.execute(
"""
SELECT username FROM WhisperIgnore
WHERE username = ?;""",
values=(username,),
fetch=True,
)
def remove_whisper_ignore(self, username: str) -> None:
"""Remove `username` from the WhisperIgnore table, indicating that they want to be whispered again.
Args:
username (str): The username of the user who wants to be whispered again.
"""
self.execute(
"""
DELETE FROM WhisperIgnore
WHERE username = ?;""",
values=(username,),
)
@staticmethod
def check_equal(items: list) -> bool:
"""True if `items` consists of items that are all identical
Useful for checking if we're learning that a sequence of the same words leads to the same word,
which can cause infinite loops when generating.
Args:
items (list): The list of objects for which we want to check if they are all identical.
Returns:
bool: True if `l` consists of items that are all identical
"""
return items[0] * len(items) == items
def get_next(self, index: int, words: List[str]) -> Optional[str]:
    """Generate the next word in the sentence using learned data, given the previous `key_length` words.

    `key_length` is set to 2 by default, and cannot easily be changed.

    Args:
        index (int): The index of this new word in the sentence.
        words (List[str]): The previous 2 words.

    Returns:
        Optional[str]: The next word in the sentence, generated given the learned data,
            or None if nothing has been learned for this 2-word key.
    """
    # Get all (word3, count) candidates; the grammar is sharded into tables
    # named after the first letter of each key word (e.g. MarkovGrammarHA).
    data = self.execute(
        f"""
        SELECT word3, count FROM MarkovGrammar{self.get_suffix(words[0][0])}{self.get_suffix(words[1][0])}
        WHERE word1 = ? AND word2 = ?;""",
        values=words,
        fetch=True,
    )
    # Return a word picked from the data, using count as a weighting factor
    return None if len(data) == 0 else self.pick_word(data, index)
def get_next_initial(self, index: int, words: List[str]) -> Optional[str]:
    """Generate the next word in the sentence using learned data, given the previous `key_length` words.

    `key_length` is set to 2 by default, and cannot easily be changed.
    Similar to `get_next`, with the exception that it cannot immediately generate "<END>".

    Args:
        index (int): The index of this new word in the sentence.
        words (List[str]): The previous 2 words.

    Returns:
        Optional[str]: The next word in the sentence, generated given the learned data,
            or None if there is no non-"<END>" continuation for this key.
    """
    # Get all candidates, excluding "<END>" so the sentence cannot end here.
    data = self.execute(
        f"""
        SELECT word3, count FROM MarkovGrammar{self.get_suffix(words[0][0])}{self.get_suffix(words[1][0])}
        WHERE word1 = ? AND word2 = ? AND word3 != '<END>';""",
        values=words,
        fetch=True,
    )
    # Return a word picked from the data, using count as a weighting factor
    return None if len(data) == 0 else self.pick_word(data, index)
def get_next_single_initial(self, index: int, word: str) -> Optional[List[str]]:
    """Generate the next word in the sentence using learned data, given the previous word.

    Randomly picks a start character for the second word by weighing all uppercase
    letters and "_" with their word frequency.

    Args:
        index (int): The index of this new word in the sentence.
        word (str): The previous word.

    Returns:
        Optional[List[str]]: The previous and newly generated word in the sentence as a list,
            generated given the learned data. So, the previous word is taken directly from
            the input of this method, and the second word is generated. None if no
            continuation exists in the randomly chosen shard.
    """
    # Randomly pick first character for the second word.
    # NOTE(review): assumes self.word_frequency holds one weight per entry of
    # ascii_uppercase + "_" (27 weights) -- confirm where it is populated.
    char_two = random.choices(
        string.ascii_uppercase + "_", weights=self.word_frequency
    )[0]
    # Get all items, excluding "<END>" so the sentence cannot end here.
    data = self.execute(
        f"""
        SELECT word2, count FROM MarkovGrammar{self.get_suffix(word[0])}{char_two}
        WHERE word1 = ? AND word2 != '<END>';""",
        values=(word,),
        fetch=True,
    )
    # Return a word picked from the data, using count as a weighting factor
    return None if len(data) == 0 else [word] + [self.pick_word(data, index)]
def get_next_single_start(self, word: str) -> Optional[List[str]]:
    """Generate the second word in the sentence using learned data, given the very first word in the sentence.

    Args:
        word (str): The first word in the sentence.

    Returns:
        Optional[List[str]]: The first and second word in the sentence as a list,
            generated given the learned data. So, the first word is taken directly from
            the input of this method, and the second word is generated. None if `word`
            was never seen as a sentence start.
    """
    # Look up sentence starts in the MarkovStart shard for this first letter.
    data = self.execute(
        f"""
        SELECT word2, count FROM MarkovStart{self.get_suffix(word[0])}
        WHERE word1 = ?;""",
        values=(word,),
        fetch=True,
    )
    # Return a word picked from the data, using count as a weighting factor
    return None if len(data) == 0 else [word] + [self.pick_word(data)]
@staticmethod
def pick_word(data: List[Tuple[str, int]], index: int = 0) -> str:
"""Randomly pick a word from `data` with word frequency as the weight.
`index` is further used to decrease the weight of the <END> token for the first 15 words
in the sequence, and then increase the weight after the 15th index.
Args:
data ([type]): A list of word - frequency pairs, e.g.
[('"the', 1), ('long', 1), ('well', 5), ('an', 2), ('a', 3), ('much', 1)]
index (int, optional): The index of the newly generated word in the sentence.
Used for modifying how often the <END> token occurs. Defaults to 0.
Returns:
str: The pseudo-randomly picked word.
"""
return random.choices(
data,
weights=[
tup[-1] * ((index + 1) / 15) if tup[0] == "<END>" else tup[-1]
for tup in data
],
)[0][0]
def get_start(self) -> List[str]:
    """Get a list of two words that mark as the start of a sentence.

    This is randomly gathered from MarkovStart{character}.

    Returns:
        List[str]: A list of two starting words, such as ["I", "am"],
            or an empty list if nothing has ever been learned.
    """
    # Find one character start from.
    # NOTE(review): the suffix here is lowercase while other queries use
    # uppercase suffixes; SQLite table names are case-insensitive, so both
    # resolve to the same table.
    character = random.choices(
        list(string.ascii_lowercase) + ["_"], weights=self.word_frequency, k=1
    )[0]
    # Get all first word, second word, frequency triples,
    # e.g. [("I", "am", 3), ("You", "are", 2), ...]
    data = self.execute(f"SELECT * FROM MarkovStart{character};", fetch=True)
    # If nothing has ever been said
    if len(data) == 0:
        return []
    # Return a (weighted) randomly chosen 2-gram; [:-1] drops the count column.
    return list(
        random.choices(data, weights=[tup[-1] for tup in data], k=1)[0][:-1]
    )
def add_rule_queue(self, item: List[str]) -> None:
    """Adds a rule to the queue, ready to be entered into the knowledge base, given a 3-gram `item`.

    The rules on the queue are added with `self.add_execute_queue`,
    which automatically executes the queries in the queue when there are enough queries waiting.
    Whenever `item` consists of three identical words, e.g. ["Kappa", "Kappa", "Kappa"], then
    we perform no learning. If we did, this could cause infinite recursion in generation.

    Args:
        item (List[str]): A 3-gram, e.g. ['How', 'are', 'you']. This is learned by placing this
            in the MarkovGrammarHA table, where it can be seen as:
            *Given ["How", "are"], then "you" is a potential output*
            The frequency of this word as an output is then incremented,
            allowing for weighted picking of outputs.
    """
    # Filter out recursive case.
    if self.check_equal(item):
        return
    if (
        "" in item
    ):  # prevent adding invalid rules. Ideally this wouldn't trigger, but it seems to happen rarely.
        logger.warning(
            f"Failed to add item to rules. Item contains empty string: {item!r}"
        )
        return
    # Upsert: insert with count 1, or replace with the incremented count.
    # COLLATE BINARY makes the match case-sensitive.
    self.add_execute_queue(
        f"""
        INSERT OR REPLACE INTO MarkovGrammar{self.get_suffix(item[0][0])}{self.get_suffix(item[1][0])} (word1, word2, word3, count)
        VALUES (?, ?, ?, coalesce(
        (
        SELECT count + 1 FROM MarkovGrammar{self.get_suffix(item[0][0])}{self.get_suffix(item[1][0])}
        WHERE word1 = ? COLLATE BINARY AND word2 = ? COLLATE BINARY AND word3 = ? COLLATE BINARY
        ),
        1)
        )""",
        values=item + item,
    )
def add_start_queue(self, item: List[str]) -> None:
    """Adds a rule to the queue, ready to be entered into the knowledge base, given a 2-gram `item`.

    The rules on the queue are added with `self.add_execute_queue`,
    which automatically executes the queries in the queue when there are enough queries waiting.

    Args:
        item (List[str]): A 2-gram, e.g. ['How', 'are']. This is learned by placing this
            in the MarkovStartH table, where it can be randomly (with frequency as weight)
            picked as a start of a sentence.
    """
    # Upsert: insert with count 1, or replace with the incremented count.
    self.add_execute_queue(
        f"""
        INSERT OR REPLACE INTO MarkovStart{self.get_suffix(item[0][0])} (word1, word2, count)
        VALUES (?, ?, coalesce(
        (
        SELECT count + 1 FROM MarkovStart{self.get_suffix(item[0][0])}
        WHERE word1 = ? COLLATE BINARY AND word2 = ? COLLATE BINARY
        ),
        1)
        )""",
        values=item + item,
    )
def unlearn(self, message: str) -> None:
    """Remove frequency of 3-grams from `message` from the knowledge base.

    Useful when a message is deleted - usually we want the bot to say those things less frequently.
    The frequency count for each of the 3-grams is reduced by 5, i.e. the message is unlearned by 5
    times the rate that a message is learned.
    If this means the frequency for the 3-gram becomes negative,
    we delete the 3-gram from the knowledge base entirely.

    Args:
        message (str): The message to unlearn.
    """
    words = message.split(" ")
    # Construct 3-grams
    tuples = [
        (words[i], words[i + 1], words[i + 2]) for i in range(0, len(words) - 2)
    ]
    # Unlearn start of sentence from MarkovStart
    if len(words) > 1:
        # Reduce "count" by 5
        self.add_execute_queue(
            f"""
            UPDATE MarkovStart{self.get_suffix(words[0][0])}
            SET count = count - 5
            WHERE word1 = ? AND word2 = ?;""",
            values=(
                words[0],
                words[1],
            ),
        )
        # Delete if count is now less than 0.
        self.add_execute_queue(
            f"""
            DELETE FROM MarkovStart{self.get_suffix(words[0][0])}
            WHERE word1 = ? AND word2 = ? AND count <= 0;""",
            values=(
                words[0],
                words[1],
            ),
        )
    # Unlearn all 3 word sections from Grammar
    for word1, word2, word3 in tuples:
        # Reduce "count" by 5
        self.add_execute_queue(
            f"""
            UPDATE MarkovGrammar{self.get_suffix(word1[0])}{self.get_suffix(word2[0])}
            SET count = count - 5
            WHERE word1 = ? AND word2 = ? AND word3 = ?;""",
            values=(
                word1,
                word2,
                word3,
            ),
        )
        # Delete if count is now less than 0.
        self.add_execute_queue(
            f"""
            DELETE FROM MarkovGrammar{self.get_suffix(word1[0])}{self.get_suffix(word2[0])}
            WHERE word1 = ? AND word2 = ? AND word3 = ? AND count <= 0;""",
            values=(
                word1,
                word2,
                word3,
            ),
        )
    # Flush the queued UPDATE/DELETE statements immediately.
    self.execute_commit()

View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2019 CubieDev
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -0,0 +1,29 @@
import logging
import os
import logging.config
class Log:
    """Configure the `logging` module for the application.

    If the PYTHON_LOGGING_CONFIG environment variable points to a logging
    config file, that file is used, with the log filename derived from the
    entry-point module and the configured channel. Otherwise a basic
    INFO-level console configuration is applied.
    """

    def __init__(self, main_file: str):
        """Set up logging.

        Args:
            main_file (str): Path of the calling module (usually __file__);
                its basename is used to build the log file name.
        """
        this_file = os.path.basename(main_file)
        # BUG FIX: the package directory is `lib`, not `libs` (see the
        # project layout); the old import path raised ModuleNotFoundError.
        # Imported inside __init__ to avoid an import cycle at module load.
        from src.markovbot_gui.lib.Settings import Settings

        # If you have a logging config like me, use it
        if "PYTHON_LOGGING_CONFIG" in os.environ:
            logging.config.fileConfig(
                os.environ.get("PYTHON_LOGGING_CONFIG"),
                defaults={
                    "logfilename": this_file.replace(".py", "_")
                    + Settings.get_channel()
                    + ".log"
                },
                disable_existing_loggers=False,
            )
        else:
            # If you don't, use a standard config that outputs some INFO in the console
            logging.basicConfig(
                level=logging.INFO,
                format="[%(asctime)s] [%(name)s] [%(levelname)-8s] - %(message)s",
            )

View file

@ -0,0 +1,675 @@
from pathlib import Path
from typing import List, Tuple

import socket
import time
import logging
import re
import string

from TwitchWebsocket import Message, TwitchWebsocket
from nltk.tokenize import sent_tokenize

# BUG FIX: the package directory is `lib`, not `libs` (see the project
# layout); the old `libs` paths raised ModuleNotFoundError.
from src.markovbot_gui.lib.Settings import Settings, get_settings
from src.markovbot_gui.lib.Database import Database
from src.markovbot_gui.lib.Timer import LoopingTimer
from src.markovbot_gui.lib.Tokenizer import detokenize, tokenize
from src.markovbot_gui.lib.Log import Log

# Configure logging for this module before the bot starts.
Log(__file__)
logger = logging.getLogger(__name__)
class MarkovChain:
def __init__(self, settings_path: Path | None = None):
    """Set up the bot: blacklist, settings, database, timers and websocket.

    Args:
        settings_path (Path | None): Optional path to the settings file;
            None uses the default location.

    Raises:
        ValueError: If "HelpMessageTimer" is positive but below 300 seconds,
            or "AutomaticGenerationTimer" is positive but below 30 seconds.
    """
    self.settings_path = settings_path
    # Timestamp of the previously generated message, for cooldown tracking.
    self.prev_message_t = 0
    self._enabled = True
    # This regex should detect similar phrases as links as Twitch does.
    # BUG FIX: use a raw string -- "\w" in a plain literal is an invalid
    # escape sequence (DeprecationWarning); the compiled pattern is identical.
    self.link_regex = re.compile(r"\w+\.[a-z]{2,}")
    # List of moderators used in blacklist modification, includes broadcaster
    self.mod_list = []
    self.set_blacklist()
    # Fill previously initialised variables with data from the settings.txt file
    self.settings = get_settings(settings_path=self.settings_path)
    self.set_settings()
    self.db = Database(self.chan)
    # Set up daemon Timer to send help messages
    if self.help_message_timer > 0:
        if self.help_message_timer < 300:
            raise ValueError(
                'Value for "HelpMessageTimer" in must be at least 300 seconds, or a negative number for no help messages.'
            )
        t = LoopingTimer(self.help_message_timer, self.send_help_message)
        t.start()
    # Set up daemon Timer to send automatic generation messages
    if self.automatic_generation_timer > 0:
        if self.automatic_generation_timer < 30:
            raise ValueError(
                'Value for "AutomaticGenerationMessage" in must be at least 30 seconds, or a negative number for no automatic generations.'
            )
        t = LoopingTimer(
            self.automatic_generation_timer, self.send_automatic_generation_message
        )
        t.start()
    # The websocket client; note that this does not start listening yet --
    # run_bot() must be called for that.
    self.ws = TwitchWebsocket(
        host=self.host,
        port=self.port,
        chan=self.chan,
        nick=self.nick,
        auth=self.auth,
        callback=self.message_handler,
        capability=["commands", "tags"],
        live=True,
    )
def run_bot(self):
    """Start the websocket client (delegates to TwitchWebsocket.start_bot)."""
    self.ws.start_bot()
def stop_bot(self):
    """Stop the websocket client (delegates to TwitchWebsocket.stop)."""
    self.ws.stop()
def set_settings(self):
    """Copy the values read from the settings file onto instance attributes."""
    settings = self.settings.read_settings()
    self.host = settings["Host"]
    self.port = settings["Port"]
    self.chan = settings["Channel"]
    self.nick = settings["Nickname"]
    self.auth = settings["Authentication"]
    # The bot's own nickname is always denied so it never learns from itself.
    self.denied_users = [user.lower() for user in settings["DeniedUsers"]] + [
        self.nick.lower()
    ]
    self.allowed_users = [user.lower() for user in settings["AllowedUsers"]]
    self.cooldown = settings["Cooldown"]
    self.key_length = settings["KeyLength"]
    self.max_sentence_length = settings["MaxSentenceWordAmount"]
    self.min_sentence_length = settings["MinSentenceWordAmount"]
    self.help_message_timer = settings["HelpMessageTimer"]
    self.automatic_generation_timer = settings["AutomaticGenerationTimer"]
    self.whisper_cooldown = settings["WhisperCooldown"]
    self.enable_generate_command = settings["EnableGenerateCommand"]
    self.sent_separator = settings["SentenceSeparator"]
    self.allow_generate_params = settings["AllowGenerateParams"]
    self.generate_commands = tuple(settings["GenerateCommands"])
def message_handler(self, m: Message):
    """Dispatch an incoming Twitch IRC message by its type.

    Handles: join confirmation ("366"), NOTICE (mod-list responses),
    PRIVMSG/WHISPER admin commands (!enable, !disable, !setcooldown),
    generation requests, learning from regular chat messages, whisper
    opt-in/out and blacklist management via whispers, and CLEARMSG
    (unlearning deleted messages). All exceptions are caught and logged
    so that one bad message cannot kill the websocket callback.

    Args:
        m (Message): The Message object that was sent from Twitch.
    """
    try:
        if m.type == "366":
            logger.info(f"Successfully joined channel: #{m.channel}")
            # Get the list of mods used for modifying the blacklist
            logger.info("Fetching mod list...")
            self.ws.send_message("/mods")
        elif m.type == "NOTICE":
            # Check whether the NOTICE is a response to our /mods request
            if m.message.startswith("The moderators of this channel are:"):
                string_list = m.message.replace(
                    "The moderators of this channel are:", ""
                ).strip()
                # The broadcaster (channel name) is always treated as a mod.
                self.mod_list = [m.channel] + string_list.split(", ")
                logger.info(
                    f"Fetched mod list. Found {len(self.mod_list) - 1} mods."
                )
            elif m.message == "There are no moderators of this channel.":
                self.mod_list = [m.channel]
                logger.info("Fetched mod list. Found no mods.")
            # If it is not, log this NOTICE
            else:
                logger.info(m.message)
        elif m.type in ("PRIVMSG", "WHISPER"):
            # Admin commands work both in chat and via whispers.
            if m.message.startswith("!enable") and self.check_if_permissions(m):
                if self._enabled:
                    self.ws.send_whisper(
                        m.user, "The generate command is already enabled."
                    )
                else:
                    self.ws.send_whisper(
                        m.user, "Users can now use generate command again."
                    )
                    self._enabled = True
                    logger.info("Users can now use generate command again.")
            elif m.message.startswith("!disable") and self.check_if_permissions(m):
                if self._enabled:
                    self.ws.send_whisper(
                        m.user, "Users can now no longer use generate command."
                    )
                    self._enabled = False
                    logger.info("Users can now no longer use generate command.")
                else:
                    self.ws.send_whisper(
                        m.user, "The generate command is already disabled."
                    )
            elif m.message.startswith(
                ("!setcooldown", "!setcd")
            ) and self.check_if_permissions(m):
                split_message = m.message.split(" ")
                if len(split_message) == 2:
                    try:
                        cooldown = int(split_message[1])
                    except ValueError:
                        self.ws.send_whisper(
                            m.user,
                            "The parameter must be an integer amount, eg: !setcd 30",
                        )
                        return
                    self.cooldown = cooldown
                    # Persist the new cooldown to the settings file.
                    Settings.update_cooldown(cooldown)
                    self.ws.send_whisper(
                        m.user,
                        f"The !generate cooldown has been set to {cooldown} seconds.",
                    )
                else:
                    self.ws.send_whisper(
                        m.user,
                        "Please add exactly 1 integer parameter, eg: !setcd 30.",
                    )
            if m.type == "PRIVMSG":
                # Ignore bot messages
                if m.user.lower() in self.denied_users:
                    return
                if self.check_if_generate(m.message):
                    if (
                        not self.enable_generate_command
                        and not self.check_if_permissions(m)
                    ):
                        return
                    if not self._enabled:
                        if not self.db.check_whisper_ignore(m.user):
                            self.send_whisper(
                                m.user,
                                "The !generate has been turned off. !nopm to stop me from whispering you.",
                            )
                        return
                    cur_time = time.time()
                    # Bypass the cooldown for privileged users.
                    if (
                        self.prev_message_t + self.cooldown < cur_time
                        or self.check_if_permissions(m)
                    ):
                        if self.check_filter(m.message):
                            sentence = "You can't make me say that, you madman!"
                        else:
                            params = (
                                tokenize(m.message)[2:]
                                if self.allow_generate_params
                                else None
                            )
                            # Generate an actual sentence
                            sentence, success = self.generate(params)
                            if success:
                                # Reset cooldown if a message was actually generated
                                self.prev_message_t = time.time()
                        logger.info(sentence)
                        self.ws.send_message(sentence)
                    else:
                        if not self.db.check_whisper_ignore(m.user):
                            self.send_whisper(
                                m.user,
                                f"Cooldown hit: {self.prev_message_t + self.cooldown - cur_time:0.2f} out of {self.cooldown:.0f}s remaining. !nopm to stop these cooldown pm's.",
                            )
                        logger.info(
                            f"Cooldown hit with {self.prev_message_t + self.cooldown - cur_time:0.2f}s remaining."
                        )
                    return
                # Send help message when requested.
                elif m.message.startswith(("!ghelp", "!genhelp", "!generatehelp")):
                    self.send_help_message()
                # Ignore the message if it is deemed a command
                elif self.check_if_other_command(m.message):
                    return
                # Ignore the message if it contains a link.
                elif self.check_link(m.message):
                    return
                if "emotes" in m.tags:
                    # If the list of emotes contains "emotesv2_", then the message contains a bit emote,
                    # and we choose not to learn from those messages.
                    if "emotesv2_" in m.tags["emotes"]:
                        return
                    # Replace modified emotes with normal versions,
                    # as the bot will never have the modified emotes unlocked at the time.
                    for modifier in self.extract_modifiers(m.tags["emotes"]):
                        m.message = m.message.replace(modifier, "")
                # Ignore the message if any word in the sentence is on the ban filter
                if self.check_filter(m.message):
                    logger.warning(
                        f'Sentence contained blacklisted word or phrase:"{m.message}"'
                    )
                    return
                else:
                    # Try to split up sentences. Requires nltk's 'punkt' resource
                    try:
                        sentences = sent_tokenize(m.message.strip())
                    # If 'punkt' is not downloaded, then download it, and retry
                    except LookupError:
                        logger.debug("Downloading required punkt resource...")
                        import nltk

                        nltk.download("punkt")
                        logger.debug("Downloaded required punkt resource.")
                        sentences = sent_tokenize(m.message.strip())
                    for sentence in sentences:
                        # Get all separate words
                        words = tokenize(sentence)
                        # Double spaces will lead to invalid rules. We remove empty words here
                        if "" in words:
                            words = [word for word in words if word]
                        # If the sentence is too short, ignore it and move on to the next.
                        if len(words) <= self.key_length:
                            continue
                        # Add a new starting point for a sentence to the <START>
                        # self.db.add_rule(["<START>"] + [words[x] for x in range(self.key_length)])
                        self.db.add_start_queue(
                            [words[x] for x in range(self.key_length)]
                        )
                        # Create Key variable which will be used as a key in the Dictionary for the grammar
                        key = list()
                        for word in words:
                            # Set up key for first use
                            if len(key) < self.key_length:
                                key.append(word)
                                continue
                            self.db.add_rule_queue(key + [word])
                            # Remove the first word, and add the current word,
                            # so that the key is correct for the next word.
                            key.pop(0)
                            key.append(word)
                        # Add <END> at the end of the sentence
                        self.db.add_rule_queue(key + ["<END>"])
            elif m.type == "WHISPER":
                # Allow people to whisper the bot to disable or enable whispers.
                if m.message == "!nopm":
                    logger.debug(f"Adding {m.user} to Do Not Whisper.")
                    self.db.add_whisper_ignore(m.user)
                    self.ws.send_whisper(
                        m.user,
                        "You will no longer be sent whispers. Type !yespm to reenable. ",
                    )
                elif m.message == "!yespm":
                    logger.debug(f"Removing {m.user} from Do Not Whisper.")
                    self.db.remove_whisper_ignore(m.user)
                    self.ws.send_whisper(
                        m.user,
                        "You will again be sent whispers. Type !nopm to disable again. ",
                    )
                # Note that I add my own username to this list to allow me to manage the
                # blacklist in channels of my bot in channels I am not modded in.
                # I may modify this and add a "allowed users" field in the settings file.
                elif (
                    m.user.lower() in self.mod_list + ["cubiedev"] + self.allowed_users
                ):
                    # Adding to the blacklist
                    if self.check_if_our_command(m.message, "!blacklist"):
                        if len(m.message.split()) == 2:
                            # TODO: Remove newly blacklisted word from the Database
                            word = m.message.split()[1].lower()
                            self.blacklist.append(word)
                            logger.info(f"Added `{word}` to Blacklist.")
                            self.write_blacklist(self.blacklist)
                            self.ws.send_whisper(m.user, "Added word to Blacklist.")
                        else:
                            self.ws.send_whisper(
                                m.user,
                                "Expected Format: `!blacklist word` to add `word` to the blacklist",
                            )
                    # Removing from the blacklist
                    elif self.check_if_our_command(m.message, "!whitelist"):
                        if len(m.message.split()) == 2:
                            word = m.message.split()[1].lower()
                            try:
                                self.blacklist.remove(word)
                                logger.info(f"Removed `{word}` from Blacklist.")
                                self.write_blacklist(self.blacklist)
                                self.ws.send_whisper(
                                    m.user, "Removed word from Blacklist."
                                )
                            except ValueError:
                                self.ws.send_whisper(
                                    m.user, "Word was already not in the blacklist."
                                )
                        else:
                            self.ws.send_whisper(
                                m.user,
                                "Expected Format: `!whitelist word` to remove `word` from the blacklist.",
                            )
                    # Checking whether a word is in the blacklist
                    elif self.check_if_our_command(m.message, "!check"):
                        if len(m.message.split()) == 2:
                            word = m.message.split()[1].lower()
                            if word in self.blacklist:
                                self.ws.send_whisper(
                                    m.user, "This word is in the Blacklist."
                                )
                            else:
                                self.ws.send_whisper(
                                    m.user, "This word is not in the Blacklist."
                                )
                        else:
                            self.ws.send_whisper(
                                m.user,
                                "Expected Format: `!check word` to check whether `word` is on the blacklist.",
                            )
        elif m.type == "CLEARMSG":
            # If a message is deleted, its contents will be unlearned
            # or rather, the "occurrences" attribute of each combinations of words in the sentence
            # is reduced by 5, and deleted if the occurrences is now less than 1.
            self.db.unlearn(m.message)
            # TODO: Think of some efficient way to check whether it was our message that got deleted.
            # If the bot's message was deleted, log this as an error
            # if m.user.lower() == self.nick.lower():
            #     logger.error(f"This bot message was deleted: \"{m.message}\"")
    except Exception as e:
        logger.exception(e)
def generate(self, params: List[str] | None = None) -> "Tuple[str, bool]":
    """Given an input sentence, generate the remainder of the sentence using the learned data.

    Args:
        params (List[str] | None): A list of words to use as an input to use as the
            start of generating. None or [] generates from a random learned start.

    Returns:
        Tuple[str, bool]: A tuple of a sentence as the first value, and a boolean indicating
            whether the generation succeeded as the second value.
    """
    if params is None:
        params = []
    # List of sentences that will be generated. In some cases, multiple sentences will be generated,
    # e.g. when the first sentence has less words than self.min_sentence_length.
    sentences = [[]]
    # Check for commands or recursion, eg: !generate !generate
    if len(params) > 0:
        if self.check_if_other_command(params[0]):
            return "You can't make me do commands, you madman!", False
    # Get the starting key and starting sentence.
    # If there is more than 1 param, get the last 2 as the key.
    # Note that self.key_length is fixed to 2 in this implementation
    if len(params) > 1:
        key = params[-self.key_length :]
        # Copy the entire params for the sentence
        sentences[0] = params.copy()
    elif len(params) == 1:
        # First we try to find if this word was once used as the first word in a sentence:
        key = self.db.get_next_single_start(params[0])
        if key is None:
            # If this failed, we try to find the next word in the grammar as a whole
            key = self.db.get_next_single_initial(0, params[0])
            if key is None:
                # Return a message that this word hasn't been learned yet
                return f'I haven\'t extracted "{params[0]}" from chat yet.', False
        # Copy this for the sentence
        sentences[0] = key.copy()
    else:  # if there are no params
        # Get starting key
        key = self.db.get_start()
        if key:
            # Copy this for the sentence
            sentences[0] = key.copy()
        else:
            # If nothing's ever been said
            return "There is not enough learned information yet.", False
    # Counter to prevent infinite loops (i.e. constantly generating <END> while below the
    # minimum number of words to generate)
    i = 0
    while (
        self.sentence_length(sentences) < self.max_sentence_length
        and i < self.max_sentence_length * 2
    ):
        # Use key to get next word
        if i == 0:
            # Prevent fetching <END> on the first word
            word = self.db.get_next_initial(i, key)
        else:
            word = self.db.get_next(i, key)
        i += 1
        if word == "<END>" or word is None:
            # Break, unless we are before the min_sentence_length
            if i < self.min_sentence_length:
                key = self.db.get_start()
                # Ensure that the key can be generated. Otherwise we still stop.
                if key:
                    # Start a new sentence
                    sentences.append([])
                    for entry in key:
                        sentences[-1].append(entry)
                    continue
            break
        # Otherwise add the word
        sentences[-1].append(word)
        # Shift the key so on the next iteration it gets the next item
        key.pop(0)
        key.append(word)
    # If there were params, but the sentence resulting is identical to the params
    # Then the params did not result in an actual sentence
    # If so, restart without params
    if len(params) > 0 and params == sentences[0]:
        return "I haven't learned what to do with \"" + detokenize(
            params[-self.key_length :]
        ) + '" yet.', False
    # Join the generated sentences with the configured separator.
    return self.sent_separator.join(
        detokenize(sentence) for sentence in sentences
    ), True
def sentence_length(self, sentences: List[List[str]]) -> int:
    """Count the number of actual words across all tokenized sentences.

    Tokens that are punctuation, or that start with an apostrophe (e.g. "'re"),
    are not counted as words. For example:
    [['Hello', ',', 'you', "'re", 'Tom', '!'], ['Yes', ',', 'I', 'am', '.']]
    would return 6.

    Args:
        sentences (List[List[str]]): Lists of tokens that make up sentences,
            where a token is a word or punctuation.

    Returns:
        int: The number of words in the sentences.
    """
    return sum(
        1
        for sentence in sentences
        for token in sentence
        if token not in string.punctuation and token[0] != "'"
    )
def extract_modifiers(self, emotes: str) -> List[str]:
    """Extract emote modifiers from emotes, such as the horizontal flip.

    Args:
        emotes (str): String containing all emotes used in the message.

    Returns:
        List[str]: List of strings that show modifiers, such as "_HZ" for horizontal flip.
    """
    output = []
    try:
        # Repeatedly find "_<modifier>:" spans; str.index raises ValueError
        # when no more are present, which terminates the loop.
        while emotes:
            u_index = emotes.index("_")
            c_index = emotes.index(":", u_index)
            output.append(emotes[u_index:c_index])
            emotes = emotes[c_index:]
    except ValueError:
        pass
    return output
def write_blacklist(self, blacklist: List[str]) -> None:
    """Write blacklist.txt given a list of banned words.

    Words are written one per line, longest first.

    Args:
        blacklist (List[str]): The list of banned words to write.
    """
    logger.debug("Writing Blacklist...")
    ordered = sorted(blacklist, key=len, reverse=True)
    with open("blacklist.txt", "w") as f:
        f.write("\n".join(ordered))
    logger.debug("Written Blacklist.")
def set_blacklist(self) -> None:
    """Read blacklist.txt and set `self.blacklist` to the list of banned words."""
    logger.debug("Loading Blacklist...")
    try:
        with open("blacklist.txt", "r") as f:
            self.blacklist = [line.replace("\n", "") for line in f.readlines()]
            logger.debug("Loaded Blacklist.")
    except FileNotFoundError:
        # No file yet: start with the generation sentinels banned and
        # create the file so the next load succeeds.
        logger.warning("Loading Blacklist Failed!")
        self.blacklist = ["<start>", "<end>"]
        self.write_blacklist(self.blacklist)
def send_help_message(self) -> None:
    """Send a Help message to the connected chat, as long as the bot wasn't disabled."""
    if self._enabled:
        logger.info("Help message sent.")
        # A network hiccup should not kill the looping timer thread.
        try:
            self.ws.send_message(
                "Learn how this bot generates sentences here: https://github.com/CubieDev/TwitchMarkovChain#how-it-works"
            )
        except socket.OSError as error:
            logger.warning(
                f"[OSError: {error}] upon sending help message. Ignoring."
            )
def send_automatic_generation_message(self) -> None:
    """Send an automatic generation message to the connected chat.

    As long as the bot wasn't disabled, just like if someone typed "!g" in chat.
    """
    if self._enabled:
        sentence, success = self.generate()
        if success:
            logger.info(sentence)
            # Try to send a message. Just log a warning on fail
            try:
                self.ws.send_message(sentence)
            except socket.OSError as error:
                logger.warning(
                    f"[OSError: {error}] upon sending automatic generation message. Ignoring."
                )
        else:
            logger.info(
                "Attempted to output automatic generation message, but there is not enough learned information yet."
            )
def send_whisper(self, user: str, message: str) -> None:
    """Optionally send a whisper, only if "WhisperCooldown" is True.

    Args:
        user (str): The user to potentially whisper.
        message (str): The message to potentially whisper.
    """
    if self.whisper_cooldown:
        self.ws.send_whisper(user, message)
def check_filter(self, message: str) -> bool:
    """Return True if `message` contains a banned word.

    Args:
        message (str): The message to check.

    Returns:
        bool: True if any token of the message is on the blacklist.
    """
    return any(token.lower() in self.blacklist for token in tokenize(message))
def check_if_our_command(self, message: str, *commands: str) -> bool:
    """True if the first "word" of the message is in the tuple of commands.

    Args:
        message (str): The message to check for a command.
        commands (str): The command strings to match against.

    Returns:
        bool: True if the first word in message is one of the commands;
            False for an empty or whitespace-only message.
    """
    # BUG FIX: `message.split()[0]` raised IndexError for empty or
    # whitespace-only messages; guard against an empty token list.
    words = message.split()
    return bool(words) and words[0] in commands
def check_if_generate(self, message: str) -> bool:
    """True if the first "word" of the message is one of the defined generate commands.

    Args:
        message (str): The message to check for the generate command (i.e !generate or !g).

    Returns:
        bool: True if the first word in message is a generate command.
    """
    # Delegates to check_if_our_command with the configured generate commands.
    return self.check_if_our_command(message, *self.generate_commands)
def check_if_other_command(self, message: str) -> bool:
    """True if the message is any command, except /me.

    Is used to avoid learning from and generating commands.

    Args:
        message (str): The message to check.

    Returns:
        bool: True if the message is any potential command (starts with a '!',
            '/' or '.') with the exception of /me.
    """
    if message.startswith("/me"):
        return False
    return message.startswith(("!", "/", "."))
def check_if_permissions(self, m: Message) -> bool:
    """True if the user has heightened permissions.

    E.g. permissions to bypass cooldowns, update settings, disable the bot, etc.
    True for the streamer themselves, and the users set as the allowed users.

    Args:
        m (Message): The Message object that was sent from Twitch.
            Has `user` and `channel` attributes.
    """
    # The broadcaster's username equals the channel name on Twitch.
    return m.user == m.channel or m.user in self.allowed_users
def check_link(self, message: str) -> bool:
    """True if `message` contains a link.

    Args:
        message (str): The message to check for a link.

    Returns:
        bool: True if the message contains a link.
    """
    # BUG FIX: re.Pattern.search returns a Match object or None; cast to a
    # real bool so the return value matches the annotation. Truthiness for
    # existing callers is unchanged.
    return bool(self.link_regex.search(message))
if __name__ == "__main__":
    # BUG FIX: the constructor only builds the websocket client; run_bot()
    # is what actually connects and starts processing chat. Previously the
    # script constructed the bot and exited immediately.
    MarkovChain().run_bot()

View file

@ -0,0 +1,314 @@
# TwitchMarkovChain
Twitch Bot for generating messages based on what it learned from chat
---
## Explanation
When the bot has started, it will start listening to chat messages in the channel listed in the `settings.json` file. Any chat message not sent by a denied user will be learned from. Whenever someone then requests a message to be generated, a [Markov Chain](https://en.wikipedia.org/wiki/Markov_chain) will be used with the learned data to generate a sentence. **Note that the bot is unaware of the meaning of any of its inputs and outputs. This means it can use bad language if it was taught to use bad language by people in chat. You can add a list of banned words it should never learn or say. Use at your own risk.**
Whenever a message is deleted from chat, its contents will be unlearned at 5 times the rate a normal message is learned from.
The bot will avoid learning from commands, or from messages containing links.
---
## How it works
### Sentence Parsing
To explain how the bot works, I will provide an example situation with two messages that are posted in Twitch chat. The messages are:
> Curly fries are the worst kind of fries
> Loud people are the reason I don't go to the movies anymore
Let's start with the first sentence and parse it like the bot will. To do so, we will split up the sentence in sections of `keyLength + 1` words. As `keyLength` has been set to `2` in the [Settings](#settings) section, each section has `3` words.
```txt
Curly fries are the worst kind of fries
[Curly fries:are]
[fries are:the]
[are the:worst]
[the worst:kind]
[worst kind:of]
[kind of:fries]
```
For each of these sections of three words, the last word is considered the output, while all the other words are considered inputs.
These words are then turned into a variation of a [Grammar](https://en.wikipedia.org/wiki/Formal_grammar):
```txt
"Curly fries" -> "are"
"fries are" -> "the"
"are the" -> "worst"
"the worst" -> "kind"
"worst kind" -> "of"
"kind of" -> "fries"
```
This can be considered a mathematical function that, when given input "the worst", will output "kind".
In order for the program to know where sentences begin, we also add the first `keyLength` words to a separate Database table, where a list of possible starts of sentences reside.
This exact same process is applied to the second sentence as well. After doing so, the resulting grammar (and our corresponding database table) looks like:
```txt
"Curly fries" -> "are"
"fries are" -> "the"
"are the" -> "worst" | "reason"
"the worst" -> "kind"
"worst kind" -> "of"
"kind of" -> "fries"
"Loud people" -> "are"
"people are" -> "the"
"the reason" -> "I"
"reason I" -> "don't"
"I don't" -> "go"
"don't go" -> "to"
"go to" -> "the"
"to the" -> "movies"
"the movies" -> "anymore"
```
and in the database table for starts of sentences:
```txt
"Curly fries"
"Loud people"
```
Note that the | is considered to be _"or"_. In the case of the `"are the"` rule above, it could be read as: if the given input is "are the", then the output is either _"worst"_ **or** _"reason"_.
In practice, more frequent phrases will have higher precedence. The more often a phrase is said, the more likely it is to be generated.
---
### Generation
When a message is generated with `!generate`, a random start of a sentence is picked from the database table of starts of sentences. In our example the randomly picked start is _"Curly fries"_.
Now, in a loop:
- The output for the input is generated via the grammar.
- And the input for the next iteration in the loop is shifted:
- Remove the first word from the input.
- Add the new output word to the end of the input.
So, the input starts as _"Curly fries"_. The output for this input is generated via the grammar, which gives us _"are"_. Then, the input is updated. _"Curly"_ is removed, and _"are"_ is added to the input. The new input for the next iteration will be _"fries are"_ as a result. This process repeats until no more words can be generated, or if a word limit is reached.
A more programmatic example of this would be this:
```python
# This initial sentence is either from the database for starts of sentences,
# or from words passed in Twitch chat
sentence = ["Curly", "fries"]
for i in range(sentence_length):
# Generate a word using last 2 words in the partial sentence,
# and append it to the partial sentence
sentence.append(generate(sentence[-2:]))
```
It's common for an input sequence to have multiple possible outputs, as we can see in the bold part of the previous grammar. This allows learned information from multiple messages to be merged into one message. For instance, some potential outputs from the given example are
> Curly fries are the reason I don't go to the movies anymore
or
> Loud people are the worst kind of fries
---
## Commands
Chat members can generate chat-like messages using the following commands (Note that they are aliases):
```txt
!generate [words]
!g [words]
```
Example:
```txt
!g Curly
```
Result (for example):
```txt
Curly fries are the reason I don't go to the movies anymore
```
- The bot will, when given this command, try to complete the start of the sentence which was given.
- If it cannot, an appropriate error message will be sent to chat.
- Any number of words may be given, including none at all.
- Everyone can use it.
Furthermore, chat members can find a link to [How it works](#how-it-works) by using one of the following commands:
```txt
!ghelp
!genhelp
!generatehelp
```
The use of this command makes the bot post this message in chat:
> Learn how this bot generates sentences here: <https://github.com/CubieDev/TwitchMarkovChain#how-it-works>
---
### Streamer commands
All of these commands can be whispered to the bot account, or typed in chat.
To disable the bot from generating messages, while still learning from regular chat messages:
```txt
!disable
```
After disabling the bot, it can be re-enabled using:
```txt
!enable
```
Changing the cooldown between generations is possible with one of the following two commands:
```txt
!setcooldown <seconds>
!setcd <seconds>
```
Example:
```txt
!setcd 30
```
Which sets the cooldown between generations to 30 seconds.
---
### Moderator commands
All of these commands must be whispered to the bot account.
Moderators (and the broadcaster) can modify the blacklist to prevent the bot learning words it shouldn't.
To add `word` to the blacklist, a moderator can whisper the bot:
```txt
!blacklist <word>
```
Similarly, to remove `word` from the blacklist, a moderator can whisper the bot:
```txt
!whitelist <word>
```
And to check whether `word` is already on the blacklist or not, a moderator can whisper the bot:
```txt
!check <word>
```
---
## Settings
This bot is controlled by a `settings.json` file, which has the following structure:
```json
{
"Host": "irc.chat.twitch.tv",
"Port": 6667,
"Channel": "#<channel>",
"Nickname": "<name>",
"Authentication": "oauth:<auth>",
"DeniedUsers": ["StreamElements", "Nightbot", "Moobot", "Marbiebot"],
"AllowedUsers": [],
"Cooldown": 20,
"KeyLength": 2,
"MaxSentenceWordAmount": 25,
"MinSentenceWordAmount": -1,
"HelpMessageTimer": 18000,
"AutomaticGenerationTimer": -1,
"WhisperCooldown": true,
"EnableGenerateCommand": true,
"SentenceSeparator": " - ",
"AllowGenerateParams": true,
"GenerateCommands": ["!generate", "!g"]
}
```
| **Parameter** | **Meaning** | **Example** |
| -------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------- |
| `Host` | The URL that will be used. Do not change. | `"irc.chat.twitch.tv"` |
| `Port` | The Port that will be used. Do not change. | `6667` |
| `Channel` | The Channel that will be connected to. | `"#CubieDev"` |
| `Nickname` | The Username of the bot account. | `"CubieB0T"` |
| `Authentication` | The OAuth token for the bot account. | `"oauth:pivogip8ybletucqdz4pkhag6itbax"` |
| `DeniedUsers` | The list of (bot) accounts whose messages should not be learned from. The bot itself is automatically added to this. | `["StreamElements", "Nightbot", "Moobot", "Marbiebot"]` |
| `AllowedUsers` | A list of users with heightened permissions. Gives these users the same power as the channel owner, allowing them to bypass cooldowns, set cooldowns, disable or enable the bot, etc. | `["Michelle", "Cubie"]` |
| `Cooldown` | A cooldown in seconds between successful generations. If a generation fails (eg inputs it can't work with), then the cooldown is not reset and another generation can be done immediately. | `20` |
| `KeyLength` | A technical parameter which, in my previous implementation, would affect how closely the output matches the learned inputs. In the current implementation the database structure does not allow this parameter to be changed. Do not change. | `2` |
| `MaxSentenceWordAmount` | The maximum number of words that can be generated. Prevents absurdly long and spammy generations. | `25` |
| `MinSentenceWordAmount` | The minimum number of words that can be generated. Might generate multiple sentences, separated by the value from `SentenceSeparator`. Prevents very short generations. -1 to disable. | `-1` |
| `HelpMessageTimer` | The amount of seconds between sending help messages that links to [How it works](#how-it-works). -1 for no help messages. Defaults to once every 5 hours. | `18000` |
| `AutomaticGenerationTimer` | The amount of seconds between automatically sending a generated message, as if someone wrote `!g`. -1 for no automatic generations. | `-1` |
| `WhisperCooldown` | Allows the bot to whisper a user the remaining cooldown after that user has attempted to generate a message. | `true` |
| `EnableGenerateCommand` | Globally enables/disables the generate command. | `true` |
| `SentenceSeparator` | The separator between multiple sentences. Only relevant if `MinSentenceWordAmount` > 0, as only then can multiple sentences be generated. Sensible values for this might be `", "`, `". "`, `" - "` or `" "`. | `" - "` |
| `AllowGenerateParams` | Allow chat to supply a partial sentence which the bot finishes, e.g. `!generate hello, I am`. If `false`, all values after the generation command will be ignored. | `true` |
| `GenerateCommands` | The generation commands that the bot will listen for. Defaults to `["!generate", "!g"]`. Useful if your chat is used to commands with `~`, `-`, `/`, etc. | `["!generate", "!g"]` |
_Note that the example OAuth token is not an actual token, but merely a generated string to give an indication what it might look like._
I got my real OAuth token from <https://twitchapps.com/tmi/>.
---
### Blacklist
You may add words to a blacklist by adding them on a separate line in `blacklist.txt`. Each word is case insensitive. By default, this file only contains `<start>` and `<end>`, which are required for the current implementation.
Words can also be added or removed from the blacklist via whispers, as is described in the [Moderator Command](#moderator-commands) section.
---
## Requirements
- [Python 3.6+](https://www.python.org/downloads/)
- [Module requirements](requirements.txt)
- Install these modules using `pip install -r requirements.txt` in the commandline.
Among these modules is my own [TwitchWebsocket](https://github.com/tomaarsen/TwitchWebsocket) wrapper, which makes making a Twitch chat bot a lot easier.
This repository can be seen as an implementation using this wrapper.
---
### Contributors
My gratitude is extended to the following contributors who've decided to help out.
* [@DoctorInsano](https://github.com/DoctorInsano) - Several small fixes and improvements in [v1.0](https://github.com/tomaarsen/TwitchMarkovChain/releases/tag/v1.0).
* [@justinrusso](https://github.com/justinrusso) - Several features, refactors and fixes, that represent the core of [v2.0](https://github.com/tomaarsen/TwitchMarkovChain/releases/tag/v2.0) and [v2.1](https://github.com/tomaarsen/TwitchMarkovChain/releases/tag/v2.1).
---
## Other Twitch Bots
- [TwitchAIDungeon](https://github.com/CubieDev/TwitchAIDungeon)
- [TwitchGoogleTranslate](https://github.com/CubieDev/TwitchGoogleTranslate)
- [TwitchCubieBotGUI](https://github.com/CubieDev/TwitchCubieBotGUI)
- [TwitchCubieBot](https://github.com/CubieDev/TwitchCubieBot)
- [TwitchRandomRecipe](https://github.com/CubieDev/TwitchRandomRecipe)
- [TwitchUrbanDictionary](https://github.com/CubieDev/TwitchUrbanDictionary)
- [TwitchRhymeBot](https://github.com/CubieDev/TwitchRhymeBot)
- [TwitchWeather](https://github.com/CubieDev/TwitchWeather)
- [TwitchDeathCounter](https://github.com/CubieDev/TwitchDeathCounter)
- [TwitchSuggestDinner](https://github.com/CubieDev/TwitchSuggestDinner)
- [TwitchPickUser](https://github.com/CubieDev/TwitchPickUser)
- [TwitchSaveMessages](https://github.com/CubieDev/TwitchSaveMessages)
- [TwitchMMLevelPickerGUI](https://github.com/CubieDev/TwitchMMLevelPickerGUI) (Mario Maker 2 specific bot)
- [TwitchMMLevelQueueGUI](https://github.com/CubieDev/TwitchMMLevelQueueGUI) (Mario Maker 2 specific bot)
- [TwitchPackCounter](https://github.com/CubieDev/TwitchPackCounter) (Streamer specific bot)
- [TwitchDialCheck](https://github.com/CubieDev/TwitchDialCheck) (Streamer specific bot)
- [TwitchSendMessage](https://github.com/CubieDev/TwitchSendMessage) (Meant for debugging purposes)

View file

@ -0,0 +1,205 @@
import json
import os
import logging
from functools import lru_cache
from pathlib import Path
from typing import List
try:
    from typing import TypedDict
except ImportError:
    # Python < 3.8: `typing.TypedDict` does not exist. Fall back to a plain
    # `object` base so the SettingsData class definition below still works
    # (it then provides no key typing, only a regular class).
    TypedDict = object

logger = logging.getLogger(__name__)
class SettingsData(TypedDict):
    """Typed schema of the `settings.json` file.

    Mirrors `Settings.DEFAULTS`; keep the two in sync.
    """

    Host: str
    Port: int
    Channel: str
    Nickname: str
    Authentication: str
    DeniedUsers: List[str]
    AllowedUsers: List[str]
    Cooldown: int
    KeyLength: int
    MaxSentenceWordAmount: int
    MinSentenceWordAmount: int
    HelpMessageTimer: int
    AutomaticGenerationTimer: int
    WhisperCooldown: bool
    EnableGenerateCommand: bool
    SentenceSeparator: str
    # These two keys exist in Settings.DEFAULTS but were missing here.
    AllowGenerateParams: bool
    GenerateCommands: List[str]
class Settings:
    """Loads, migrates and persists the bot's configuration file.

    The settings live in a JSON file (``settings.json`` by default). Reading
    fills in any keys missing from the file with the values from ``DEFAULTS``
    and writes the completed file back to disk.
    """

    # Default values for every supported settings key. `read_settings` merges
    # these underneath whatever the settings file provides.
    DEFAULTS: SettingsData = {
        "Host": "irc.chat.twitch.tv",
        "Port": 6667,
        "Channel": "#<channel>",
        "Nickname": "<name>",
        "Authentication": "oauth:<auth>",
        "DeniedUsers": ["StreamElements", "Nightbot", "Moobot", "Marbiebot"],
        "AllowedUsers": [],
        "Cooldown": 20,
        "KeyLength": 2,
        "MaxSentenceWordAmount": 25,
        "MinSentenceWordAmount": -1,
        "HelpMessageTimer": 60 * 60 * 5,  # 18000 seconds, 5 hours
        "AutomaticGenerationTimer": -1,
        "WhisperCooldown": True,
        "EnableGenerateCommand": True,
        "SentenceSeparator": " - ",
        "AllowGenerateParams": True,
        "GenerateCommands": ["!generate", "!g"],
    }

    def __init__(self, settings_path: Path | None = None) -> None:
        """
        Args:
            settings_path (Path | None): Location of the settings JSON file.
                Defaults to "settings.json" in the working directory.
        """
        self.settings_path = settings_path or Path("settings.json")
        # Per-instance cache for `read_settings`. Replaces the previous
        # `@lru_cache` on the method, which kept the instance alive for the
        # cache's lifetime (flake8-bugbear B019) and never saw file updates
        # made through `update_cooldown`.
        self._settings_cache: SettingsData | None = None

    def read_settings(self) -> SettingsData:
        """Read (and cache) the settings, filling in missing keys with defaults.

        Returns:
            SettingsData: The complete settings dictionary.

        Raises:
            ValueError: If the settings file contains invalid JSON, or if no
                settings file existed and a default one was just generated.
        """
        if self._settings_cache is not None:
            return self._settings_cache
        self.update_v2()
        try:
            # Try to load the file using json.
            # And pass the data to the Bot class instance if this succeeds.
            with self.settings_path.open("r") as f:
                text_settings = f.read()
            settings: SettingsData = json.loads(text_settings)
            self.update_v1(settings)
            # Check if any settings keys are missing, and if so, write the defaults
            # to the settings.json
            if settings.keys() != Settings.DEFAULTS.keys():
                missing_keys = set(Settings.DEFAULTS.keys()) - set(settings.keys())
                # Log the missing keys
                logger.info(
                    f"The following keys were missing from {self.settings_path}: {', '.join(map(repr, missing_keys))}."
                )
                logger.info(
                    f"These defaults of these values were used, and added to {self.settings_path}. Default behaviour will not change."
                )
                # Add missing defaults
                settings = {**Settings.DEFAULTS, **settings}
                self.write_settings_file(settings)
            self._settings_cache = settings
            return settings
        except ValueError:
            logger.error("Error in settings file.")
            raise ValueError("Error in settings file.")
        except FileNotFoundError:
            self.write_default_settings_file()
            raise ValueError("Please fix your settings file that was just generated.")

    def update_v1(self, settings: SettingsData) -> None:
        """Update settings file to remove the BannedWords field, in favor for a blacklist.txt file.

        Args:
            settings (SettingsData): The parsed settings; mutated in place to
                remove the legacy "BannedWords" key.
        """
        # "BannedWords" is only a key in the settings in older versions.
        # We moved to a separate file for blacklisted words.
        if "BannedWords" in settings:
            logger.info("Updating Blacklist system to new version...")
            try:
                with open("blacklist.txt", "r+") as f:
                    logger.info("Moving Banned Words to the blacklist.txt file...")
                    # Read the data, and split by word or phrase, then add BannedWords
                    banned_list = f.read().split("\n") + settings["BannedWords"]
                    # Remove duplicates and sort by length, longest to shortest
                    banned_list = sorted(
                        list(set(banned_list)), key=lambda x: len(x), reverse=True
                    )
                    # Clear file, and then write in the new data
                    f.seek(0)
                    f.truncate(0)
                    f.write("\n".join(banned_list))
                    logger.info("Moved Banned Words to the blacklist.txt file.")
            except FileNotFoundError:
                with open("blacklist.txt", "w") as f:
                    logger.info("Moving Banned Words to a new blacklist.txt file...")
                    # Remove duplicates and sort by length, longest to shortest
                    banned_list = sorted(
                        list(set(settings["BannedWords"])),
                        key=lambda x: len(x),
                        reverse=True,
                    )
                    f.write("\n".join(banned_list))
                    logger.info("Moved Banned Words to a new blacklist.txt file.")
            # Remove BannedWords list from data dictionary, and then write it to the settings file
            del settings["BannedWords"]
            with self.settings_path.open("w") as f:
                f.write(json.dumps(settings, indent=4, separators=(",", ": ")))
            logger.info("Updated Blacklist system to new version.")

    def update_v2(self) -> None:
        """Converts `settings.txt` to `settings.json`, and adds missing new fields."""
        try:
            # Try to load the settings file using json.
            with self.settings_path.open("r") as f:
                settings = f.read()
            data: SettingsData = json.loads(settings)
            # Add missing fields from Settings.DEFAULTS to data
            corrected_data = {**Settings.DEFAULTS, **data}
            # Write the new settings file
            with self.settings_path.open("w") as f:
                f.write(json.dumps(corrected_data, indent=4, separators=(",", ": ")))
            # NOTE(review): this removes "settings.txt" from the *current
            # working directory*, while the settings themselves come from
            # `self.settings_path`. If settings.txt does not exist, the
            # FileNotFoundError is swallowed by the handler below.
            os.remove("settings.txt")
            logger.info(
                'Updated Settings system to new version. See "settings.json" for new fields, and README.md for information on these fields.'
            )
        except FileNotFoundError:
            # If settings.txt does not exist, then we're not on an old version.
            pass

    def write_default_settings_file(self) -> None:
        """Create a standardised settings file with default values."""
        self.write_settings_file(Settings.DEFAULTS)

    def write_settings_file(self, settings: SettingsData) -> None:
        """Serialise `settings` to the settings file, dropping the read cache.

        Args:
            settings (SettingsData): The settings dictionary to persist.
        """
        # Invalidate the cache so the next `read_settings` reflects this write.
        self._settings_cache = None
        with self.settings_path.open("w") as f:
            f.write(json.dumps(settings, indent=4, separators=(",", ": ")))

    def update_cooldown(self, cooldown: int) -> None:
        """Update the "Cooldown" value in the settings file.

        Args:
            cooldown (int): The integer representing the amount of seconds of cooldown
                between outputted generations.
        """
        with self.settings_path.open("r") as f:
            settings = f.read()
        data = json.loads(settings)
        data["Cooldown"] = cooldown
        # Delegating keeps the JSON format consistent and invalidates the cache.
        self.write_settings_file(data)

    def get_channel(self) -> str:
        """Get the "Channel" value from the settings file.

        Returns:
            str: The name of the Channel described in the settings file.
                Stripped of "#" and converted to lowercase.
        """
        settings = self.read_settings()
        return settings["Channel"].replace("#", "").lower()
@lru_cache(maxsize=1)
def get_settings(settings_path: Path | None = None) -> Settings:
    """Return a shared `Settings` instance for the given path.

    NOTE(review): maxsize=1 means calling this with a *different*
    `settings_path` evicts the previously cached instance — presumably only
    one path is ever used per process; confirm against callers.
    """
    return Settings(settings_path)

View file

@ -0,0 +1,28 @@
import threading
import logging
from typing import Callable
logger = logging.getLogger(__name__)
class LoopingTimer(threading.Thread):
    """
    Thread that will continuously run `target(*args, **kwargs)`
    every `interval` seconds, until `stop()` is called or the program exits.
    """

    def __init__(
        self, interval: int, target: Callable[[], None], *args, **kwargs
    ) -> None:
        """
        Args:
            interval (int): Seconds to wait between consecutive calls of `target`.
            target (Callable[[], None]): The callable to invoke each period.
            *args: Positional arguments forwarded to `target`.
            **kwargs: Keyword arguments forwarded to `target`.
        """
        threading.Thread.__init__(self)
        self.interval = interval
        self.target = target
        self.args = args
        self.kwargs = kwargs
        # When set, `run` exits after at most one more interval.
        self.stopped = threading.Event()
        # Daemon thread: does not block interpreter shutdown.
        self.daemon = True

    def run(self):
        """Invoke the target every `interval` seconds until stopped."""
        # Event.wait doubles as the sleep; it returns True (ending the loop)
        # as soon as `stopped` is set.
        while not self.stopped.wait(self.interval):
            self.target(*self.args, **self.kwargs)

    def stop(self) -> None:
        """Signal the loop to terminate; `run` exits within one interval.

        Previously the `stopped` event existed but nothing ever set it, so
        the thread could only end at program termination.
        """
        self.stopped.set()

View file

@ -0,0 +1,131 @@
import re
from typing import List
from nltk.tokenize.destructive import NLTKWordTokenizer
from nltk.tokenize.treebank import TreebankWordDetokenizer
class MarkovChainTokenizer(NLTKWordTokenizer):
    """NLTK word tokenizer customised for Twitch chat.

    Differences from `NLTKWordTokenizer`:
    - Does not convert a leading `"` into ``` `` ``` (keeps `''` style quotes).
    - Does not treat "@" as punctuation, so @username mentions stay intact.
    """

    # Starting quotes.
    STARTING_QUOTES = [
        (re.compile("([«“‘„]|[`]+)", re.U), r" \1 "),
        # (re.compile(r"^\""), r"``"), # Custom for MarkovChain: Don't use `` as starting quotes
        (re.compile(r"(``)"), r" \1 "),
        (re.compile(r"([ \(\[{<])(\"|\'{2})"), r"\1 '' "),
        (re.compile(r"(?i)(\')(?!re|ve|ll|m|t|s|d)(\w)\b", re.U), r"\1 \2"),
    ]

    PUNCTUATION = [
        # Normalise the right single quotation mark to a plain apostrophe.
        # Fixes a garbled empty pattern: re.sub with an empty pattern would
        # insert an apostrophe between every character of the input.
        (re.compile(r"’"), r"'"),
        (re.compile(r'([^\.])(\.)([\]\)}>"\'' "»”’ " r"]*)\s*$", re.U), r"\1 \2 \3 "),
        (re.compile(r"([:,])([^\d])"), r" \1 \2"),
        (re.compile(r"([:,])$"), r" \1 "),
        # See https://github.com/nltk/nltk/pull/2322
        (re.compile(r"\.{2,}", re.U), r" \g<0> "),
        # Custom for MarkovChain: Removed the "@"
        (re.compile(r"[;#$%&]"), r" \g<0> "),
        (
            re.compile(r'([^\.])(\.)([\]\)}>"\']*)\s*$'),
            r"\1 \2\3 ",
        ),  # Handles the final period.
        (re.compile(r"[?!]"), r" \g<0> "),
        (re.compile(r"([^'])' "), r"\1 ' "),
        # See https://github.com/nltk/nltk/pull/2322
        (re.compile(r"[*]", re.U), r" \g<0> "),
    ]
EMOTICON_RE = re.compile(
r"""
(
[<>]?
[:;=8] # eyes
[\-o\*\']? # optional nose
[\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
|
[\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
[\-o\*\']? # optional nose
[:;=8] # eyes
[<>]?
|
<3 # heart
)""",
re.VERBOSE | re.I | re.UNICODE,
)
# Module-level tokenizer/detokenizer callables, built once at import time.
_tokenize = MarkovChainTokenizer().tokenize
# NOTE(review): on `TreebankWordDetokenizer`, `tokenize` is the detokenizing
# entry point (`detokenize` is an alias of it) — confirm against the
# installed nltk version.
_detokenize = TreebankWordDetokenizer().tokenize
def tokenize(sentence: str) -> List[str]:
    """Word tokenize, separating commas, dots, apostrophes, etc.

    Uses nltk's `NLTKWordTokenizer`, but does not consider "@" to be punctuation.
    Also doesn't convert "hello" to ``hello'', but to ''hello''.
    Furthermore, doesn't split emoticons, i.e. "<3" or ":)"

    Args:
        sentence (str): Input sentence.

    Returns:
        List[str]: Tokenized output of the sentence.
    """
    tokens: List[str] = []
    remainder = sentence
    # Peel off emoticons one at a time: the text before each emoticon is
    # tokenized normally, while the emoticon itself stays one token.
    while (found := EMOTICON_RE.search(remainder)) is not None:
        tokens.extend(_tokenize(remainder[: found.start()].strip()))
        tokens.append(found.group())
        remainder = remainder[found.end() :].strip()
    tokens.extend(_tokenize(remainder))
    return tokens
def detokenize(tokenized: List[str]) -> str:
    """Detokenize a tokenized list of words and punctuation.

    Converted in a less naïve way than `" ".join(tokenized)`.
    Preprocess tokenized by placing spaces before the 1st, 3rd, 5th, etc. quote,
    and by placing spaces after the 2nd, 4th, 6th, etc. quote.
    Then, ["He", "said", "''", "heya", "!", "''", "yesterday", "."] will be detokenized to
    > He said ''heya!'' yesterday.
    instead of
    > He said''heya!''yesterday.

    Args:
        tokenized (List[str]): Input tokens, e.g. ["Hello", ",", "I", "'m", "Tom"]

    Returns:
        str: The correct string sentence, e.g. "Hello, I'm Tom"
    """
    # Positions of quote tokens; alternating occurrences are treated as
    # opening (even index in `indices`) and closing (odd) quotes.
    indices = [
        index for index, token in enumerate(tokenized) if token in ("''", "'", '"')
    ]
    # Replace '' with ", works better with more recent NLTK versions
    tokenized_copy = [token if token != "''" else '"' for token in tokenized]
    # We get the reverse of the enumerate, as we modify the list we took the indices from
    enumerated = list(enumerate(indices))
    for i, index in enumerated[::-1]:
        # Opening quote
        if i % 2 == 0:
            # If there is another word, merge with that word and prepend a space
            if len(tokenized) > index + 1:
                tokenized_copy[index : index + 2] = [
                    "".join(tokenized_copy[index : index + 2])
                ]
        # Closing quote
        else:
            # If there is a previous word, merge with that word and append a space
            if index > 0:
                tokenized_copy[index - 1 : index + 1] = [
                    "".join(tokenized_copy[index - 1 : index + 1])
                ]
    # The detokenizer joins tokens and re-attaches punctuation sensibly.
    return _detokenize(tokenized_copy).strip()

View file

View file

@ -0,0 +1,10 @@
import logging
class LogHandler(logging.Handler):
    """A `logging.Handler` that pushes formatted records onto a queue.

    Lets one thread (e.g. a GUI) consume log output produced elsewhere.
    """

    def __init__(self, log_queue):
        """
        Args:
            log_queue: Any object with a `put(item)` method, e.g. `queue.Queue`.
        """
        super().__init__()
        self.log_queue = log_queue

    def emit(self, record):
        """Format `record` and enqueue the resulting string."""
        message = self.format(record)
        self.log_queue.put(message)

74
src/markovbot_gui/main.py Normal file
View file

@ -0,0 +1,74 @@
import platformdirs
from kivy.app import App
from kivy.metrics import dp
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.popup import Popup
from kivy.uix.widget import Widget
from src.markovbot_gui.bot_runner import BotRunner
from src.markovbot_gui.config_window import ConfigWindow
class BotApp(App):
    """Kivy application with two actions: run the bot, or edit its config.

    The settings file lives in the platform-specific per-user config
    directory (via `platformdirs`), not next to the executable.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Platform-appropriate config location, e.g. ~/.config/markovbot_gui
        # on Linux or %APPDATA%\markovbot_gui on Windows.
        self.config_path = (
            platformdirs.user_config_path("markovbot_gui") / "settings.json"
        )

    def run_bot(self, instance):
        """Open a popup hosting a `BotRunner` bound to the config file.

        Args:
            instance: The Kivy button that triggered the event (unused).
        """
        bot_runner = BotRunner(config_path=self.config_path)
        # NOTE(review): auto_dismiss=False and no close button here —
        # presumably BotRunner supplies its own way to stop/dismiss; confirm.
        popup = Popup(
            title="Bot Running",
            content=bot_runner,
            size_hint=(None, None),
            size=(dp(600), dp(400)),
            auto_dismiss=False,
        )
        popup.open()

    def run_config(self, instance):
        """Open a popup with the configuration editor plus a Close button.

        Args:
            instance: The Kivy button that triggered the event (unused).
        """
        config_window = ConfigWindow(config_path=self.config_path)
        popup = Popup(
            title=f"Bot Configuration, available at {self.config_path}",
            content=config_window,
            size_hint=(None, None),
            size=(dp(400), dp(300)),
            auto_dismiss=False,
        )
        # Add close button
        close_button = Button(
            text="Close",
            size_hint=(None, None),
            size=(dp(100), dp(40)),
            pos_hint={"center_x": 0.5},
        )
        close_button.bind(on_release=popup.dismiss)
        config_window.add_widget(close_button)
        popup.open()

    def build(self):
        """Build the root widget tree: a spacer above a two-button bar.

        Returns:
            The root widget of the application.
        """
        # Empty widget acts as a flexible spacer pushing the bar to the bottom.
        widget = Widget()
        layout = BoxLayout(size_hint=(1, None), height=50)
        run_button = Button(text="Run bot")
        run_button.bind(on_release=self.run_bot)
        layout.add_widget(run_button)
        config_button = Button(text="Open config")
        config_button.bind(on_release=self.run_config)
        layout.add_widget(config_button)
        root = BoxLayout(orientation="vertical")
        root.add_widget(widget)
        root.add_widget(layout)
        return root
if __name__ == "__main__":
    # Script entry point: start the Kivy application event loop.
    BotApp().run()

554
uv.lock generated Normal file
View file

@ -0,0 +1,554 @@
version = 1
requires-python = ">=3.11"
[[package]]
name = "altgraph"
version = "0.17.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/de/a8/7145824cf0b9e3c28046520480f207df47e927df83aa9555fb47f8505922/altgraph-0.17.4.tar.gz", hash = "sha256:1b5afbb98f6c4dcadb2e2ae6ab9fa994bbb8c1d75f4fa96d340f9437ae454406", size = 48418 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/4d/3f/3bc3f1d83f6e4a7fcb834d3720544ca597590425be5ba9db032b2bf322a2/altgraph-0.17.4-py2.py3-none-any.whl", hash = "sha256:642743b4750de17e655e6711601b077bc6598dbfa3ba5fa2b2a35ce12b508dff", size = 21212 },
]
[[package]]
name = "certifi"
version = "2024.8.30"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/b0/ee/9b19140fe824b367c04c5e1b369942dd754c4c5462d5674002f75c4dedc1/certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9", size = 168507 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/12/90/3c9ff0512038035f59d279fddeb79f5f1eccd8859f06d6163c58798b9487/certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", size = 167321 },
]
[[package]]
name = "charset-normalizer"
version = "3.4.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f2/4f/e1808dc01273379acc506d18f1504eb2d299bd4131743b9fc54d7be4df1e/charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e", size = 106620 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/9c/61/73589dcc7a719582bf56aae309b6103d2762b526bffe189d635a7fcfd998/charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c", size = 193339 },
{ url = "https://files.pythonhosted.org/packages/77/d5/8c982d58144de49f59571f940e329ad6e8615e1e82ef84584c5eeb5e1d72/charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944", size = 124366 },
{ url = "https://files.pythonhosted.org/packages/bf/19/411a64f01ee971bed3231111b69eb56f9331a769072de479eae7de52296d/charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee", size = 118874 },
{ url = "https://files.pythonhosted.org/packages/4c/92/97509850f0d00e9f14a46bc751daabd0ad7765cff29cdfb66c68b6dad57f/charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c", size = 138243 },
{ url = "https://files.pythonhosted.org/packages/e2/29/d227805bff72ed6d6cb1ce08eec707f7cfbd9868044893617eb331f16295/charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6", size = 148676 },
{ url = "https://files.pythonhosted.org/packages/13/bc/87c2c9f2c144bedfa62f894c3007cd4530ba4b5351acb10dc786428a50f0/charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea", size = 141289 },
{ url = "https://files.pythonhosted.org/packages/eb/5b/6f10bad0f6461fa272bfbbdf5d0023b5fb9bc6217c92bf068fa5a99820f5/charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc", size = 142585 },
{ url = "https://files.pythonhosted.org/packages/3b/a0/a68980ab8a1f45a36d9745d35049c1af57d27255eff8c907e3add84cf68f/charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5", size = 144408 },
{ url = "https://files.pythonhosted.org/packages/d7/a1/493919799446464ed0299c8eef3c3fad0daf1c3cd48bff9263c731b0d9e2/charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594", size = 139076 },
{ url = "https://files.pythonhosted.org/packages/fb/9d/9c13753a5a6e0db4a0a6edb1cef7aee39859177b64e1a1e748a6e3ba62c2/charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c", size = 146874 },
{ url = "https://files.pythonhosted.org/packages/75/d2/0ab54463d3410709c09266dfb416d032a08f97fd7d60e94b8c6ef54ae14b/charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365", size = 150871 },
{ url = "https://files.pythonhosted.org/packages/8d/c9/27e41d481557be53d51e60750b85aa40eaf52b841946b3cdeff363105737/charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129", size = 148546 },
{ url = "https://files.pythonhosted.org/packages/ee/44/4f62042ca8cdc0cabf87c0fc00ae27cd8b53ab68be3605ba6d071f742ad3/charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236", size = 143048 },
{ url = "https://files.pythonhosted.org/packages/01/f8/38842422988b795220eb8038745d27a675ce066e2ada79516c118f291f07/charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99", size = 94389 },
{ url = "https://files.pythonhosted.org/packages/0b/6e/b13bd47fa9023b3699e94abf565b5a2f0b0be6e9ddac9812182596ee62e4/charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27", size = 101752 },
{ url = "https://files.pythonhosted.org/packages/d3/0b/4b7a70987abf9b8196845806198975b6aab4ce016632f817ad758a5aa056/charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6", size = 194445 },
{ url = "https://files.pythonhosted.org/packages/50/89/354cc56cf4dd2449715bc9a0f54f3aef3dc700d2d62d1fa5bbea53b13426/charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf", size = 125275 },
{ url = "https://files.pythonhosted.org/packages/fa/44/b730e2a2580110ced837ac083d8ad222343c96bb6b66e9e4e706e4d0b6df/charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db", size = 119020 },
{ url = "https://files.pythonhosted.org/packages/9d/e4/9263b8240ed9472a2ae7ddc3e516e71ef46617fe40eaa51221ccd4ad9a27/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1", size = 139128 },
{ url = "https://files.pythonhosted.org/packages/6b/e3/9f73e779315a54334240353eaea75854a9a690f3f580e4bd85d977cb2204/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03", size = 149277 },
{ url = "https://files.pythonhosted.org/packages/1a/cf/f1f50c2f295312edb8a548d3fa56a5c923b146cd3f24114d5adb7e7be558/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284", size = 142174 },
{ url = "https://files.pythonhosted.org/packages/16/92/92a76dc2ff3a12e69ba94e7e05168d37d0345fa08c87e1fe24d0c2a42223/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15", size = 143838 },
{ url = "https://files.pythonhosted.org/packages/a4/01/2117ff2b1dfc61695daf2babe4a874bca328489afa85952440b59819e9d7/charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8", size = 146149 },
{ url = "https://files.pythonhosted.org/packages/f6/9b/93a332b8d25b347f6839ca0a61b7f0287b0930216994e8bf67a75d050255/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2", size = 140043 },
{ url = "https://files.pythonhosted.org/packages/ab/f6/7ac4a01adcdecbc7a7587767c776d53d369b8b971382b91211489535acf0/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719", size = 148229 },
{ url = "https://files.pythonhosted.org/packages/9d/be/5708ad18161dee7dc6a0f7e6cf3a88ea6279c3e8484844c0590e50e803ef/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631", size = 151556 },
{ url = "https://files.pythonhosted.org/packages/5a/bb/3d8bc22bacb9eb89785e83e6723f9888265f3a0de3b9ce724d66bd49884e/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b", size = 149772 },
{ url = "https://files.pythonhosted.org/packages/f7/fa/d3fc622de05a86f30beea5fc4e9ac46aead4731e73fd9055496732bcc0a4/charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565", size = 144800 },
{ url = "https://files.pythonhosted.org/packages/9a/65/bdb9bc496d7d190d725e96816e20e2ae3a6fa42a5cac99c3c3d6ff884118/charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7", size = 94836 },
{ url = "https://files.pythonhosted.org/packages/3e/67/7b72b69d25b89c0b3cea583ee372c43aa24df15f0e0f8d3982c57804984b/charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9", size = 102187 },
{ url = "https://files.pythonhosted.org/packages/f3/89/68a4c86f1a0002810a27f12e9a7b22feb198c59b2f05231349fbce5c06f4/charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114", size = 194617 },
{ url = "https://files.pythonhosted.org/packages/4f/cd/8947fe425e2ab0aa57aceb7807af13a0e4162cd21eee42ef5b053447edf5/charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed", size = 125310 },
{ url = "https://files.pythonhosted.org/packages/5b/f0/b5263e8668a4ee9becc2b451ed909e9c27058337fda5b8c49588183c267a/charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250", size = 119126 },
{ url = "https://files.pythonhosted.org/packages/ff/6e/e445afe4f7fda27a533f3234b627b3e515a1b9429bc981c9a5e2aa5d97b6/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920", size = 139342 },
{ url = "https://files.pythonhosted.org/packages/a1/b2/4af9993b532d93270538ad4926c8e37dc29f2111c36f9c629840c57cd9b3/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64", size = 149383 },
{ url = "https://files.pythonhosted.org/packages/fb/6f/4e78c3b97686b871db9be6f31d64e9264e889f8c9d7ab33c771f847f79b7/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23", size = 142214 },
{ url = "https://files.pythonhosted.org/packages/2b/c9/1c8fe3ce05d30c87eff498592c89015b19fade13df42850aafae09e94f35/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc", size = 144104 },
{ url = "https://files.pythonhosted.org/packages/ee/68/efad5dcb306bf37db7db338338e7bb8ebd8cf38ee5bbd5ceaaaa46f257e6/charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d", size = 146255 },
{ url = "https://files.pythonhosted.org/packages/0c/75/1ed813c3ffd200b1f3e71121c95da3f79e6d2a96120163443b3ad1057505/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88", size = 140251 },
{ url = "https://files.pythonhosted.org/packages/7d/0d/6f32255c1979653b448d3c709583557a4d24ff97ac4f3a5be156b2e6a210/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90", size = 148474 },
{ url = "https://files.pythonhosted.org/packages/ac/a0/c1b5298de4670d997101fef95b97ac440e8c8d8b4efa5a4d1ef44af82f0d/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b", size = 151849 },
{ url = "https://files.pythonhosted.org/packages/04/4f/b3961ba0c664989ba63e30595a3ed0875d6790ff26671e2aae2fdc28a399/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d", size = 149781 },
{ url = "https://files.pythonhosted.org/packages/d8/90/6af4cd042066a4adad58ae25648a12c09c879efa4849c705719ba1b23d8c/charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482", size = 144970 },
{ url = "https://files.pythonhosted.org/packages/cc/67/e5e7e0cbfefc4ca79025238b43cdf8a2037854195b37d6417f3d0895c4c2/charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67", size = 94973 },
{ url = "https://files.pythonhosted.org/packages/65/97/fc9bbc54ee13d33dc54a7fcf17b26368b18505500fc01e228c27b5222d80/charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b", size = 102308 },
{ url = "https://files.pythonhosted.org/packages/bf/9b/08c0432272d77b04803958a4598a51e2a4b51c06640af8b8f0f908c18bf2/charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079", size = 49446 },
]
[[package]]
name = "click"
version = "8.1.7"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "platform_system == 'Windows'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/96/d3/f04c7bfcf5c1862a2a5b845c6b2b360488cf47af55dfa79c98f6a6bf98b5/click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de", size = 336121 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/00/2e/d53fa4befbf2cfa713304affc7ca780ce4fc1fd8710527771b58311a3229/click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28", size = 97941 },
]
[[package]]
name = "colorama"
version = "0.4.6"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 },
]
[[package]]
name = "docutils"
version = "0.21.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/ae/ed/aefcc8cd0ba62a0560c3c18c33925362d46c6075480bfa4df87b28e169a9/docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f", size = 2204444 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/8f/d7/9322c609343d929e75e7e5e6255e614fcc67572cfd083959cdef3b7aad79/docutils-0.21.2-py3-none-any.whl", hash = "sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2", size = 587408 },
]
[[package]]
name = "idna"
version = "3.10"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 },
]
[[package]]
name = "joblib"
version = "1.4.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/64/33/60135848598c076ce4b231e1b1895170f45fbcaeaa2c9d5e38b04db70c35/joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e", size = 2116621 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/91/29/df4b9b42f2be0b623cbd5e2140cafcaa2bef0759a00b7b70104dcfe2fb51/joblib-1.4.2-py3-none-any.whl", hash = "sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6", size = 301817 },
]
[[package]]
name = "kivy"
version = "2.3.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "docutils" },
{ name = "kivy-deps-angle", marker = "sys_platform == 'win32'" },
{ name = "kivy-deps-glew", marker = "sys_platform == 'win32'" },
{ name = "kivy-deps-sdl2", marker = "sys_platform == 'win32'" },
{ name = "kivy-garden" },
{ name = "pygments" },
{ name = "pypiwin32", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/b0/8d/262e921d3cdfdca4c0fba6834235aa2abb6d569f781924a850d191cd23f1/Kivy-2.3.0.tar.gz", hash = "sha256:e8b8610c7f8ef6db908a139d369b247378f18105c96981e492eab2b4706c79d5", size = 23965268 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/78/a6/e5fc5e60ad6de274f758407d1aa7983680d9c50ee837ea0f0cf95755ab3c/Kivy-2.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ec36ab3b74a525fa463b61895d3a2d76e9e4d206641233defae0d604e75df7ad", size = 11303894 },
{ url = "https://files.pythonhosted.org/packages/78/ef/3beacbd7ee3f7b0352842f6debf40366ef7b17bbfa36dc5932ab54b97662/Kivy-2.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd3e923397779776ac97ad87a1b9dd603b7f1c911a6ae04f1d1658712eaaf7cb", size = 22727711 },
{ url = "https://files.pythonhosted.org/packages/21/c6/95f74cb29f7c9d191ab77f2b3c45443fd844317f813cd1398ca5a83629b4/Kivy-2.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7766baac2509d699df84b284579fa25ee31383d48893660cd8dba62081453a29", size = 22936178 },
{ url = "https://files.pythonhosted.org/packages/04/bb/3080154a7caac49a9853c11012f2e760924ef672166e4fdf4863a225f815/Kivy-2.3.0-cp311-cp311-win32.whl", hash = "sha256:d654aaec6ddf9ca0edf73abd79e6aea423299c825a7ac432df17b031adaa7900", size = 4255107 },
{ url = "https://files.pythonhosted.org/packages/41/7d/31896c7d9e99a2d2cc03ebbffcb80114fca318b2b9ea1963eec2e9ef62b3/Kivy-2.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:33dca85a520fe958e7134b96025b0625eb769adfb8829359959c8b314b7bc8d4", size = 4599341 },
{ url = "https://files.pythonhosted.org/packages/53/d2/7916fda4b13069db5b4e164bfce7d9f8dfaa03ad8b74ef95d75c9256968b/Kivy-2.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7b1307521843d316265481d963344e85870ae5fa0c7d0881129749acfe61da7b", size = 11263537 },
{ url = "https://files.pythonhosted.org/packages/37/37/198850f839b56383b91471af9641b0fd6f70e188f2c6a0710147a1fbb426/Kivy-2.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:521105a4ca1db3e1203c3cdba4abe737533874d9c29bbfb1e1ae941238507440", size = 22652477 },
{ url = "https://files.pythonhosted.org/packages/ce/ba/3d9df24f504e23783f9be377f70f2cb8430ae1d67a437c5b8a5440d6d775/Kivy-2.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6672959894f652856d1dfcbcdcc09263de5f1cbed768b997dc8dcecab4385a4f", size = 23013742 },
{ url = "https://files.pythonhosted.org/packages/8a/18/e287b46d3e0998c3cd02b8d436e5cc0bf6c48ff9524f23715fb387501037/Kivy-2.3.0-cp312-cp312-win32.whl", hash = "sha256:cf0bccc95b1344b79fbfdf54155d40438490f9801fd77279f068a4f66db72e4e", size = 4224022 },
{ url = "https://files.pythonhosted.org/packages/f2/95/79dce0bbfb66895c68f0844d54212149665b8b1d5e1247151fa9514055d9/Kivy-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:710648c987a63e37c723e6622853efe0278767596631a38728a54474b2cb77f2", size = 4568579 },
]
[package.optional-dependencies]
base = [
{ name = "docutils" },
{ name = "kivy-deps-angle", marker = "sys_platform == 'win32'" },
{ name = "kivy-deps-glew", marker = "sys_platform == 'win32'" },
{ name = "kivy-deps-sdl2", marker = "sys_platform == 'win32'" },
{ name = "pillow" },
{ name = "pygments" },
{ name = "pypiwin32", marker = "sys_platform == 'win32'" },
{ name = "requests" },
]
[[package]]
name = "kivy-deps-angle"
version = "0.4.0"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e8/1a/038c0844aa46fc49dcf8a657e890729f8960bfcac72e9c09c10cc18b32f2/kivy_deps.angle-0.4.0-cp311-cp311-win32.whl", hash = "sha256:c3899ff1f3886b80b155955bad07bfa33bbebd97718cdf46dfd788dc467124bc", size = 4588968 },
{ url = "https://files.pythonhosted.org/packages/c3/b1/d1ca22a7b18e7b2b90152a78a0c2d09a96fdb924f87be1914d70d9bee543/kivy_deps.angle-0.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:574381d4e66f3198bc48aa10f238e7a3816ad56b80ec939f5d56fb33a378d0b1", size = 5130936 },
{ url = "https://files.pythonhosted.org/packages/c1/89/bb8b9a0fee422972fcf38a406ee9d0b1636968d7d2b5e97aafea8fdec251/kivy_deps.angle-0.4.0-cp312-cp312-win32.whl", hash = "sha256:4fa7a6366899fba13f7624baf4645787165f45731db08d14557da29c12ee48f0", size = 4588969 },
{ url = "https://files.pythonhosted.org/packages/c7/f2/d1500b880d3079454af0f935408ddd37cfce4fd11f53d0917e169d478869/kivy_deps.angle-0.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:668e670d4afd2551af0af2c627ceb0feac884bd799fb6a3dff78fdbfa2ea0451", size = 5130935 },
{ url = "https://files.pythonhosted.org/packages/47/7e/ad805773fb76f07cb1bdf5147e66ba264a94f5ac54553cd9dee809a161bb/kivy_deps.angle-0.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:9afbf702f8bb9a993c48f39c018ca3b4d2ec381a5d3f82fe65bdaa6af0bba29b", size = 5133260 },
]
[[package]]
name = "kivy-deps-glew"
version = "0.3.1"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/5d/4e/4300963410a49c4ab6eaadefce3e09c5e17421bc042b072862a27413b08c/kivy_deps.glew-0.3.1-cp311-cp311-win32.whl", hash = "sha256:ee2f80ef7ac70f4b61c50da8101b024308a8c59a57f7f25a6e09762b6c48f942", size = 126457 },
{ url = "https://files.pythonhosted.org/packages/d6/37/884034260818569547347cc2ba89780ff3f83a9ce6b9a894360c1d86e82c/kivy_deps.glew-0.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:22e155ec59ce717387f5d8804811206d200a023ba3d0bc9bbf1393ee28d0053e", size = 123574 },
{ url = "https://files.pythonhosted.org/packages/2b/3b/a960053dccd627e4483db4765fa84318a831cbf3af648aee20297ae56815/kivy_deps.glew-0.3.1-cp312-cp312-win32.whl", hash = "sha256:b64ee4e445a04bc7c848c0261a6045fc2f0944cc05d7f953e3860b49f2703424", size = 126458 },
{ url = "https://files.pythonhosted.org/packages/ad/3a/37a0a051dd3c7298d9e149a489457a6196665444c1a1473ad4fa617e05af/kivy_deps.glew-0.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:3acbbd30da05fc10c185b5d4bb75fbbc882a6ef2192963050c1c94d60a6e795a", size = 123573 },
{ url = "https://files.pythonhosted.org/packages/21/99/e3478c34afed7a820b3348ce7fefc53f2034fa340348dca57162695e69d9/kivy_deps.glew-0.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:f4aa8322078359862ccd9e16e5cea61976d75fb43125d87922e20c916fa31a11", size = 123595 },
]
[[package]]
name = "kivy-deps-sdl2"
version = "0.7.0"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/4d/d2/42a3f6f96c3a1a072fab5e0fbf58e7b27da5284023ae63383a5b55cb92cb/kivy_deps.sdl2-0.7.0-cp311-cp311-win32.whl", hash = "sha256:b727123d059c0c00c7d13cc1db8c8cfd0e48388cf24c11ec71cc6783811063c8", size = 3038046 },
{ url = "https://files.pythonhosted.org/packages/b7/ce/ca95180d14bb86cdbfe06774e6f5b2dd79bc8a88c525959e193dee81ec15/kivy_deps.sdl2-0.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:fd946ca4e36a403bcafbe202033948c17f54bd5d28a343d98efd61f976822855", size = 3508367 },
{ url = "https://files.pythonhosted.org/packages/47/4c/f4bc3d3ae226137e391642286421cc20baa54f3c3c6560289380b1253b9f/kivy_deps.sdl2-0.7.0-cp312-cp312-win32.whl", hash = "sha256:2a8f23fe201dea368b47adfecf8fb9133315788d314ad32f33000254aa2388e4", size = 3038046 },
{ url = "https://files.pythonhosted.org/packages/92/36/f7ccf4dd8ac06e25284e1ae9d5c4d3b5de87fbc05fb86a8ca83252bc52b7/kivy_deps.sdl2-0.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:e56d5d651f81545c24f920f6f6e5d67b4100802152521022ccde53e822c507a2", size = 3508366 },
]
[[package]]
name = "kivy-garden"
version = "0.1.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "requests" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/6a/55/cd1555bde62f809219cbc5d8a0836b0293399da2f4ba4e8ee84b6a7cc393/Kivy_Garden-0.1.5-py3-none-any.whl", hash = "sha256:ef50f44b96358cf10ac5665f27a4751bb34ef54051c54b93af891f80afe42929", size = 4623 },
]
[[package]]
name = "macholib"
version = "1.16.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "altgraph" },
]
sdist = { url = "https://files.pythonhosted.org/packages/95/ee/af1a3842bdd5902ce133bd246eb7ffd4375c38642aeb5dc0ae3a0329dfa2/macholib-1.16.3.tar.gz", hash = "sha256:07ae9e15e8e4cd9a788013d81f5908b3609aa76f9b1421bae9c4d7606ec86a30", size = 59309 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d1/5d/c059c180c84f7962db0aeae7c3b9303ed1d73d76f2bfbc32bc231c8be314/macholib-1.16.3-py2.py3-none-any.whl", hash = "sha256:0e315d7583d38b8c77e815b1ecbdbf504a8258d8b3e17b61165c6feb60d18f2c", size = 38094 },
]
[[package]]
name = "markovbot-gui"
version = "0.1.0"
source = { virtual = "." }
dependencies = [
{ name = "kivy", extra = ["base"] },
{ name = "kivy-deps-sdl2" },
{ name = "nltk" },
{ name = "pillow" },
{ name = "platformdirs" },
{ name = "pyinstaller" },
{ name = "twitchwebsocket" },
]
[package.dev-dependencies]
dev = [
{ name = "ruff" },
]
[package.metadata]
requires-dist = [
{ name = "kivy", extras = ["base"], specifier = ">=2.3.0" },
{ name = "kivy-deps-sdl2", specifier = ">=0.7.0" },
{ name = "nltk", specifier = ">=3.9.1" },
{ name = "pillow", specifier = ">=10.4.0" },
{ name = "platformdirs", specifier = ">=4.3.6" },
{ name = "pyinstaller", specifier = ">=6.11.0" },
{ name = "twitchwebsocket", specifier = ">=1.2.1" },
]
[package.metadata.requires-dev]
dev = [{ name = "ruff", specifier = ">=0.7.0" }]
[[package]]
name = "nltk"
version = "3.9.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "click" },
{ name = "joblib" },
{ name = "regex" },
{ name = "tqdm" },
]
sdist = { url = "https://files.pythonhosted.org/packages/3c/87/db8be88ad32c2d042420b6fd9ffd4a149f9a0d7f0e86b3f543be2eeeedd2/nltk-3.9.1.tar.gz", hash = "sha256:87d127bd3de4bd89a4f81265e5fa59cb1b199b27440175370f7417d2bc7ae868", size = 2904691 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/4d/66/7d9e26593edda06e8cb531874633f7c2372279c3b0f46235539fe546df8b/nltk-3.9.1-py3-none-any.whl", hash = "sha256:4fa26829c5b00715afe3061398a8989dc643b92ce7dd93fb4585a70930d168a1", size = 1505442 },
]
[[package]]
name = "packaging"
version = "24.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/51/65/50db4dda066951078f0a96cf12f4b9ada6e4b811516bf0262c0f4f7064d4/packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", size = 148788 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/08/aa/cc0199a5f0ad350994d660967a8efb233fe0416e4639146c089643407ce6/packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124", size = 53985 },
]
[[package]]
name = "pefile"
version = "2023.2.7"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/78/c5/3b3c62223f72e2360737fd2a57c30e5b2adecd85e70276879609a7403334/pefile-2023.2.7.tar.gz", hash = "sha256:82e6114004b3d6911c77c3953e3838654b04511b8b66e8583db70c65998017dc", size = 74854 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/55/26/d0ad8b448476d0a1e8d3ea5622dc77b916db84c6aa3cb1e1c0965af948fc/pefile-2023.2.7-py3-none-any.whl", hash = "sha256:da185cd2af68c08a6cd4481f7325ed600a88f6a813bad9dea07ab3ef73d8d8d6", size = 71791 },
]
[[package]]
name = "pillow"
version = "10.4.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/cd/74/ad3d526f3bf7b6d3f408b73fde271ec69dfac8b81341a318ce825f2b3812/pillow-10.4.0.tar.gz", hash = "sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06", size = 46555059 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a7/62/c9449f9c3043c37f73e7487ec4ef0c03eb9c9afc91a92b977a67b3c0bbc5/pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c", size = 3509265 },
{ url = "https://files.pythonhosted.org/packages/f4/5f/491dafc7bbf5a3cc1845dc0430872e8096eb9e2b6f8161509d124594ec2d/pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be", size = 3375655 },
{ url = "https://files.pythonhosted.org/packages/73/d5/c4011a76f4207a3c151134cd22a1415741e42fa5ddecec7c0182887deb3d/pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3", size = 4340304 },
{ url = "https://files.pythonhosted.org/packages/ac/10/c67e20445a707f7a610699bba4fe050583b688d8cd2d202572b257f46600/pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6", size = 4452804 },
{ url = "https://files.pythonhosted.org/packages/a9/83/6523837906d1da2b269dee787e31df3b0acb12e3d08f024965a3e7f64665/pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe", size = 4365126 },
{ url = "https://files.pythonhosted.org/packages/ba/e5/8c68ff608a4203085158cff5cc2a3c534ec384536d9438c405ed6370d080/pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319", size = 4533541 },
{ url = "https://files.pythonhosted.org/packages/f4/7c/01b8dbdca5bc6785573f4cee96e2358b0918b7b2c7b60d8b6f3abf87a070/pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d", size = 4471616 },
{ url = "https://files.pythonhosted.org/packages/c8/57/2899b82394a35a0fbfd352e290945440e3b3785655a03365c0ca8279f351/pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696", size = 4600802 },
{ url = "https://files.pythonhosted.org/packages/4d/d7/a44f193d4c26e58ee5d2d9db3d4854b2cfb5b5e08d360a5e03fe987c0086/pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496", size = 2235213 },
{ url = "https://files.pythonhosted.org/packages/c1/d0/5866318eec2b801cdb8c82abf190c8343d8a1cd8bf5a0c17444a6f268291/pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91", size = 2554498 },
{ url = "https://files.pythonhosted.org/packages/d4/c8/310ac16ac2b97e902d9eb438688de0d961660a87703ad1561fd3dfbd2aa0/pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22", size = 2243219 },
{ url = "https://files.pythonhosted.org/packages/05/cb/0353013dc30c02a8be34eb91d25e4e4cf594b59e5a55ea1128fde1e5f8ea/pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94", size = 3509350 },
{ url = "https://files.pythonhosted.org/packages/e7/cf/5c558a0f247e0bf9cec92bff9b46ae6474dd736f6d906315e60e4075f737/pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597", size = 3374980 },
{ url = "https://files.pythonhosted.org/packages/84/48/6e394b86369a4eb68b8a1382c78dc092245af517385c086c5094e3b34428/pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80", size = 4343799 },
{ url = "https://files.pythonhosted.org/packages/3b/f3/a8c6c11fa84b59b9df0cd5694492da8c039a24cd159f0f6918690105c3be/pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca", size = 4459973 },
{ url = "https://files.pythonhosted.org/packages/7d/1b/c14b4197b80150fb64453585247e6fb2e1d93761fa0fa9cf63b102fde822/pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef", size = 4370054 },
{ url = "https://files.pythonhosted.org/packages/55/77/40daddf677897a923d5d33329acd52a2144d54a9644f2a5422c028c6bf2d/pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a", size = 4539484 },
{ url = "https://files.pythonhosted.org/packages/40/54/90de3e4256b1207300fb2b1d7168dd912a2fb4b2401e439ba23c2b2cabde/pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b", size = 4477375 },
{ url = "https://files.pythonhosted.org/packages/13/24/1bfba52f44193860918ff7c93d03d95e3f8748ca1de3ceaf11157a14cf16/pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9", size = 4608773 },
{ url = "https://files.pythonhosted.org/packages/55/04/5e6de6e6120451ec0c24516c41dbaf80cce1b6451f96561235ef2429da2e/pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42", size = 2235690 },
{ url = "https://files.pythonhosted.org/packages/74/0a/d4ce3c44bca8635bd29a2eab5aa181b654a734a29b263ca8efe013beea98/pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a", size = 2554951 },
{ url = "https://files.pythonhosted.org/packages/b5/ca/184349ee40f2e92439be9b3502ae6cfc43ac4b50bc4fc6b3de7957563894/pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9", size = 2243427 },
{ url = "https://files.pythonhosted.org/packages/c3/00/706cebe7c2c12a6318aabe5d354836f54adff7156fd9e1bd6c89f4ba0e98/pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3", size = 3525685 },
{ url = "https://files.pythonhosted.org/packages/cf/76/f658cbfa49405e5ecbfb9ba42d07074ad9792031267e782d409fd8fe7c69/pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb", size = 3374883 },
{ url = "https://files.pythonhosted.org/packages/46/2b/99c28c4379a85e65378211971c0b430d9c7234b1ec4d59b2668f6299e011/pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70", size = 4339837 },
{ url = "https://files.pythonhosted.org/packages/f1/74/b1ec314f624c0c43711fdf0d8076f82d9d802afd58f1d62c2a86878e8615/pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be", size = 4455562 },
{ url = "https://files.pythonhosted.org/packages/4a/2a/4b04157cb7b9c74372fa867096a1607e6fedad93a44deeff553ccd307868/pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0", size = 4366761 },
{ url = "https://files.pythonhosted.org/packages/ac/7b/8f1d815c1a6a268fe90481232c98dd0e5fa8c75e341a75f060037bd5ceae/pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc", size = 4536767 },
{ url = "https://files.pythonhosted.org/packages/e5/77/05fa64d1f45d12c22c314e7b97398ffb28ef2813a485465017b7978b3ce7/pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a", size = 4477989 },
{ url = "https://files.pythonhosted.org/packages/12/63/b0397cfc2caae05c3fb2f4ed1b4fc4fc878f0243510a7a6034ca59726494/pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309", size = 4610255 },
{ url = "https://files.pythonhosted.org/packages/7b/f9/cfaa5082ca9bc4a6de66ffe1c12c2d90bf09c309a5f52b27759a596900e7/pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060", size = 2235603 },
{ url = "https://files.pythonhosted.org/packages/01/6a/30ff0eef6e0c0e71e55ded56a38d4859bf9d3634a94a88743897b5f96936/pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea", size = 2554972 },
{ url = "https://files.pythonhosted.org/packages/48/2c/2e0a52890f269435eee38b21c8218e102c621fe8d8df8b9dd06fabf879ba/pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d", size = 2243375 },
]
[[package]]
name = "platformdirs"
version = "4.3.6"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/13/fc/128cc9cb8f03208bdbf93d3aa862e16d376844a14f9a0ce5cf4507372de4/platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907", size = 21302 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/3c/a6/bc1012356d8ece4d66dd75c4b9fc6c1f6650ddd5991e421177d9f8f671be/platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb", size = 18439 },
]
[[package]]
name = "pygments"
version = "2.18.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/8e/62/8336eff65bcbc8e4cb5d05b55faf041285951b6e80f33e2bff2024788f31/pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199", size = 4891905 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/f7/3f/01c8b82017c199075f8f788d0d906b9ffbbc5a47dc9918a945e13d5a2bda/pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a", size = 1205513 },
]
[[package]]
name = "pyinstaller"
version = "6.11.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "altgraph" },
{ name = "macholib", marker = "sys_platform == 'darwin'" },
{ name = "packaging" },
{ name = "pefile", marker = "sys_platform == 'win32'" },
{ name = "pyinstaller-hooks-contrib" },
{ name = "pywin32-ctypes", marker = "sys_platform == 'win32'" },
{ name = "setuptools" },
]
sdist = { url = "https://files.pythonhosted.org/packages/b0/98/170e3117657366560f355c154a5f4e1b9e6aee53c4f35127fe0c9aecb0e9/pyinstaller-6.11.0.tar.gz", hash = "sha256:cb4d433a3db30d9d17cf5f2cf7bb4df80a788d493c1d67dd822dc5791d9864af", size = 4245274 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/4a/c5/17e1d226035d4fbc154908c12d96ab7ee385f8d788c96085adc912dbed5b/pyinstaller-6.11.0-py3-none-macosx_10_13_universal2.whl", hash = "sha256:6fd68a3c1207635c49326c54881b89d5c3bd9ba061bbc9daa58c0902db1be39e", size = 844428 },
{ url = "https://files.pythonhosted.org/packages/88/13/f15bc608d6c2cfcfb6f4628cd885e0f82da121210a375279a2bfac3c4c08/pyinstaller-6.11.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:eddd53f231e51adc65088eac4f40057ca803a990239828d4a9229407fb866239", size = 709617 },
{ url = "https://files.pythonhosted.org/packages/2e/ca/7276e3a19a9ab378eecafbcbead9183b344267c380e3a9c18b3040fd331d/pyinstaller-6.11.0-py3-none-manylinux2014_i686.whl", hash = "sha256:e6d229009e815542833fe00332b589aa6984a06f794dc16f2ce1acab1c567590", size = 711883 },
{ url = "https://files.pythonhosted.org/packages/68/7a/59f690b142db96f378e24c72a8a396ea2af6a41e66b06ebba9427d0b12fd/pyinstaller-6.11.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:7d2cd2ebdcd6860f8a4abe2977264a7b6d260a7147047008971c7cfc66a656a4", size = 717051 },
{ url = "https://files.pythonhosted.org/packages/66/d4/30e27bca5d1ee919b3ea2c727315892ebae2dad32203771c66ca42eefac2/pyinstaller-6.11.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:d9ec6d4398b4eebc1d4c00437716264ba8406bc2746f594e253070a82378a584", size = 707434 },
{ url = "https://files.pythonhosted.org/packages/58/a7/0e925c9e4351872e95e3cf96e58c859ecd43152aee29211d79ec9becb26e/pyinstaller-6.11.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:04f71828aa9531ab18c9656985c1f09b83d10332c73a1f4a113a48b491906955", size = 707917 },
{ url = "https://files.pythonhosted.org/packages/d6/c8/05980cdaf7124b26114acd80568d3f74c515057cc1c86886025e8b09a3df/pyinstaller-6.11.0-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:a843d470768d68b05684ccf4860c45b2eb13727f41667c0b2cd8f57ae231bd18", size = 712337 },
{ url = "https://files.pythonhosted.org/packages/7b/8f/357afd2b1b76961e43f727e7e2f8c01760b2f1497b0279ed4fdf4fe7e35e/pyinstaller-6.11.0-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:963dedc1f37144a4385f58f7f65f1c69c004a67faae522a2085b5ddb230c908b", size = 708315 },
{ url = "https://files.pythonhosted.org/packages/d5/4c/4e2c10b746e5cedf806c2df3a890faf2506cb4388e94672d59374d8dc17f/pyinstaller-6.11.0-py3-none-win32.whl", hash = "sha256:c71024c8a19c7b221b9152b2baff4c3ba849cada68dcdd34382ba09f0107451f", size = 1277392 },
{ url = "https://files.pythonhosted.org/packages/f0/45/8e609d66361cfd17ba89e07ae5ed1ca858d084e47f0fad3f9c277c96801b/pyinstaller-6.11.0-py3-none-win_amd64.whl", hash = "sha256:0e229610c22b96d741d905706f9496af472c1a9216a118988f393c98ecc3f51f", size = 1337562 },
{ url = "https://files.pythonhosted.org/packages/26/77/203dda3d02fe5c899ffbbdb1942f0adc750dabbe25e5a1558354fc672dd8/pyinstaller-6.11.0-py3-none-win_arm64.whl", hash = "sha256:a5f716bb507517912fda39d109dead91fc0dd2e7b2859562522b63c61aa21676", size = 1264356 },
]
[[package]]
name = "pyinstaller-hooks-contrib"
version = "2024.9"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "packaging" },
{ name = "setuptools" },
]
sdist = { url = "https://files.pythonhosted.org/packages/fe/ca/218b8dc15d48e69fafef69a97a4455db7a01c01aea4eb4bf1ae8a6ad7ef9/pyinstaller_hooks_contrib-2024.9.tar.gz", hash = "sha256:4793869f370d1dc4806c101efd2890e3c3e703467d8d27bb5a3db005ebfb008d", size = 139956 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/57/b4/23338112c76750f494a2fded981d40ebd723585b2b6eadde6a821ddc7208/pyinstaller_hooks_contrib-2024.9-py3-none-any.whl", hash = "sha256:1ddf9ba21d586afa84e505bb5c65fca10b22500bf3fdb89ee2965b99da53b891", size = 336956 },
]
[[package]]
name = "pypiwin32"
version = "223"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "pywin32" },
]
sdist = { url = "https://files.pythonhosted.org/packages/13/e8/4f38eb30c4dae36634a53c5b2cd73b517ea3607e10d00f61f2494449cec0/pypiwin32-223.tar.gz", hash = "sha256:71be40c1fbd28594214ecaecb58e7aa8b708eabfa0125c8a109ebd51edbd776a", size = 622 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d0/1b/2f292bbd742e369a100c91faa0483172cd91a1a422a6692055ac920946c5/pypiwin32-223-py3-none-any.whl", hash = "sha256:67adf399debc1d5d14dffc1ab5acacb800da569754fafdc576b2a039485aa775", size = 1674 },
]
[[package]]
name = "pywin32"
version = "308"
source = { registry = "https://pypi.org/simple" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/eb/e2/02652007469263fe1466e98439831d65d4ca80ea1a2df29abecedf7e47b7/pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a", size = 5928156 },
{ url = "https://files.pythonhosted.org/packages/48/ef/f4fb45e2196bc7ffe09cad0542d9aff66b0e33f6c0954b43e49c33cad7bd/pywin32-308-cp311-cp311-win_amd64.whl", hash = "sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b", size = 6559559 },
{ url = "https://files.pythonhosted.org/packages/79/ef/68bb6aa865c5c9b11a35771329e95917b5559845bd75b65549407f9fc6b4/pywin32-308-cp311-cp311-win_arm64.whl", hash = "sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6", size = 7972495 },
{ url = "https://files.pythonhosted.org/packages/00/7c/d00d6bdd96de4344e06c4afbf218bc86b54436a94c01c71a8701f613aa56/pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897", size = 5939729 },
{ url = "https://files.pythonhosted.org/packages/21/27/0c8811fbc3ca188f93b5354e7c286eb91f80a53afa4e11007ef661afa746/pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47", size = 6543015 },
{ url = "https://files.pythonhosted.org/packages/9d/0f/d40f8373608caed2255781a3ad9a51d03a594a1248cd632d6a298daca693/pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091", size = 7976033 },
{ url = "https://files.pythonhosted.org/packages/a9/a4/aa562d8935e3df5e49c161b427a3a2efad2ed4e9cf81c3de636f1fdddfd0/pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed", size = 5938579 },
{ url = "https://files.pythonhosted.org/packages/c7/50/b0efb8bb66210da67a53ab95fd7a98826a97ee21f1d22949863e6d588b22/pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4", size = 6542056 },
{ url = "https://files.pythonhosted.org/packages/26/df/2b63e3e4f2df0224f8aaf6d131f54fe4e8c96400eb9df563e2aae2e1a1f9/pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd", size = 7974986 },
]
[[package]]
name = "pywin32-ctypes"
version = "0.2.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/85/9f/01a1a99704853cb63f253eea009390c88e7131c67e66a0a02099a8c917cb/pywin32-ctypes-0.2.3.tar.gz", hash = "sha256:d162dc04946d704503b2edc4d55f3dba5c1d539ead017afa00142c38b9885755", size = 29471 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/de/3d/8161f7711c017e01ac9f008dfddd9410dff3674334c233bde66e7ba65bbf/pywin32_ctypes-0.2.3-py3-none-any.whl", hash = "sha256:8a1513379d709975552d202d942d9837758905c8d01eb82b8bcc30918929e7b8", size = 30756 },
]
[[package]]
name = "regex"
version = "2024.9.11"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f9/38/148df33b4dbca3bd069b963acab5e0fa1a9dbd6820f8c322d0dd6faeff96/regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd", size = 399403 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/86/a1/d526b7b6095a0019aa360948c143aacfeb029919c898701ce7763bbe4c15/regex-2024.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df", size = 482483 },
{ url = "https://files.pythonhosted.org/packages/32/d9/bfdd153179867c275719e381e1e8e84a97bd186740456a0dcb3e7125c205/regex-2024.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268", size = 287442 },
{ url = "https://files.pythonhosted.org/packages/33/c4/60f3370735135e3a8d673ddcdb2507a8560d0e759e1398d366e43d000253/regex-2024.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad", size = 284561 },
{ url = "https://files.pythonhosted.org/packages/b1/51/91a5ebdff17f9ec4973cb0aa9d37635efec1c6868654bbc25d1543aca4ec/regex-2024.9.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679", size = 791779 },
{ url = "https://files.pythonhosted.org/packages/07/4a/022c5e6f0891a90cd7eb3d664d6c58ce2aba48bff107b00013f3d6167069/regex-2024.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4", size = 832605 },
{ url = "https://files.pythonhosted.org/packages/ac/1c/3793990c8c83ca04e018151ddda83b83ecc41d89964f0f17749f027fc44d/regex-2024.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664", size = 818556 },
{ url = "https://files.pythonhosted.org/packages/e9/5c/8b385afbfacb853730682c57be56225f9fe275c5bf02ac1fc88edbff316d/regex-2024.9.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50", size = 792808 },
{ url = "https://files.pythonhosted.org/packages/9b/8b/a4723a838b53c771e9240951adde6af58c829fb6a6a28f554e8131f53839/regex-2024.9.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199", size = 781115 },
{ url = "https://files.pythonhosted.org/packages/83/5f/031a04b6017033d65b261259c09043c06f4ef2d4eac841d0649d76d69541/regex-2024.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4", size = 778155 },
{ url = "https://files.pythonhosted.org/packages/fd/cd/4660756070b03ce4a66663a43f6c6e7ebc2266cc6b4c586c167917185eb4/regex-2024.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd", size = 784614 },
{ url = "https://files.pythonhosted.org/packages/93/8d/65b9bea7df120a7be8337c415b6d256ba786cbc9107cebba3bf8ff09da99/regex-2024.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f", size = 853744 },
{ url = "https://files.pythonhosted.org/packages/96/a7/fba1eae75eb53a704475baf11bd44b3e6ccb95b316955027eb7748f24ef8/regex-2024.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96", size = 855890 },
{ url = "https://files.pythonhosted.org/packages/45/14/d864b2db80a1a3358534392373e8a281d95b28c29c87d8548aed58813910/regex-2024.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1", size = 781887 },
{ url = "https://files.pythonhosted.org/packages/4d/a9/bfb29b3de3eb11dc9b412603437023b8e6c02fb4e11311863d9bf62c403a/regex-2024.9.11-cp311-cp311-win32.whl", hash = "sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9", size = 261644 },
{ url = "https://files.pythonhosted.org/packages/c7/ab/1ad2511cf6a208fde57fafe49829cab8ca018128ab0d0b48973d8218634a/regex-2024.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf", size = 274033 },
{ url = "https://files.pythonhosted.org/packages/6e/92/407531450762bed778eedbde04407f68cbd75d13cee96c6f8d6903d9c6c1/regex-2024.9.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7", size = 483590 },
{ url = "https://files.pythonhosted.org/packages/8e/a2/048acbc5ae1f615adc6cba36cc45734e679b5f1e4e58c3c77f0ed611d4e2/regex-2024.9.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231", size = 288175 },
{ url = "https://files.pythonhosted.org/packages/8a/ea/909d8620329ab710dfaf7b4adee41242ab7c9b95ea8d838e9bfe76244259/regex-2024.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d", size = 284749 },
{ url = "https://files.pythonhosted.org/packages/ca/fa/521eb683b916389b4975337873e66954e0f6d8f91bd5774164a57b503185/regex-2024.9.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64", size = 795181 },
{ url = "https://files.pythonhosted.org/packages/28/db/63047feddc3280cc242f9c74f7aeddc6ee662b1835f00046f57d5630c827/regex-2024.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42", size = 835842 },
{ url = "https://files.pythonhosted.org/packages/e3/94/86adc259ff8ec26edf35fcca7e334566c1805c7493b192cb09679f9c3dee/regex-2024.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766", size = 823533 },
{ url = "https://files.pythonhosted.org/packages/29/52/84662b6636061277cb857f658518aa7db6672bc6d1a3f503ccd5aefc581e/regex-2024.9.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a", size = 797037 },
{ url = "https://files.pythonhosted.org/packages/c3/2a/cd4675dd987e4a7505f0364a958bc41f3b84942de9efaad0ef9a2646681c/regex-2024.9.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9", size = 784106 },
{ url = "https://files.pythonhosted.org/packages/6f/75/3ea7ec29de0bbf42f21f812f48781d41e627d57a634f3f23947c9a46e303/regex-2024.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d", size = 782468 },
{ url = "https://files.pythonhosted.org/packages/d3/67/15519d69b52c252b270e679cb578e22e0c02b8dd4e361f2b04efcc7f2335/regex-2024.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822", size = 790324 },
{ url = "https://files.pythonhosted.org/packages/9c/71/eff77d3fe7ba08ab0672920059ec30d63fa7e41aa0fb61c562726e9bd721/regex-2024.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0", size = 860214 },
{ url = "https://files.pythonhosted.org/packages/81/11/e1bdf84a72372e56f1ea4b833dd583b822a23138a616ace7ab57a0e11556/regex-2024.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a", size = 859420 },
{ url = "https://files.pythonhosted.org/packages/ea/75/9753e9dcebfa7c3645563ef5c8a58f3a47e799c872165f37c55737dadd3e/regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a", size = 787333 },
{ url = "https://files.pythonhosted.org/packages/bc/4e/ba1cbca93141f7416624b3ae63573e785d4bc1834c8be44a8f0747919eca/regex-2024.9.11-cp312-cp312-win32.whl", hash = "sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776", size = 262058 },
{ url = "https://files.pythonhosted.org/packages/6e/16/efc5f194778bf43e5888209e5cec4b258005d37c613b67ae137df3b89c53/regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009", size = 273526 },
{ url = "https://files.pythonhosted.org/packages/93/0a/d1c6b9af1ff1e36832fe38d74d5c5bab913f2bdcbbd6bc0e7f3ce8b2f577/regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784", size = 483376 },
{ url = "https://files.pythonhosted.org/packages/a4/42/5910a050c105d7f750a72dcb49c30220c3ae4e2654e54aaaa0e9bc0584cb/regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36", size = 288112 },
{ url = "https://files.pythonhosted.org/packages/8d/56/0c262aff0e9224fa7ffce47b5458d373f4d3e3ff84e99b5ff0cb15e0b5b2/regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92", size = 284608 },
{ url = "https://files.pythonhosted.org/packages/b9/54/9fe8f9aec5007bbbbce28ba3d2e3eaca425f95387b7d1e84f0d137d25237/regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86", size = 795337 },
{ url = "https://files.pythonhosted.org/packages/b2/e7/6b2f642c3cded271c4f16cc4daa7231be544d30fe2b168e0223724b49a61/regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85", size = 835848 },
{ url = "https://files.pythonhosted.org/packages/cd/9e/187363bdf5d8c0e4662117b92aa32bf52f8f09620ae93abc7537d96d3311/regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963", size = 823503 },
{ url = "https://files.pythonhosted.org/packages/f8/10/601303b8ee93589f879664b0cfd3127949ff32b17f9b6c490fb201106c4d/regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6", size = 797049 },
{ url = "https://files.pythonhosted.org/packages/ef/1c/ea200f61ce9f341763f2717ab4daebe4422d83e9fd4ac5e33435fd3a148d/regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802", size = 784144 },
{ url = "https://files.pythonhosted.org/packages/d8/5c/d2429be49ef3292def7688401d3deb11702c13dcaecdc71d2b407421275b/regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29", size = 782483 },
{ url = "https://files.pythonhosted.org/packages/12/d9/cbc30f2ff7164f3b26a7760f87c54bf8b2faed286f60efd80350a51c5b99/regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8", size = 790320 },
{ url = "https://files.pythonhosted.org/packages/19/1d/43ed03a236313639da5a45e61bc553c8d41e925bcf29b0f8ecff0c2c3f25/regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84", size = 860435 },
{ url = "https://files.pythonhosted.org/packages/34/4f/5d04da61c7c56e785058a46349f7285ae3ebc0726c6ea7c5c70600a52233/regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554", size = 859571 },
{ url = "https://files.pythonhosted.org/packages/12/7f/8398c8155a3c70703a8e91c29532558186558e1aea44144b382faa2a6f7a/regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8", size = 787398 },
{ url = "https://files.pythonhosted.org/packages/58/3a/f5903977647a9a7e46d5535e9e96c194304aeeca7501240509bde2f9e17f/regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8", size = 262035 },
{ url = "https://files.pythonhosted.org/packages/ff/80/51ba3a4b7482f6011095b3a036e07374f64de180b7d870b704ed22509002/regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f", size = 273510 },
]
[[package]]
name = "requests"
version = "2.32.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "certifi" },
{ name = "charset-normalizer" },
{ name = "idna" },
{ name = "urllib3" },
]
sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 },
]
[[package]]
name = "ruff"
version = "0.7.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/2c/c7/f3367d1da5d568192968c5c9e7f3d51fb317b9ac04828493b23d8fce8ce6/ruff-0.7.0.tar.gz", hash = "sha256:47a86360cf62d9cd53ebfb0b5eb0e882193fc191c6d717e8bef4462bc3b9ea2b", size = 3146645 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/48/59/a0275a0913f3539498d116046dd679cd657fe3b7caf5afe1733319414932/ruff-0.7.0-py3-none-linux_armv6l.whl", hash = "sha256:0cdf20c2b6ff98e37df47b2b0bd3a34aaa155f59a11182c1303cce79be715628", size = 10434007 },
{ url = "https://files.pythonhosted.org/packages/cd/94/da0ba5f956d04c90dd899209904210600009dcda039ce840d83eb4298c7d/ruff-0.7.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:496494d350c7fdeb36ca4ef1c9f21d80d182423718782222c29b3e72b3512737", size = 10048066 },
{ url = "https://files.pythonhosted.org/packages/57/1d/e5cc149ecc46e4f203403a79ccd170fad52d316f98b87d0f63b1945567db/ruff-0.7.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:214b88498684e20b6b2b8852c01d50f0651f3cc6118dfa113b4def9f14faaf06", size = 9711389 },
{ url = "https://files.pythonhosted.org/packages/05/67/fb7ea2c869c539725a16c5bc294e9aa34f8b1b6fe702f1d173a5da517c2b/ruff-0.7.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:630fce3fefe9844e91ea5bbf7ceadab4f9981f42b704fae011bb8efcaf5d84be", size = 10755174 },
{ url = "https://files.pythonhosted.org/packages/5f/f0/13703bc50536a0613ea3dce991116e5f0917a1f05528c6ab738b33c08d3f/ruff-0.7.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:211d877674e9373d4bb0f1c80f97a0201c61bcd1e9d045b6e9726adc42c156aa", size = 10196040 },
{ url = "https://files.pythonhosted.org/packages/99/c1/77b04ab20324ab03d333522ee55fb0f1c38e3ca0d326b4905f82ce6b6c70/ruff-0.7.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:194d6c46c98c73949a106425ed40a576f52291c12bc21399eb8f13a0f7073495", size = 11033684 },
{ url = "https://files.pythonhosted.org/packages/f2/97/f463334dc4efeea3551cd109163df15561c18a1c3ec13d51643740fd36ba/ruff-0.7.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:82c2579b82b9973a110fab281860403b397c08c403de92de19568f32f7178598", size = 11803700 },
{ url = "https://files.pythonhosted.org/packages/b4/f8/a31d40c4bb92933d376a53e7c5d0245d9b27841357e4820e96d38f54b480/ruff-0.7.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9af971fe85dcd5eaed8f585ddbc6bdbe8c217fb8fcf510ea6bca5bdfff56040e", size = 11347848 },
{ url = "https://files.pythonhosted.org/packages/83/62/0c133b35ddaf91c65c30a56718b80bdef36bfffc35684d29e3a4878e0ea3/ruff-0.7.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b641c7f16939b7d24b7bfc0be4102c56562a18281f84f635604e8a6989948914", size = 12480632 },
{ url = "https://files.pythonhosted.org/packages/46/96/464058dd1d980014fb5aa0a1254e78799efb3096fc7a4823cd66a1621276/ruff-0.7.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d71672336e46b34e0c90a790afeac8a31954fd42872c1f6adaea1dff76fd44f9", size = 10941919 },
{ url = "https://files.pythonhosted.org/packages/a0/f7/bda37ec77986a435dde44e1f59374aebf4282a5fa9cf17735315b847141f/ruff-0.7.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ab7d98c7eed355166f367597e513a6c82408df4181a937628dbec79abb2a1fe4", size = 10745519 },
{ url = "https://files.pythonhosted.org/packages/c2/33/5f77fc317027c057b61a848020a47442a1cbf12e592df0e41e21f4d0f3bd/ruff-0.7.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1eb54986f770f49edb14f71d33312d79e00e629a57387382200b1ef12d6a4ef9", size = 10284872 },
{ url = "https://files.pythonhosted.org/packages/ff/50/98aec292bc9537f640b8d031c55f3414bf15b6ed13b3e943fed75ac927b9/ruff-0.7.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:dc452ba6f2bb9cf8726a84aa877061a2462afe9ae0ea1d411c53d226661c601d", size = 10600334 },
{ url = "https://files.pythonhosted.org/packages/f2/85/12607ae3201423a179b8cfadc7cb1e57d02cd0135e45bd0445acb4cef327/ruff-0.7.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:4b406c2dce5be9bad59f2de26139a86017a517e6bcd2688da515481c05a2cb11", size = 11017333 },
{ url = "https://files.pythonhosted.org/packages/d4/7f/3b85a56879e705d5f46ec14daf8a439fca05c3081720fe3dc3209100922d/ruff-0.7.0-py3-none-win32.whl", hash = "sha256:f6c968509f767776f524a8430426539587d5ec5c662f6addb6aa25bc2e8195ec", size = 8570962 },
{ url = "https://files.pythonhosted.org/packages/39/9f/c5ee2b40d377354dabcc23cff47eb299de4b4d06d345068f8f8cc1eadac8/ruff-0.7.0-py3-none-win_amd64.whl", hash = "sha256:ff4aabfbaaba880e85d394603b9e75d32b0693152e16fa659a3064a85df7fce2", size = 9365544 },
{ url = "https://files.pythonhosted.org/packages/89/8b/ee1509f60148cecba644aa718f6633216784302458340311898aaf0b1bed/ruff-0.7.0-py3-none-win_arm64.whl", hash = "sha256:10842f69c245e78d6adec7e1db0a7d9ddc2fff0621d730e61657b64fa36f207e", size = 8695763 },
]
[[package]]
name = "setuptools"
version = "75.2.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/07/37/b31be7e4b9f13b59cde9dcaeff112d401d49e0dc5b37ed4a9fc8fb12f409/setuptools-75.2.0.tar.gz", hash = "sha256:753bb6ebf1f465a1912e19ed1d41f403a79173a9acf66a42e7e6aec45c3c16ec", size = 1350308 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/31/2d/90165d51ecd38f9a02c6832198c13a4e48652485e2ccf863ebb942c531b6/setuptools-75.2.0-py3-none-any.whl", hash = "sha256:a7fcb66f68b4d9e8e66b42f9876150a3371558f98fa32222ffaa5bced76406f8", size = 1249825 },
]
[[package]]
name = "tqdm"
version = "4.66.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "platform_system == 'Windows'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/58/83/6ba9844a41128c62e810fddddd72473201f3eacde02046066142a2d96cc5/tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad", size = 169504 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/48/5d/acf5905c36149bbaec41ccf7f2b68814647347b72075ac0b1fe3022fdc73/tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd", size = 78351 },
]
[[package]]
name = "twitchwebsocket"
version = "1.2.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/28/06/89e4ff964a7c9ca9e098d9cf9dd4fb09b3e32c5578d51f0ab27f0164bd80/TwitchWebsocket-1.2.1.tar.gz", hash = "sha256:b43d6981a691468ee49eff261d9120a75f2a4d895fabeb9813910e0b32742cef", size = 15029 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/fe/d8/4dcd312dd333f1e0664afb9a91672a684d188eb2dc18c1e6deb4901364d7/TwitchWebsocket-1.2.1-py2.py3-none-any.whl", hash = "sha256:f24a12b7bf68d9e348abeb317b63710813b44e8aadbebacdfd1077a8e5bcdfbd", size = 11897 },
]
[[package]]
name = "urllib3"
version = "2.2.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/ed/63/22ba4ebfe7430b76388e7cd448d5478814d3032121827c12a2cc287e2260/urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9", size = 300677 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ce/d9/5f4c13cecde62396b0d3fe530a50ccea91e7dfc1ccf0e09c228841bb5ba8/urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", size = 126338 },
]