Mirror of https://github.com/xuthus83/LittlePaimon.git, synced 2024-10-21 16:27:15 +08:00
🔥 Add group chat learning; fix the join-group welcome settings and the character Dori (多莉)
parent 4e6666fd01
commit 276f0732aa
@@ -7,7 +7,7 @@ from LittlePaimon.utils.migration import migrate_database
from LittlePaimon.utils.tool import check_resource

DRIVER = get_driver()
__version__ = '3.0.0beta4'
__version__ = '3.0.0beta5'

try:
SUPERUSERS: List[int] = [int(s) for s in DRIVER.config.superusers]
@@ -52,9 +52,9 @@
"10000060": ["夜兰", "夜阑", "叶兰"],
"10000065": ["久岐忍", "忍者", "阿卡丽", "97忍", "97人"],
"10000059": ["鹿野院平藏", "近战法师", "平藏", "小鹿"],
"10000067": ["柯莱"],
"10000068": ["多莉", "克隆羊"],
"10000069": ["提纳里", "小驴"]
"10000067": ["柯莱", "科莱"],
"10000068": ["多莉", "克隆羊", "商人"],
"10000069": ["提纳里", "小驴", "小提"]
},
"武器": {
"磐岩结绿": [
@@ -1,55 +0,0 @@
{
"神里绫华": "冰",
"琴": "风",
"丽莎": "雷",
"芭芭拉": "水",
"凯亚": "冰",
"迪卢克": "火",
"雷泽": "雷",
"安柏": "火",
"温迪": "风",
"香菱": "火",
"北斗": "雷",
"行秋": "水",
"魈": "风",
"凝光": "岩",
"可莉": "火",
"钟离": "岩",
"菲谢尔": "雷",
"班尼特": "火",
"达达利亚": "水",
"诺艾尔": "岩",
"七七": "冰",
"重云": "冰",
"甘雨": "冰",
"阿贝多": "岩",
"迪奥娜": "冰",
"莫娜": "水",
"刻晴": "雷",
"砂糖": "风",
"辛焱": "火",
"罗莎莉亚": "冰",
"胡桃": "火",
"枫原万叶": "风",
"烟绯": "火",
"宵宫": "火",
"托马": "火",
"优菈": "冰",
"雷电将军": "雷",
"早柚": "风",
"珊瑚宫心海": "水",
"五郎": "岩",
"九条裟罗": "雷",
"荒泷一斗": "岩",
"八重神子": "雷",
"夜兰": "水",
"埃洛伊": "冰",
"申鹤": "冰",
"云堇": "岩",
"久岐忍": "雷",
"神里绫人": "水",
"鹿野院平藏": "风",
"提纳里": "草",
"柯莱": "草",
"多莉": "雷"
}
@@ -553,6 +553,15 @@
"元素精通": 1,
"元素充能效率": 1
}
},
"多莉": {
"常规": {
"暴击率": 1,
"暴击伤害": 1,
"攻击力": 1,
"元素精通": 1,
"元素充能效率": 1
}
}
},
"Talent": {
@@ -1,86 +0,0 @@
{
"胡桃": "胡桃",
"托马": "托马",
"宵宫": "宵宫",
"烟绯": "烟绯",
"可莉": "可莉",
"迪卢克": "姥爷",
"辛焱": "辛焱",
"安柏": "安柏",
"香菱": "香菱",
"班尼特": "点赞",
"珊瑚宫心海": "心海",
"达达利亚": "公子",
"行秋": "行秋",
"莫娜": "莫娜",
"芭芭拉": "牧师",
"申鹤": "申鹤",
"神里绫华": "绫华",
"优菈": "优菈",
"甘雨": "甘雨",
"凯亚": "凯亚",
"重云": "重云",
"七七": "七七",
"迪奥娜": "冰猫",
"罗莎莉亚": "修女",
"埃洛伊": "异界",
"八重神子": "神子",
"雷电将军": "雷神",
"九条裟罗": "九条",
"刻晴": "刻晴",
"雷泽": "雷泽",
"菲谢尔": "皇女",
"丽莎": "丽莎",
"北斗": "北斗",
"雷主": "雷主",
"早柚": "早柚",
"枫原万叶": "万叶",
"魈": "魈君",
"温迪": "温迪",
"琴": "团长",
"砂糖": "砂糖",
"风主": "风主",
"荒泷一斗": "一斗",
"五郎": "五郎",
"阿贝多": "白垩",
"钟离": "钟离",
"诺艾尔": "女仆",
"凝光": "凝光",
"岩主": "岩主",
"云堇": "云堇",
"神里绫人": "绫人",
"夜兰": "夜兰",
"久岐忍": "阿忍",
"鹿野院平藏": "鹿野",
"神乐之真意": "神乐",
"息灾": "息灾",
"赤角石溃杵": "赤角",
"松籁响起之时": "松籁",
"苍古自由之誓": "苍古",
"终末嗟叹之诗": "终末",
"冬极白星": "冬极",
"不灭月华": "月华",
"薙草之稻光": "薙刀",
"飞雷之弦振": "飞雷",
"雾切之回光": "雾切",
"天空之刃": "空刃",
"狼的末路": "狼末",
"阿莫斯之弓": "痛苦",
"天空之卷": "空卷",
"天空之傲": "空傲",
"和璞鸢": "鸟枪",
"四风原典": "四风",
"天空之翼": "空翼",
"天空之脊": "空脊",
"尘世之锁": "尘锁",
"无工之剑": "无工",
"贯虹之槊": "贯虹",
"斫峰之刃": "斫峰",
"磐岩结绿": "绿剑",
"护摩之杖": "护摩",
"波乱月白经津": "波乱",
"若水": "若水",
"鹿野院平藏": "平藏",
"提纳里": "小驴",
"柯莱": "柯莱"
}
@@ -25,6 +25,7 @@ GENSHIN_DB_PATH = DATABASE_PATH / 'genshin.db'
SUB_DB_PATH = DATABASE_PATH / 'subscription.db'
GENSHIN_VOICE_DB_PATH = DATABASE_PATH / 'genshin_voice.db'
MANAGER_DB_PATH = DATABASE_PATH / 'manager.db'
LEARNING_CHAT_DB_PATH = DATABASE_PATH / 'learning_chat.db'
# enka制图资源路径
ENKA_RES = RESOURCE_BASE_PATH / 'enka_card'
# 原神表情路径

@@ -48,5 +49,7 @@ PAIMON_CONFIG = Path() / 'config' / 'paimon_config.yml'
# 问候语配置文件
GREET_CONFIG = Path() / 'config' / 'paimon_greet.yml'
GREET_CONFIG_DEFAULT = Path() / 'config' / 'paimon_greet_default.yml'
# 群聊学习配置文件
LEARNING_CHAT_CONFIG = Path() / 'config' / 'learning_chat.yml'
@@ -1,6 +1,6 @@
from tortoise import Tortoise
from nonebot.log import logger
from LittlePaimon.config import GENSHIN_DB_PATH, SUB_DB_PATH, GENSHIN_VOICE_DB_PATH, MANAGER_DB_PATH
from LittlePaimon.config import GENSHIN_DB_PATH, SUB_DB_PATH, GENSHIN_VOICE_DB_PATH, MANAGER_DB_PATH, LEARNING_CHAT_DB_PATH

DATABASE = {

@@ -21,6 +21,10 @@ DATABASE = {
"engine": "tortoise.backends.sqlite",
"credentials": {"file_path": MANAGER_DB_PATH},
},
'learning_chat': {
"engine": "tortoise.backends.sqlite",
"credentials": {"file_path": LEARNING_CHAT_DB_PATH},
},
},
"apps": {
"genshin": {

@@ -38,6 +42,10 @@ DATABASE = {
"manager": {
"models": ['LittlePaimon.database.models.manager'],
"default_connection": "manager",
},
"learning_chat": {
"models": ['LittlePaimon.database.models.learning_chat'],
"default_connection": "learning_chat",
}
},
}
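(Aside, not part of the diff: a minimal, hedged sketch of how a Tortoise ORM config dict like DATABASE above is typically consumed at startup; the actual init hook in LittlePaimon may be named and wired differently.)

from tortoise import Tortoise

async def connect_database():
    # registers every connection and app declared in DATABASE,
    # including the new 'learning_chat' sqlite connection added by this commit
    await Tortoise.init(config=DATABASE)
    # creates missing tables (e.g. Message/Context/BlackList) on first run
    await Tortoise.generate_schemas()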
@@ -5,3 +5,4 @@ from .subscription import *
from .genshin_voice import *
from .manager import *
from .abyss_info import *
from .learning_chat import *
LittlePaimon/database/models/learning_chat.py (new file, 171 lines)
@@ -0,0 +1,171 @@
import time
from typing import List, Optional, Iterator
from pydantic import BaseModel

try:
    import ujson as json
except ImportError:
    import json

from tortoise import fields
from tortoise.models import Model


class BanWord(BaseModel):
    keywords: str
    group_id: int
    reason: Optional[str]
    time: Optional[int]


class BanWords(BaseModel):
    bans: List[BanWord] = []

    def __len__(self):
        return len(self.bans)

    def __getitem__(self, item):
        return self.bans[item]

    def __setitem__(self, key, value):
        self.bans[key] = value

    def __delitem__(self, key):
        del self.bans[key]

    def __iter__(self) -> Iterator[BanWord]:
        return iter(self.bans)

    def __reversed__(self):
        return reversed(self.bans)

    def append(self, ban: BanWord):
        self.bans.append(ban)

    def index(self, ban: BanWord) -> int:
        return self.bans.index(ban)

    # @staticmethod
    # def tortoise_decoder(text: str) -> List["BanWord"]:
    #     print('ban_decoder:', text)
    #     return [BanWord.parse_obj(item) for item in json.loads(text)]
    #
    # @staticmethod
    # def tortoise_encoder(models: List["BanWord"]) -> str:
    #     print('ban_encoder:', models)
    #     if not models:
    #         return ''
    #     elif isinstance(models[0], BanWord):
    #         return json.dumps([model.dict() for model in models])


class Answer(BaseModel):
    keywords: str
    group_id: int
    count: int
    time: int
    messages: List[str]

    # @staticmethod
    # def tortoise_decoder(text: str) -> List["Answer"]:
    #     print('answer_decoder:', text)
    #     return [Answer.parse_obj(item) for item in json.loads(text)]
    #
    # @staticmethod
    # def tortoise_encoder(models: List["Answer"]) -> str:
    #     print('answer_encoder:', models)
    #     if not models:
    #         return ''
    #     elif isinstance(models[0], BanWord):
    #         return json.dumps([model.dict() for model in models])


class Answers(BaseModel):
    answers: List[Answer] = []

    def __len__(self):
        return len(self.answers)

    def __getitem__(self, item):
        return self.answers[item]

    def __setitem__(self, key, value):
        self.answers[key] = value

    def __delitem__(self, key):
        del self.answers[key]

    def __iter__(self) -> Iterator[Answer]:
        return iter(self.answers)

    def __reversed__(self):
        return reversed(self.answers)

    def append(self, answer: Answer):
        self.answers.append(answer)

    def index(self, answer: Answer) -> int:
        return self.answers.index(answer)


class Message(Model):
    id: int = fields.IntField(pk=True, generated=True, auto_increment=True)
    """自增主键"""
    group_id: int = fields.IntField()
    """群id"""
    user_id: int = fields.IntField()
    """用户id"""
    raw_message: str = fields.TextField()
    """原始消息"""
    is_plain_text: bool = fields.BooleanField()
    """是否为纯文本"""
    plain_text: str = fields.TextField()
    """纯文本"""
    keywords: str = fields.TextField()
    """关键词"""
    time: int = fields.IntField()
    """时间戳"""

    class Meta:
        table = 'Message'
        indexes = ('time',)
        ordering = ['-time']


class Context(Model):
    id: int = fields.IntField(pk=True, generated=True, auto_increment=True)
    """自增主键"""
    keywords: str = fields.TextField()
    """关键词"""
    time: int = fields.IntField(default=int(time.time()))
    """时间戳"""
    count: int = fields.IntField(default=1)
    """次数"""
    answers: Answers = fields.JSONField(encoder=Answers.json, decoder=Answers.parse_raw, default=Answers())
    """答案列表"""
    clear_time: Optional[int] = fields.IntField(null=True)
    """清除时间戳"""
    ban: BanWords = fields.JSONField(encoder=BanWords.json, decoder=BanWords.parse_raw, default=BanWords())
    """禁用词列表"""

    class Meta:
        table = 'Context'
        indexes = ('keywords', 'count', 'time')
        ordering = ['-time', '-count']


class BlackList(Model):
    id: int = fields.IntField(pk=True, generated=True, auto_increment=True)
    """自增主键"""
    group_id: int = fields.IntField()
    """群id"""
    answers: List[str] = fields.JSONField(default=[])
    """答案"""
    answers_reserve: List[str] = fields.JSONField(default=[])
    """备用答案"""

    class Meta:
        table = 'BlackList'
        indexes = ('group_id',)
@@ -49,7 +49,7 @@ voice_cmd = on_regex(rf'^(?P<chara>({CHARA_RE})?)说(?P<text>[\w,。!?、

@voice_cmd.handle()
async def _(event: Union[GroupMessageEvent, PrivateMessageEvent], regex_dict: dict = RegexDict()):
regex_dict['text'] = filter_msg(regex_dict['text'].replace('\r', '').replace('\n', ''))
regex_dict['text'] = filter_msg(regex_dict['text'].replace('\r', '').replace('\n', ''), '星')
if not freq_limiter.check(
f'genshin_ai_voice_{event.group_id if isinstance(event, GroupMessageEvent) else event.user_id}'):
await voice_cmd.finish(
@@ -2,11 +2,11 @@ from nonebot import get_bot, on_command
from nonebot.adapters.onebot.v11 import MessageEvent, MessageSegment
from nonebot.plugin import PluginMetadata

from .generate import *
from LittlePaimon.utils.message import CommandObjectID, CommandSwitch, CommandTime
from LittlePaimon.utils import scheduler, logger
from LittlePaimon.database.models import GeneralSub
from LittlePaimon import DRIVER, SUPERUSERS
from LittlePaimon.database.models import GeneralSub
from LittlePaimon.utils import scheduler, logger
from LittlePaimon.utils.message import CommandObjectID, CommandSwitch, CommandTime
from .generate import *

__plugin_meta__ = PluginMetadata(
name="原神日历",

@@ -32,7 +32,7 @@ async def _(event: MessageEvent, sub_id=CommandObjectID(), switch=CommandSwitch(
im = await generate_day_schedule('cn')
await calendar.finish(MessageSegment.image(im))
else:
if event.sender.role not in ['admin', 'owner'] or event.user_id not in SUPERUSERS:
if event.sender.role not in ['admin', 'owner'] and event.user_id not in SUPERUSERS:
await calendar.finish('你没有权限管理原神日历订阅')
sub_data = {'sub_id': sub_id, 'sub_type': event.message_type, 'sub_event': '原神日历'}
LittlePaimon/plugins/Learning_Chat/__init__.py (new file, 210 lines)
@@ -0,0 +1,210 @@
import asyncio
import random
import re
import threading
from collections import defaultdict

from nonebot import on_keyword, on_message, on_command, get_bot
from nonebot.adapters.onebot.v11 import MessageEvent, GroupMessageEvent, GROUP, Message, ActionFailed
from nonebot.params import Arg, CommandArg
from nonebot.permission import SUPERUSER
from nonebot.plugin import PluginMetadata
from nonebot.rule import to_me, Rule
from nonebot.typing import T_State

from LittlePaimon import NICKNAME, SUPERUSERS
from LittlePaimon.utils import scheduler, logger
from .api import is_shutup
from .models import LearningChat
from .config import config_manager

__plugin_meta__ = PluginMetadata(
    name='群聊学习',
    description='群聊学习',
    usage='群聊学习',
    extra={
        'author': '惜月',
        'priority': 16,
    }
)

message_id_lock = threading.Lock()
message_id_dict = defaultdict(list)


async def chat_rule(event: GroupMessageEvent, state: T_State) -> bool:
    if not config_manager.config.total_enable:
        return False
    if event.group_id in config_manager.config.ban_groups:
        return False
    if event.user_id in config_manager.config.ban_users:
        return False
    if any(w in event.raw_message for w in config_manager.config.ban_words):
        return False
    to_learn = True
    with message_id_lock:
        """多账号登陆,且在同一群中时;避免一条消息被处理多次"""
        message_id = event.message_id
        group_id = event.group_id
        if group_id in message_id_dict and message_id in message_id_dict[group_id]:
            to_learn = False

        message_id_dict[group_id].append(message_id)
        if len(message_id_dict[group_id]) > 100:
            message_id_dict[group_id] = message_id_dict[group_id][:-10]

    chat = LearningChat(event)
    answers = await chat.answer()
    if to_learn:
        await chat.learn()
    if answers:
        state['answers'] = answers
        return True
    return False


async def is_reply(event: GroupMessageEvent) -> bool:
    return bool(event.reply)


learning_chat = on_message(priority=99, block=False, rule=Rule(chat_rule), permission=GROUP, state={
    'pm_name': '群聊学习',
    'pm_description': '(被动技能)bot会学习群友们的发言',
    'pm_usage': '群聊学习',
    'pm_priority': 1
})
ban_chat = on_keyword({'不可以', '达咩', '不行', 'no'}, rule=to_me(), priority=1, block=True, state={
    'pm_name': '群聊学习禁用',
    'pm_description': '如果bot说了不好的话,回复这句话,告诉TA不能这么说,需管理权限',
    'pm_usage': '@bot 不可以',
    'pm_priority': 2
})
set_enable = on_keyword({'学说话', '快学', '开启学习', '闭嘴', '别学', '关闭学习'}, rule=to_me(), priority=1, block=True, state={
    'pm_name': '群聊学习开关',
    'pm_description': '开启或关闭当前群的学习能力,需管理权限',
    'pm_usage': '@bot 开启|关闭学习',
    'pm_priority': 3
})
# set_config = on_command('chat', aliases={'群聊学习设置'}, permission=SUPERUSER, priority=1, block=True, state={
#     'pm_name': 'ysbc',
#     'pm_description': '查询已绑定的原神cookie情况',
#     'pm_usage': 'ysbc',
#     'pm_priority': 2
# })

# ban_msg_latest = on_fullmatch(msg=('不可以发这个', '不能发这个', '达咩达咩'), rule=to_me(), priority=1, block=True, permission=GROUP_OWNER | GROUP_ADMIN | SUPERUSER)


@learning_chat.handle()
async def _(event: GroupMessageEvent, answers=Arg('answers')):
    for item in answers:
        logger.info('群聊学习', f'{NICKNAME}即将向群<m>{event.group_id}</m>发送<m>"{item}"</m>')
        await asyncio.sleep(random.randint(1, 3))
        try:
            await learning_chat.send(Message(item))
        except ActionFailed:
            if not await is_shutup(event.self_id, event.group_id):
                # Bot没用在禁言中但发送失败,说明该条消息被风控,禁用调
                logger.info('群聊学习', f'{NICKNAME}将群<m>{event.group_id}</m>的发言<m>"{item}"</m>列入禁用列表')
                await LearningChat.ban(event.group_id, event.self_id,
                                       str(item), 'ActionFailed')
                break


@ban_chat.handle()
async def _(event: GroupMessageEvent):
    if event.sender.role not in ['admin', 'owner'] and event.user_id not in SUPERUSERS:
        await ban_chat.finish(random.choice([f'{NICKNAME}就喜欢说这个,哼!', f'你管得着{NICKNAME}吗!']))
    if event.reply:
        raw_message = ''
        for item in event.reply.message:
            raw_reply = str(item)
            # 去掉图片消息中的 url, subType 等字段
            raw_message += re.sub(r'(\[CQ:.+)(,url=*)(])',
                                  r'\1\2', raw_reply)
        logger.info('群聊学习', f'{NICKNAME}将群<m>{event.group_id}</m>的发言<m>"{raw_message}"</m>列入禁用列表')

        if await LearningChat.ban(event.group_id, event.self_id, raw_message, str(event.user_id)):
            await ban_chat.finish(
                random.choice([f'{NICKNAME}知道错了...达咩!', f'{NICKNAME}不会再这么说了...', f'果面呐噻,{NICKNAME}说错话了...']))
    else:
        logger.info('群聊学习', f'{NICKNAME}将群<m>{event.group_id}</m>的最后一条发言列入禁用列表')

        if await LearningChat.ban(event.group_id, event.self_id, '', str(event.user_id)):
            await ban_chat.finish(
                random.choice([f'{NICKNAME}知道错了...达咩!', f'{NICKNAME}不会再这么说了...', f'果面呐噻,{NICKNAME}说错话了...']))


@set_enable.handle()
async def _(event: MessageEvent):
    if event.user_id in SUPERUSERS:
        if any(w in event.raw_message for w in {'学说话', '快学', '开启学习'}):
            if config_manager.config.total_enable:
                msg = f'{NICKNAME}已经在努力尝试看懂你们说的话了!'
            else:
                config_manager.config.total_enable = True
                msg = f'{NICKNAME}会尝试学你们说怪话!'
        elif config_manager.config.total_enable:
            config_manager.config.total_enable = False
            msg = f'好好好,{NICKNAME}不学说话就是了!'
        else:
            msg = f'{NICKNAME}明明没有在学你们说话!'
    elif isinstance(event, GroupMessageEvent) and event.sender.role in {'admin', 'owner'}:
        if any(w in event.raw_message for w in {'学说话', '快学', '开启学习'}):
            if event.group_id in config_manager.config.ban_groups:
                config_manager.config.ban_groups.remove(event.group_id)
                msg = f'{NICKNAME}会尝试学你们说怪话!'
            else:
                msg = f'{NICKNAME}已经在努力尝试看懂你们说的话了!'
        elif event.group_id not in config_manager.config.ban_groups:
            config_manager.config.ban_groups.append(event.group_id)
            msg = f'好好好,{NICKNAME}不学说话就是了!'
        else:
            msg = f'{NICKNAME}明明没有在学你们说话!'
    else:
        msg = random.choice([f'{NICKNAME}就喜欢学,哼!', f'你管得着{NICKNAME}吗!'])
    await set_enable.finish(msg)


# @set_config.handle()
# async def _(event: MessageEvent, state: T_State, msg: Message = CommandArg()):
#     state['config_list'] = config_manager.config_list
#     configs_str = '\n'.join([f'{k}: {v}' for k, v in config_manager.config.dict(by_alias=True).items()])
#     if msg:
#         msg = msg.extract_plain_text().strip().split(' ')
#         if state['key'] in state['config_list']:
#             state['key'] = msg[0]
#         if len(msg) > 1:
#             state['value'] = msg[1]
#     else:
#         state['msg'] = '没有叫'

# @ban_msg_latest.handle()
# async def _(event: GroupMessageEvent):
#     logger.info('群聊学习', f'{NICKNAME}将群<m>{event.group_id}</m>的最后一条发言列入禁用列表')
#
#     if await LearningChat.ban(event.group_id, event.self_id, '', str(event.user_id)):
#         msg_send = ['派蒙知道错了...达咩!', '派蒙不会再这么说了...', '果面呐噻,派蒙说错话了...']
#         await ban_msg_latest.finish(random.choice(msg_send))


@scheduler.scheduled_job('interval', seconds=5, misfire_grace_time=5)
async def speak_up():
    if not config_manager.config.total_enable:
        return
    if not (ret := await LearningChat.speak()):
        return
    bot_id, group_id, messages = ret
    if group_id in config_manager.config.ban_groups:
        return
    for msg in messages:
        logger.info('群聊学习', f'{NICKNAME}即将向群<m>{group_id}</m>发送<m>"{msg}"</m>')
        await get_bot(str(bot_id)).send_group_msg(group_id=group_id, message=msg)
        await asyncio.sleep(random.randint(2, 4))


@scheduler.scheduled_job('cron', hour='4')
def update_data():
    if config_manager.config.total_enable:
        LearningChat.clear_up_context()
LittlePaimon/plugins/Learning_Chat/api.py (new file, 16 lines)
@@ -0,0 +1,16 @@
import time

from nonebot import get_bot
from nonebot.adapters.onebot.v11 import Bot


async def is_shutup(self_id: int, group_id: int) -> bool:
    """
    判断账号是否在禁言
    :param self_id: 自身id
    :param group_id: 群id
    """
    bot: Bot = get_bot(str(self_id))
    info = await bot.get_group_member_info(user_id=self_id, group_id=group_id)

    return info['shut_up_timestamp'] > int(time.time())
LittlePaimon/plugins/Learning_Chat/config.py (new file, 50 lines)
@@ -0,0 +1,50 @@
from typing import List

from pydantic import BaseModel, Field

from LittlePaimon.config import LEARNING_CHAT_CONFIG
from LittlePaimon.utils.files import load_yaml, save_yaml


class ChatConfig(BaseModel):
    total_enable: bool = Field(True, description='群聊学习总开关')
    ban_words: List[str] = Field([], description='屏蔽词')
    ban_groups: List[int] = Field([], description='屏蔽群')
    ban_users: List[int] = Field([], description='屏蔽用户')
    KEYWORDS_SIZE: int = Field(3, alias='单句关键词分词数量')
    answer_threshold: int = Field(4, alias='发言阈值')
    answer_threshold_weights: List[int] = Field([10, 30, 60], alias='发言阈值权重')
    cross_group_threshold: int = Field(2, alias='跨群回复阈值')
    repeat_threshold: int = Field(3, alias='复读阈值')
    speak_threshold: int = Field(5, alias='主动发言阈值')
    split_probability: float = Field(0.5, alias='按逗号分割回复概率')
    voice_probability: float = Field(0.1, alias='以语音回复概率')
    speak_continuously_probability: float = Field(0.5, alias='连续主动发言概率')
    speak_poke_probability: float = Field(0.5, alias='主动发言附带戳一戳概率')
    speak_continuously_max_len: int = Field(3, alias='最大连续说话句数')
    save_time_threshold: int = Field(3600, alias='持久化间隔秒数')
    save_count_threshold: int = Field(1000, alias='持久化间隔条数')


class ChatConfigManager:

    def __init__(self):
        self.file_path = LEARNING_CHAT_CONFIG
        if self.file_path.exists():
            self.config = ChatConfig.parse_obj(load_yaml(self.file_path))
        else:
            self.config = ChatConfig()
        self.save()

    @property
    def config_list(self) -> List[str]:
        return list(self.config.dict(by_alias=True).keys())

    def save(self):
        save_yaml(self.config.dict(by_alias=True), self.file_path)

    # def set_config(self, config_name: str, value: any):
    #     if config_name not in self.config.dict(by_alias=True).keys():


config_manager = ChatConfigManager()
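(Aside, not part of the diff: a hedged usage sketch of the config manager defined above. Fields live on config_manager.config, the aliases map them to the Chinese keys written to config/learning_chat.yml, and changes persist only after an explicit save(); the import path is inferred from the file location shown above.)

from LittlePaimon.plugins.Learning_Chat.config import config_manager

# read a threshold; the ChatConfig defaults apply if learning_chat.yml does not exist yet
threshold = config_manager.config.answer_threshold

# stop learning in one group (123456789 is a placeholder group id) and write the YAML back
config_manager.config.ban_groups.append(123456789)
config_manager.save()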
LittlePaimon/plugins/Learning_Chat/models.py (new file, 542 lines)
@@ -0,0 +1,542 @@
import random
import re
import time
from functools import cached_property, cmp_to_key
from typing import Generator, List, Optional, Union, Tuple, Dict, Any

try:
    import jieba_fast.analyse as jieba_analyse
except ImportError:
    import jieba.analyse as jieba_analyse
import pypinyin
from dataclasses import dataclass
from collections import defaultdict
import threading

from nonebot.adapters.onebot.v11 import Message, MessageSegment, GroupMessageEvent

from LittlePaimon import NICKNAME
from LittlePaimon.database.models import Message, Context, BlackList, Answers, Answer, BanWord
from .config import config_manager

config = config_manager.config


@dataclass
class MessageData:
    group_id: int
    user_id: int
    raw_message: str
    plain_text: str
    time: int
    bot_id: int

    @cached_property
    def is_plain_text(self) -> bool:
        """
        判断消息是否为纯文本
        """
        return '[CQ:' not in self.raw_message and len(self.plain_text) != 0

    @cached_property
    def is_image(self) -> bool:
        """
        判断消息是否为图片
        """
        return '[CQ:image,' in self.raw_message or '[CQ:face,' in self.raw_message

    @cached_property
    def _keywords_list(self):
        """
        获取纯文本部分的关键词结果
        """
        if not self.is_plain_text and len(self.plain_text) == 0:
            return []

        return jieba_analyse.extract_tags(
            self.plain_text, topK=config.KEYWORDS_SIZE)

    @cached_property
    def keywords_len(self) -> int:
        """
        获取关键词数量
        :return:
        """
        return len(self._keywords_list)

    @cached_property
    def keywords(self) -> str:
        """将关键词列表字符串"""
        if not self.is_plain_text and len(self.plain_text) == 0:
            return self.raw_message

        if self.keywords_len < 2:
            return self.plain_text
        else:
            # keywords_list.sort()
            return ' '.join(self._keywords_list)

    @cached_property
    def keywords_pinyin(self) -> str:
        """将关键词拼音列表字符串"""
        return ''.join([item[0] for item in pypinyin.pinyin(
            self.keywords, style=pypinyin.NORMAL, errors='default')]).lower()

    @cached_property
    def to_me(self) -> bool:
        """判断是否为艾特机器人"""
        return self.plain_text.startswith(NICKNAME)


class LearningChat:
    reply_cache = defaultdict(lambda: defaultdict(list))
    """回复的消息缓存"""
    message_cache = defaultdict(list)
    """群消息缓存"""

    _reply_lock = threading.Lock()
    _message_lock = threading.Lock()
    _save_reserve_size = 100  # 保存时,给内存中保留的大小
    _late_save_time = 0  # 上次保存(消息数据持久化)的时刻 ( time.time(), 秒 )

    def __init__(self, event: Union[GroupMessageEvent, MessageData]):
        if isinstance(event, GroupMessageEvent):
            self.message = MessageData(
                group_id=event.group_id,
                user_id=event.user_id,
                raw_message=re.sub(r',subType=\d+,url=.+]', r']', event.raw_message),
                plain_text=event.get_plaintext(),
                time=event.time,
                bot_id=event.self_id)
        else:
            self.message = event

    async def learn(self) -> bool:
        """学习这句话"""
        if not len(self.message.raw_message.strip()):
            return False

        if self.message.group_id in LearningChat.message_cache:
            group_msgs = LearningChat.message_cache[self.message.group_id]
            # 将群里上一条发言插入数据库
            group_pre_msg = group_msgs[-1] if group_msgs else None
            await self._update_context(group_pre_msg)

            if group_pre_msg and group_pre_msg['user_id'] != self.message.user_id:
                # 该用户在群里的上一条发言(倒序三句之内)
                for msg in group_msgs[:-3:-1]:
                    if msg['user_id'] == self.message.user_id:
                        await self._update_context(msg)
                        break
        await self._update_message()
        return True

    async def answer(self) -> Optional[Generator[Union[Message, Message], None, None]]:
        """获取这句话的回复"""
        if self.message.is_plain_text and len(self.message.plain_text) <= 1:
            """不回复单个字的对话"""
            return None

        if not (results := await self._get_context()):
            return None
        group_id = self.message.group_id
        raw_message = self.message.raw_message
        keywords = self.message.keywords
        bot_id = self.message.bot_id

        group_bot_replies = LearningChat.reply_cache[group_id][bot_id]
        with LearningChat._reply_lock:
            group_bot_replies.append({
                'time': int(time.time()),
                'pre_raw_message': raw_message,
                'pre_keywords': keywords,
                'reply': '[LearningChat: Reply]',  # flag
                'reply_keywords': '[LearningChat: Reply]',  # flag
            })

        def yield_results(results_: Tuple[List[str], str]) -> Generator[Message, None, None]:
            answer_list, answer_keywords = results_

            for item in answer_list:
                with LearningChat._reply_lock:
                    LearningChat.reply_cache[group_id][bot_id].append({
                        'time': int(time.time()),
                        'pre_raw_message': raw_message,
                        'pre_keywords': keywords,
                        'reply': item,
                        'reply_keywords': answer_keywords,
                    })
                if '[CQ:' not in item and len(item) > 1 and random.random() < config.voice_probability:
                    yield MessageSegment.record(f'http://233366.proxy.nscc-gz.cn:8888/?text={item}&speaker=派蒙')
                else:
                    yield item

            with LearningChat._reply_lock:
                LearningChat.reply_cache[self.message.group_id][self.message.bot_id] = \
                    LearningChat.reply_cache[self.message.group_id][
                        self.message.bot_id][
                        -self._save_reserve_size:]

        return yield_results(results)

    @staticmethod
    async def speak() -> Optional[Tuple[int, int, List[Message]]]:
        """
        主动发言,返回当前最希望发言的 bot 账号、群号、发言消息 List,也有可能不发言
        """
        basic_msgs_len = 10
        basic_delay = 600

        def group_popularity_cmp(lhs: Tuple[int, List[Dict[str, Any]]],
                                 rhs: Tuple[int, List[Dict[str, Any]]]) -> int:
            def cmp(a: Any, b: Any):
                return (a > b) - (a < b)

            lhs_group_id, lhs_msgs = lhs
            rhs_group_id, rhs_msgs = rhs
            lhs_len = len(lhs_msgs)
            rhs_len = len(rhs_msgs)
            if lhs_len < basic_msgs_len or rhs_len < basic_msgs_len:
                return cmp(lhs_len, rhs_len)

            lhs_duration = lhs_msgs[-1]['time'] - lhs_msgs[0]['time']
            rhs_duration = rhs_msgs[-1]['time'] - rhs_msgs[0]['time']

            if not lhs_duration or not rhs_duration:
                return cmp(lhs_len, rhs_len)

            return cmp(lhs_len / lhs_duration,
                       rhs_len / rhs_duration)

        # 按群聊热度排序
        popularity = sorted(LearningChat.message_cache.items(),
                            key=cmp_to_key(group_popularity_cmp))
        cur_time = time.time()
        for group_id, group_msgs in popularity:
            group_replies = LearningChat.reply_cache[group_id]
            if not len(group_replies) or len(group_msgs) < basic_msgs_len:
                continue

            group_replies_front = list(group_replies.values())[0]
            if not len(group_replies_front) or group_replies_front[-1]['time'] > group_msgs[-1]['time']:
                continue

            msgs_len = len(group_msgs)
            latest_time = group_msgs[-1]['time']
            duration = latest_time - group_msgs[0]['time']
            avg_interval = duration / msgs_len

            if cur_time - latest_time < avg_interval * config.speak_threshold + basic_delay:
                continue
            # append 一个 flag, 防止这个群热度特别高,但压根就没有可用的 context 时,每次 speak 都查这个群,浪费时间
            with LearningChat._reply_lock:
                group_replies_front.append({
                    'time': int(cur_time),
                    'pre_raw_message': '[PallasBot: Speak]',
                    'pre_keywords': '[PallasBot: Speak]',
                    'reply': '[PallasBot: Speak]',
                    'reply_keywords': '[PallasBot: Speak]',
                })

            available_time = cur_time - 24 * 3600
            speak_context = await Context.filter(count__gt=config.answer_threshold,
                                                 time__gt=available_time).all()
            speak_context_right = []
            for context in speak_context:
                for answer in context.answers:
                    if answer.group_id == group_id and answer.time > available_time and answer.count > config.answer_threshold:
                        speak_context_right.append(context)
                        break
            if not speak_context_right:
                continue
            speak_context_right.sort(key=lambda x: len(x.ban))

            ban_keywords = await LearningChat._get_ban_keywords(speak_context_right[0], group_id)
            messages = [answer.messages
                        for answer in speak_context_right[0].answers
                        if answer.count >= config.answer_threshold
                        and answer.keywords not in ban_keywords
                        and answer.group_id == group_id]
            if not messages:
                continue
            speak = random.choice(random.choice(messages))

            bot_id = random.choice([bid for bid in group_replies.keys() if bid])
            with LearningChat._reply_lock:
                group_replies[bot_id].append({
                    'time': int(cur_time),
                    'pre_raw_message': '[PallasBot: Speak]',
                    'pre_keywords': '[PallasBot: Speak]',
                    'reply': speak,
                    'reply_keywords': '[PallasBot: Speak]',
                })

            speak_list = [Message(speak)]
            while random.random() < config.speak_continuously_probability and len(
                    speak_list) < config.speak_continuously_max_len:
                pre_msg = str(speak_list[-1])
                answer = await LearningChat(MessageData(group_id=group_id,
                                                        user_id=0,
                                                        raw_message=pre_msg,
                                                        plain_text=pre_msg,
                                                        time=int(cur_time),
                                                        bot_id=0)).answer()
                if not answer:
                    break
                speak_list.extend(answer)

            if random.random() < config.speak_poke_probability:
                target_id = random.choice(LearningChat.message_cache[group_id])['user_id']
                speak_list.append(Message(f'[CQ:poke,qq={target_id}]'))

            return bot_id, group_id, speak_list

        return None

    @staticmethod
    async def ban(group_id: int, bot_id: int, ban_raw_message: str, reason: str) -> bool:
        """
        禁止以后回复这句话,仅对该群有效果
        """

        if group_id not in LearningChat.reply_cache:
            return False

        ban_reply = None
        reply_data = LearningChat.reply_cache[group_id][bot_id][::-1]

        for reply in reply_data:
            cur_reply = reply['reply']
            # 为空时就直接 ban 最后一条回复
            if not ban_raw_message or ban_raw_message in cur_reply:
                ban_reply = reply
                break

        # 这种情况一般是有些 CQ 码,牛牛发送的时候,和被回复的时候,里面的内容不一样
        if not ban_reply:
            if search := re.search(r'(\[CQ:[a-zA-z0-9-_.]+)', ban_raw_message):
                type_keyword = search[1]
                for reply in reply_data:
                    cur_reply = reply['reply']
                    if type_keyword in cur_reply:
                        ban_reply = reply
                        break

        if not ban_reply:
            return False

        pre_keywords = reply['pre_keywords']
        keywords = reply['reply_keywords']

        ban, _ = await Context.get_or_create(keywords=pre_keywords)
        ban.ban.append(BanWord(keywords=keywords,
                               group_id=group_id,
                               reason=reason,
                               time=int(time.time())))
        await ban.save()
        blacklist, _ = await BlackList.get_or_create(group_id=group_id)
        if keywords in blacklist.answers_reserve:
            blacklist.answers.append(keywords)
        else:
            blacklist.answers_reserve.append(keywords)

        return True

    @staticmethod
    async def persistence(cur_time: int = int(time.time())):
        """
        持久化
        """
        with LearningChat._message_lock:
            if save_list := [msg for group_msgs in LearningChat.message_cache.values() for msg in group_msgs if
                             msg['time'] > LearningChat._late_save_time]:
                LearningChat.message_cache = {group_id: group_msgs[-LearningChat._save_reserve_size:] for
                                              group_id, group_msgs in LearningChat.message_cache.items()}
                LearningChat._late_save_time = cur_time
            else:
                return

        await Message.bulk_create([Message(**msg) for msg in save_list])

    async def _get_context(self):
        """获取上下文消息"""
        if msgs := LearningChat.message_cache.get(self.message.group_id):
            # 是否在复读中
            if len(msgs) >= config.repeat_threshold and all(
                    item['raw_message'] == self.message.raw_message for item in msgs[-config.repeat_threshold + 1:]):
                # 说明当前群里正在复读
                group_bot_replies = LearningChat.reply_cache[self.message.group_id][self.message.bot_id]
                if len(group_bot_replies) and group_bot_replies[-1]['reply'] != self.message.raw_message:
                    return [self.message.raw_message, ], self.message.keywords
                else:
                    # 已经复读过了,不回复
                    return None
        if not (context := await Context.get_or_none(keywords=self.message.keywords)):
            return None

        # 喝醉了的处理,先不做了
        answer_threshold_choice_list = list(
            range(config.answer_threshold - len(config.answer_threshold_weights) + 1, config.answer_threshold + 1))
        answer_count_threshold = random.choices(answer_threshold_choice_list, weights=config.answer_threshold_weights)[
            0]
        if self.message.keywords_len == config.KEYWORDS_SIZE:
            answer_count_threshold -= 1

        cross_group_threshold = 1 if self.message.to_me else config.cross_group_threshold
        ban_keywords = await LearningChat._get_ban_keywords(context, self.message.group_id)

        candidate_answers: Dict[str, Answer] = {}
        other_group_cache: Dict[str, Answer] = {}
        answers_count = defaultdict(int)

        def candidate_append(dst: Dict[str, Answer], answer_: Answer):
            if answer_.keywords not in dst:
                dst[answer_.keywords] = answer_
            else:
                dst[answer_.keywords].count += answer_.count
                dst[answer_.keywords].messages += answer_.messages

        for answer in context.answers:
            if answer.count < answer_count_threshold:
                continue
            if answer.keywords in ban_keywords:
                continue
            sample_msg = answer.messages[0]
            if self.message.is_image and '[CQ:' not in sample_msg:
                # 图片消息不回复纯文本
                continue
            if not self.message.to_me and sample_msg.startswith(NICKNAME):
                continue
            if sample_msg.startswith(('[CQ:xml', '[CQ:json', '[CQ:at')):
                # 不学xml、json和at
                continue

            if answer.group_id == self.message.group_id:
                candidate_append(candidate_answers, answer)
            else:
                answers_count[answer.keywords] += 1
                cur_count = answers_count[answer.keywords]
                if cur_count < cross_group_threshold:
                    candidate_append(other_group_cache, answer)
                elif cur_count == cross_group_threshold:
                    if cur_count > 1:
                        candidate_append(candidate_answers, other_group_cache[answer.keywords])
                    candidate_append(candidate_answers, answer)
                else:
                    candidate_append(candidate_answers, answer)
        if not candidate_answers:
            return None

        final_answer = random.choices(list(candidate_answers.values()),
                                      weights=[min(answer.count, 10) for answer in candidate_answers.values()])[0]
        answer_str = random.choice(final_answer.messages)
        answer_keywords = final_answer.keywords

        if 0 < answer_str.count(',') <= 3 and '[CQ:' not in answer_str and random.random() < config.split_probability:
            return answer_str.split(','), answer_keywords
        return [answer_str, ], answer_keywords

    async def _update_context(self, pre_msg):
        if not pre_msg:
            return

        # 在复读,不学
        if pre_msg['raw_message'] == self.message.raw_message:
            return
        # 回复别人的,不学
        if '[CQ:reply' in self.message.raw_message:
            return
        if context := await Context.get_or_none(keywords=pre_msg['keywords']):
            context.count += 1
            context.time = self.message.time
            answer_index = next((idx for idx, answer in enumerate(context.answers)
                                 if answer.group_id == self.message.group_id
                                 and answer.keywords == self.message.keywords), -1)
            if answer_index == -1:
                context.answers.append(
                    Answer(
                        keywords=self.message.keywords,
                        group_id=self.message.group_id,
                        count=1,
                        time=self.message.time,
                        messages=[self.message.raw_message]
                    )
                )
            else:
                context.answers[answer_index].count += 1
                context.answers[answer_index].time = self.message.time
                if self.message.is_plain_text:
                    context.answers[answer_index].messages.append(self.message.raw_message)
            await context.save()
        else:
            answer = Answer(
                keywords=self.message.keywords,
                group_id=self.message.group_id,
                count=1,
                time=self.message.time,
                messages=[self.message.raw_message]
            )
            await Context.create(keywords=pre_msg['keywords'],
                                 time=self.message.time,
                                 count=1,
                                 answers=Answers(answers=[answer]))

    async def _update_message(self):
        with LearningChat._message_lock:
            LearningChat.message_cache[self.message.group_id].append(
                {
                    'group_id': self.message.group_id,
                    'user_id': self.message.user_id,
                    'raw_message': self.message.raw_message,
                    'is_plain_text': self.message.is_plain_text,
                    'plain_text': self.message.plain_text,
                    'keywords': self.message.keywords,
                    'time': self.message.time,
                }
            )

        cur_time = self.message.time
        if LearningChat._late_save_time == 0:
            LearningChat._late_save_time = cur_time - 1
            return

        if len(LearningChat.message_cache[self.message.group_id]) > config.save_count_threshold:
            await LearningChat.persistence(cur_time)
        elif cur_time - LearningChat._late_save_time > config.save_time_threshold:
            await LearningChat.persistence(cur_time)

    @staticmethod
    async def _get_ban_keywords(context: Context, group_id: int) -> set:
        """
        找到在 group_id 群中对应 context 不能回复的关键词
        """
        ban_keywords, _ = await BlackList.get_or_create(group_id=group_id)
        if context.ban:
            ban_count = defaultdict(int)
            for ban in context.ban:
                ban_key = ban.keywords
                if ban.group_id == group_id:
                    ban_keywords.answers.append(ban_key)
                else:
                    ban_count[ban_key] += 1
                    if ban_count[ban_key] == config.cross_group_threshold:
                        ban_keywords.answers.append(ban_key)
            await ban_keywords.save()
        return set(ban_keywords.answers)

    @staticmethod
    async def clear_up_context():
        """
        清理所有超过 15 天没人说、且没有学会的话
        """
        cur_time = int(time.time())
        expiration = cur_time - 15 * 24 * 3600  # 15 天前
        await Context.filter(time__lt=expiration, count__lt=config.answer_threshold).delete()
        contexts = await Context.filter(count__gt=100, clear_time__lt=expiration).all()
        for context in contexts:
            answers = [answer
                       for answer in context.answers
                       if answer.count > 1 or answer.time > expiration]
            context.answers = answers
            context.clear_time = cur_time
            await context.save()
@@ -14,7 +14,7 @@ from nonebot.typing import T_State
from LittlePaimon import NICKNAME, SUPERUSERS
from LittlePaimon.utils import scheduler, logger
from LittlePaimon.utils.message import format_message
from LittlePaimon.utils.message import format_message, replace_all
from LittlePaimon.manager.plugin_manager import plugin_manager as pm
from .config import config

@@ -46,7 +46,7 @@ async def IncreaseRule(event: NoticeEvent) -> bool:
approve_request = on_command('同意', priority=1, block=True, permission=SUPERUSER)
ban_greet = on_regex(r'入群欢迎(?P<type>开启|启用|关闭|禁用)(?P<target>.+)', priority=1, block=True, permission=SUPERUSER)
ban_greet = on_command('入群欢迎', priority=1, block=True, permission=SUPERUSER)
requests = on_request(priority=1, rule=Rule(InviteRule), block=True)
notices = on_notice(priority=1, rule=Rule(IncreaseRule), block=True)

@@ -159,9 +159,17 @@ async def _(bot: Bot, event: GroupIncreaseNoticeEvent):
@ban_greet.handle()
async def _(event: MessageEvent, regex_dict: dict = RegexDict()):
type = regex_dict['type']
target = regex_dict['target'].split(' ')
async def _(event: MessageEvent, msg: Message = CommandArg()):
msg = msg.extract_plain_text().strip()
if any(i in msg for i in {'开启', '启用', '打开', 'on'}):
type = True
target = replace_all(msg, ['开启', '启用', '打开', 'on'])
elif any(i in msg for i in {'禁用', '关闭', 'off'}):
type = False
target = replace_all(msg, ['禁用', '关闭', 'off'])
else:
await ban_greet.finish('指令格式错误,应为[入群欢迎启用|禁用 群号]')
return
if any(i in target for i in {'全部', 'all', '所有'}):
target = ['全部']
else:

@@ -173,14 +181,14 @@ async def _(event: MessageEvent, regex_dict: dict = RegexDict()):
target = [event.group_id]
for t in target:
if t == '全部':
config.group_ban = ['全部'] if type in {'禁用', '关闭'} else []
elif type in {'禁用', '关闭'}:
config.group_ban = ['全部'] if type else []
elif not type:
if t not in config.group_ban:
config.group_ban.append(t)
elif t in config.group_ban:
config.group_ban.remove(t)
config.save()
await ban_greet.finish(f'已{type}群{" ".join(target)}的群欢迎')
await ban_greet.finish(f'已{"启用" if type else "禁用"}群{" ".join(target)}的群欢迎')

@scheduler.scheduled_job('cron', hour='*/1')
@@ -37,12 +37,12 @@ show_log = on_command('模拟抽卡记录', aliases={'查看模拟抽卡记录'}
'pm_usage': '模拟抽卡记录[角色|武器]',
'pm_priority': 2
})
delete_log = on_command('删除模拟抽卡记录', priority=13, block=True, state={
'pm_name': '删除模拟抽卡记录',
'pm_description': '删除你的模拟抽卡记录',
'pm_usage': '删除模拟抽卡记录',
'pm_priority': 3
})
# delete_log = on_command('删除模拟抽卡记录', priority=13, block=True, state={
# 'pm_name': '删除模拟抽卡记录',
# 'pm_description': '删除你的模拟抽卡记录',
# 'pm_usage': '删除模拟抽卡记录',
# 'pm_priority': 3
# })
# show_dg = on_command('查看定轨', priority=13, block=True, state={
# 'pm_name': '查看定轨',
# 'pm_description': '查看当前定轨的武器',
@@ -9,16 +9,6 @@ info_file = load_json(JSON_DATA / 'genshin_info.json')
weapon_file = load_json(JSON_DATA / 'weapon.json')

def get_short_name(name: str) -> str:
"""
获取角色或武器的短名(2个字)
:param name: 角色或武器名
:return: 短名字符串
"""
short_name = load_json(JSON_DATA / 'short_name.json')
return name if name not in short_name.keys() else short_name[name]

def get_id_by_name(name: str) -> Optional[str]:
"""
根据角色名字获取角色的id
@@ -69,10 +69,15 @@ text_filter = DFAFilter()
text_filter.parse(JSON_DATA / 'ban_word.txt')

def filter_msg(message: Union[Message, str]):
def filter_msg(message: Union[Message, str], repl: str = "*"):
"""
过滤违禁词
:param message: 过滤的消息
:param repl: 替换词
"""
if isinstance(message, str):
return text_filter.filter(message)
elif isinstance(message, Message):
for seg in message['text']:
seg.data['text'] = text_filter.filter(seg.data.get('text', ''))
seg.data['text'] = text_filter.filter(seg.data.get('text', ''), repl)
return message
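(Aside, not part of the diff: a hedged sketch of the updated filter_msg call shape. The new repl parameter replaces the previously hard-coded '*' mask, and the voice command hunk above passes '星' for it; the import path below is an assumption for illustration only.)

# hypothetical import path, for illustration only
from LittlePaimon.utils.message import filter_msg

text = '待过滤的消息文本'
filtered_default = filter_msg(text)        # default mask
filtered_custom = filter_msg(text, '星')   # custom mask, matching the voice plugin call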
poetry.lock (generated, 128 lines changed)
@@ -459,9 +459,9 @@ url = "https://mirrors.aliyun.com/pypi/simple"
reference = "ali"

[[package]]
name = "jieba-fast"
version = "0.53"
description = "Use C and Swig to Speed up jieba<Chinese Words Segementation Utilities>"
name = "jieba"
version = "0.42.1"
description = "Chinese Words Segmentation Utilities"
category = "main"
optional = false
python-versions = "*"

@@ -942,28 +942,6 @@ type = "legacy"
url = "https://mirrors.aliyun.com/pypi/simple"
reference = "ali"

[[package]]
name = "pymongo"
version = "4.2.0"
description = "Python driver for MongoDB <http://www.mongodb.org>"
category = "main"
optional = false
python-versions = ">=3.7"

[package.extras]
aws = ["pymongo-auth-aws (<2.0.0)"]
encryption = ["pymongocrypt (>=1.3.0,<2.0.0)"]
gssapi = ["pykerberos"]
ocsp = ["pyopenssl (>=17.2.0)", "requests (<3.0.0)", "service_identity (>=18.1.0)"]
snappy = ["python-snappy"]
srv = ["dnspython (>=1.16.0,<3.0.0)"]
zstd = ["zstandard"]

[package.source]
type = "legacy"
url = "https://mirrors.aliyun.com/pypi/simple"
reference = "ali"

[[package]]
name = "pyparsing"
version = "3.0.9"

@@ -1276,19 +1254,6 @@ type = "legacy"
url = "https://mirrors.aliyun.com/pypi/simple"
reference = "ali"

[[package]]
name = "sqlitedict"
version = "2.0.0"
description = "Persistent dict in Python, backed up by sqlite3 and pickle, multithread-safe."
category = "main"
optional = false
python-versions = "*"

[package.source]
type = "legacy"
url = "https://mirrors.aliyun.com/pypi/simple"
reference = "ali"

[[package]]
name = "starlette"
version = "0.19.1"

@@ -1520,7 +1485,7 @@ reference = "ali"

[[package]]
name = "uvicorn"
version = "0.18.2"
version = "0.18.3"
description = "The lightning-fast ASGI server."
category = "main"
optional = false

@@ -1532,13 +1497,13 @@ colorama = {version = ">=0.4", optional = true, markers = "sys_platform == \"win
h11 = ">=0.8"
httptools = {version = ">=0.4.0", optional = true, markers = "extra == \"standard\""}
python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""}
PyYAML = {version = ">=5.1", optional = true, markers = "extra == \"standard\""}
pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""}
uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\" and extra == \"standard\""}
watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""}
websockets = {version = ">=10.0", optional = true, markers = "extra == \"standard\""}

[package.extras]
standard = ["websockets (>=10.0)", "httptools (>=0.4.0)", "watchfiles (>=0.13)", "python-dotenv (>=0.13)", "PyYAML (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "colorama (>=0.4)"]
standard = ["colorama (>=0.4)", "httptools (>=0.4.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.0)"]

[package.source]
type = "legacy"

@@ -1671,7 +1636,7 @@ reference = "ali"
[metadata]
lock-version = "1.1"
python-versions = "^3.8"
content-hash = "0e480c5abb5e122c5cf6787d5abfc9f6aa90907fbb315152f1ba7b6f74b408e3"
content-hash = "a7603c618d0b9561d0342a729f5a0b4f249a9e1c7f56ac78322c284aea343626"

[metadata.files]
aiofiles = [

@@ -1877,8 +1842,8 @@ iso8601 = [
{file = "iso8601-1.0.2-py3-none-any.whl", hash = "sha256:d7bc01b1c2a43b259570bb307f057abc578786ea734ba2b87b836c5efc5bd443"},
{file = "iso8601-1.0.2.tar.gz", hash = "sha256:27f503220e6845d9db954fb212b95b0362d8b7e6c1b2326a87061c3de93594b1"},
]
jieba-fast = [
{file = "jieba_fast-0.53.tar.gz", hash = "sha256:e92089d52faa91d51b6a7c1e6e4c4c85064a0e36f6a29257af2254b9e558ddd0"},
jieba = [
{file = "jieba-0.42.1.tar.gz", hash = "sha256:055ca12f62674fafed09427f176506079bc135638a14e23e25be909131928db2"},
]
jinja2 = [
{file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"},

@@ -2395,74 +2360,6 @@ pymdown-extensions = [
{file = "pymdown_extensions-9.5-py3-none-any.whl", hash = "sha256:ec141c0f4983755349f0c8710416348d1a13753976c028186ed14f190c8061c4"},
{file = "pymdown_extensions-9.5.tar.gz", hash = "sha256:3ef2d998c0d5fa7eb09291926d90d69391283561cf6306f85cd588a5eb5befa0"},
]
pymongo = [
{file = "pymongo-4.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1c81414b706627f15e921e29ae2403aab52e33e36ed92ed989c602888d7c3b90"},
{file = "pymongo-4.2.0-cp310-cp310-manylinux1_i686.whl", hash = "sha256:c549bb519456ee230e92f415c5b4d962094caac0fdbcc4ed22b576f66169764e"},
{file = "pymongo-4.2.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:70216ec4c248213ae95ea499b6314c385ce01a5946c448fb22f6c8395806e740"},
{file = "pymongo-4.2.0-cp310-cp310-manylinux2014_i686.whl", hash = "sha256:8a86e8c2ac2ec87141e1c6cb00bdb18a4560f06e5f96769abcd1dda24dc0e764"},
{file = "pymongo-4.2.0-cp310-cp310-manylinux2014_ppc64le.whl", hash = "sha256:314b556afd72eb21a6a10bd1f45ef252509f014f80207db59c97372103c88237"},
{file = "pymongo-4.2.0-cp310-cp310-manylinux2014_s390x.whl", hash = "sha256:902e2c9030cb042c49750bc70d72d830d42c64ea0df5ff8630c171e065c93dd7"},
{file = "pymongo-4.2.0-cp310-cp310-manylinux2014_x86_64.whl", hash = "sha256:c69ef5906dcd6ec565d4d887ba97ceb2a84f3b614307ee3b4780cb1ea40b1867"},
{file = "pymongo-4.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07564178ecc203a84f63e72972691af6c0c82d2dc0c9da66ba711695276089ba"},
{file = "pymongo-4.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47d5f10922cf7f7dfcd1406bd0926cef6d866a75953c3745502dffd7ac197dd"},
{file = "pymongo-4.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4cadaaa5c19ad23fc84559e90284f2eb003c36958ebb2c06f286b678f441285f"},
{file = "pymongo-4.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d94f535df9f539615bc3dbbef185ded3b609373bb44ca1afffcabac70202678a"},
{file = "pymongo-4.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:147a23cd96feb67606ac957744d8d25b013426cdc3c7164a4f99bd8253f649e3"},
{file = "pymongo-4.2.0-cp310-cp310-win32.whl", hash = "sha256:ecdcb0d4e9b08b739035f57a09330efc6f464bd7f942b63897395d996ca6ebd5"},
{file = "pymongo-4.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:8c223aea52c359cc8fdee5bd3475532590755c269ec4d4fe581acd47a44e9952"},
{file = "pymongo-4.2.0-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:fe0820d169635e41c14a5d21514282e0b93347878666ec9d5d3bf0eed0649948"},
{file = "pymongo-4.2.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:e39cacee70a98758f9b2da53ee175378f07c60113b1fa4fae40cbaee5583181e"},
{file = "pymongo-4.2.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:701d331060dae72bf3ebdb82924405d14136a69282ccb00c89fc69dee21340b4"},
{file = "pymongo-4.2.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:e08fe1731f5429435b8dea1db9663f9ed1812915ff803fc9991c7c4841ed62ad"},
{file = "pymongo-4.2.0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:60c470a58c5b62b1b12a5f5458f8e2f2f67b94e198d03dc5352f854d9230c394"},
{file = "pymongo-4.2.0-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:b211e161b6cc2790e0d640ad38e0429d06c944e5da23410f4dc61809dba25095"},
{file = "pymongo-4.2.0-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:ed90a9de4431cbfb2f3b2ef0c5fd356e61c85117b2be4db3eae28cb409f6e2d5"},
{file = "pymongo-4.2.0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:68e1e49a5675748233f7b05330f092582cd52f2850b4244939fd75ba640593ed"},
{file = "pymongo-4.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:764fc15418d94bce5c2f8ebdbf66544f96f42efb1364b61e715e5b33281b388d"},
{file = "pymongo-4.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e64442aba81ed4df1ca494b87bf818569a1280acaa73071c68014f7a884e83f1"},
{file = "pymongo-4.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:83168126ae2457d1a19b2af665cafa7ef78c2dcff192d7d7b5dad6b36c73ae24"},
{file = "pymongo-4.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69d0180bca594e81cdb4a2af328bdb4046f59e10aaeef7619496fe64f2ec918c"},
{file = "pymongo-4.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80cbf0b043061451660099fff9001a7faacb2c9c983842b4819526e2f944dc6c"},
{file = "pymongo-4.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e1b8f5e2f9637492b0da4d51f78ecb17786e61d6c461ead8542c944750faf4f9"},
{file = "pymongo-4.2.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1a957cdc2b26eeed4d8f1889a40c6023dd1bd94672dd0f5ce327314f2caaefd4"},
{file = "pymongo-4.2.0-cp37-cp37m-win32.whl", hash = "sha256:6bd5888997ea3eae9830c6cc7964b61dcfbc50eb3a5a6ce56ad5f86d5579b11c"},
{file = "pymongo-4.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:dc24737d24ce0de762bee9c2a884639819485f679bbac8ab5be9c161ef6f9b2c"},
{file = "pymongo-4.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:193cc97d44b1e6d2253ea94e30c6f94f994efb7166e2452af4df55825266e88b"},
{file = "pymongo-4.2.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:e152c26ffc30331e9d57591fc4c05453c209aa20ba299d1deb7173f7d1958c22"},
{file = "pymongo-4.2.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:8a9bc4dcfc2bda69ee88cdb7a89b03f2b8eca668519b704384a264dea2db4209"},
{file = "pymongo-4.2.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8cbb868e88c4eee1c53364bb343d226a3c0e959e791e6828030cb78f46cfcbe3"},
{file = "pymongo-4.2.0-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:2bfe6b59f431f40fa545547616f4acf0c0c4b64518b1f951083e3bad06eb368b"},
{file = "pymongo-4.2.0-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:ff66014687598823b6b23751884b4aa67eb934445406d95894dfc60cb7bfcc18"},
{file = "pymongo-4.2.0-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:31c50da4a080166bc29403aa91f4c76e0889b4f24928d1b60508a37c1bf87f9a"},
{file = "pymongo-4.2.0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:ccfdc7722df445c49dc6b5d514c3544cad99b53189165f7546793933050ac7fb"},
{file = "pymongo-4.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc7ebc37b03956a070260665079665eae69e5e96007694214f3a2107af96816a"},
{file = "pymongo-4.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c8b4a782aac43948308087b962c9ecb030ba98886ce6dee3ad7aafe8c5e1ce80"},
{file = "pymongo-4.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1c23527f8e13f526fededbb96f2e7888f179fe27c51d41c2724f7059b75b2fa"},
{file = "pymongo-4.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83cc3c35aeeceb67143914db67f685206e1aa37ea837d872f4bc28d7f80917c9"},
{file = "pymongo-4.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e09cdf5aad507c8faa30d97884cc42932ed3a9c2b7f22cc3ccc607bae03981b3"},
{file = "pymongo-4.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0f53253f4777cbccc426e669a2af875f26c95bd090d88593287b9a0a8ac7fa25"},
{file = "pymongo-4.2.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:21238b19243a42f9a34a6d39e7580ceebc6da6d2f3cf729c1cff9023cb61a5f1"},
{file = "pymongo-4.2.0-cp38-cp38-win32.whl", hash = "sha256:766acb5b1a19eae0f7467bcd3398748f110ea5309cdfc59faa5185dcc7fd4dca"},
{file = "pymongo-4.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:10f09c4f09757c2e2a707ad7304f5d69cb8fdf7cbfb644dbacfe5bbe8afe311b"},
{file = "pymongo-4.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a6bf01b9237f794fa3bdad5089474067d28be7e199b356a18d3f247a45775f26"},
{file = "pymongo-4.2.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:d8bb745321716e7a11220a67c88212ecedde4021e1de4802e563baef9df921d2"},
{file = "pymongo-4.2.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:3be53e9888e759c49ae35d747ff77a04ff82b894dd64601e0f3a5a159b406245"},
{file = "pymongo-4.2.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:a3efdf154844244e0dabe902cf1827fdced55fa5b144adec2a86e5ce50a99b97"},
{file = "pymongo-4.2.0-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:a7eb5b06744b911b6668b427c8abc71b6d624e72d3dfffed00988fa1b4340f97"},
{file = "pymongo-4.2.0-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:b0be613d926c5dbb0d3fc6b58e4f2be4979f80ae76fda6e47309f011b388fe0c"},
{file = "pymongo-4.2.0-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:e7dcb73f683c155885a3488646fcead3a895765fed16e93c9b80000bc69e96cb"},
{file = "pymongo-4.2.0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:b537dd282de1b53d9ae7cf9f3df36420c8618390f2da92100391f3ba8f3c141a"},
{file = "pymongo-4.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d98d2a8283c9928a9e5adf2f3c0181e095579e9732e1613aaa55d386e2bcb6c5"},
{file = "pymongo-4.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76892bbce743eb9f90360b3626ea92f13d338010a1004b4488e79e555b339921"},
{file = "pymongo-4.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:124d0e880b66f9b0778613198e89984984fdd37a3030a9007e5f459a42dfa2d3"},
{file = "pymongo-4.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:773467d25c293f8e981b092361dab5fd800e1ba318403b7959d35004c67faedc"},
{file = "pymongo-4.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6673ab3fbf3135cc1a8c0f70d480db5b2378c3a70af8d602f73f76b8338bdf97"},
{file = "pymongo-4.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:153b8f8705970756226dfeeb7bb9637e0ad54a4d79b480b4c8244e34e16e1662"},
{file = "pymongo-4.2.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:01721da74558f2f64a9f162ee063df403ed656b7d84229268d8e4ae99cfba59c"},
{file = "pymongo-4.2.0-cp39-cp39-win32.whl", hash = "sha256:a25c0eb2d610b20e276e684be61c337396813b636b69373c17314283cb1a3b14"},
{file = "pymongo-4.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:44b36ccb90aac5ea50be23c1a6e8f24fbfc78afabdef114af16c6e0a80981364"},
{file = "pymongo-4.2.0.tar.gz", hash = "sha256:72f338f6aabd37d343bd9d1fdd3de921104d395766bcc5cdc4039e4c2dd97766"},
]
pyparsing = [
{file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"},
{file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"},
@ -2592,9 +2489,6 @@ soupsieve = [
{file = "soupsieve-2.3.2.post1-py3-none-any.whl", hash = "sha256:3b2503d3c7084a42b1ebd08116e5f81aadfaea95863628c80a3b774a11b7c759"},
{file = "soupsieve-2.3.2.post1.tar.gz", hash = "sha256:fc53893b3da2c33de295667a0e19f078c14bf86544af307354de5fcf12a3f30d"},
]
sqlitedict = [
{file = "sqlitedict-2.0.0.tar.gz", hash = "sha256:23a370416f4e1e962daa293382f3a8dbc4127e6a0abc06a5d4e58e6902f05d17"},
]
starlette = [
{file = "starlette-0.19.1-py3-none-any.whl", hash = "sha256:5a60c5c2d051f3a8eb546136aa0c9399773a689595e099e0877704d5888279bf"},
{file = "starlette-0.19.1.tar.gz", hash = "sha256:c6d21096774ecb9639acad41b86b7706e52ba3bf1dc13ea4ed9ad593d47e24c7"},
@ -2709,8 +2603,8 @@ user-agents = [
{file = "user_agents-2.2.0-py3-none-any.whl", hash = "sha256:a98c4dc72ecbc64812c4534108806fb0a0b3a11ec3fd1eafe807cee5b0a942e7"},
]
uvicorn = [
{file = "uvicorn-0.18.2-py3-none-any.whl", hash = "sha256:c19a057deb1c5bb060946e2e5c262fc01590c6529c0af2c3d9ce941e89bc30e0"},
{file = "uvicorn-0.18.2.tar.gz", hash = "sha256:cade07c403c397f9fe275492a48c1b869efd175d5d8a692df649e6e7e2ed8f4e"},
{file = "uvicorn-0.18.3-py3-none-any.whl", hash = "sha256:0abd429ebb41e604ed8d2be6c60530de3408f250e8d2d84967d85ba9e86fe3af"},
{file = "uvicorn-0.18.3.tar.gz", hash = "sha256:9a66e7c42a2a95222f76ec24a4b754c158261c4696e683b9dadc72b590e0311b"},
]
uvloop = [
{file = "uvloop-0.16.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6224f1401025b748ffecb7a6e2652b17768f30b1a6a3f7b44660e5b5b690b12d"},

@ -22,7 +22,6 @@ lxml = "^4.8.0"
Pillow = "^9.1.0"
matplotlib = "^3.5.1"
pypinyin = "^0.46.0"
pymongo = "^4.1.0"
xlsxwriter = "^3.0.3"
aiofiles = "^0.8.0"
tortoise-orm = "^0.19.2"
@ -31,6 +30,7 @@ tqdm = "^4.64.0"
ujson = "^5.4.0"
expandvars = "^0.9.0"
pywebio = "^1.6.2"
jieba = "^0.42.1"

[tool.poetry.dev-dependencies]
nb-cli = "^0.6.7"