Merge pull request #80 from meatjam/nonebot2_voice_refactor

refactor(guess_voice): add and use a 米游社 (MiHoYo BBS) route for fetching character voice lines
commit b46072baed
惜月 2022-06-27 07:13:57 -05:00, committed by GitHub

@@ -18,6 +18,8 @@ from .util import get_path
OUT_PUT = Path() / 'data' / 'LittlePaimon' / 'guess_voice' / 'voice'
BASE_URL = 'https://wiki.biligame.com/ys/'
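# New constants for the 米游社 route: BASE_URL_MYS is the BBS host and
# BASE_URL_MYS_CHARACTERS_LIST is the wiki page listing every character.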
BASE_URL_MYS = 'https://bbs.mihoyo.com'
BASE_URL_MYS_CHARACTERS_LIST = '/ys/obc/channel/map/189/25?bbs_presentation_style=no_header'
API = {'character_list': '角色', 'voice': '%s语音'}
@@ -90,6 +92,66 @@ async def get_voice_info(character_name: str):
    return info_list
# Fetch character voice lines via the 米游社 route; this source provides the complete Chinese/Japanese/English/Korean voice sets.
async def get_voice_info_mys(character_name: str):
    character_name = character_name.strip()
    logger.info('获取数据: %s' % character_name)
    html = await aiorequests.get(url=(BASE_URL_MYS + BASE_URL_MYS_CHARACTERS_LIST))
    soup = BeautifulSoup(html.text, 'lxml')
    soup_char_container = soup.select('.collection-avatar')[0]
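    # Scan the avatar collection for a title containing the requested name;
    # the matching element's parent <a> links to that character's wiki page.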
    url_char_page = None
    for char_soup in soup_char_container.select('.collection-avatar__title'):
        if char_soup.text.find(character_name) != -1:
            url_char_page = char_soup.parent.attrs.get('href', None)
            break
    if url_char_page is None:
        return None
    html = await aiorequests.get(url=(BASE_URL_MYS + url_char_page))
    soup = BeautifulSoup(html.text, 'lxml')
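    # The voice tab block holds two <ul> lists: the first is the language tab
    # bar, the second holds one <li> of voice tables per language.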
    soup_voice_languages, soup_voice_lists = soup.select('[data-part="voiceTab"] > ul')
    language_tab_indices = {
        '中': -1,
        '日': -1,
        '英': -1,
        '韩': -1
    }
    for soup_lan in soup_voice_languages.select('li'):
        language = soup_lan.text
        language_tab_index = int(soup_lan.attrs.get('data-index'))
        if language.find('汉') != -1 or language.find('中') != -1:
            language_tab_indices['中'] = language_tab_index
        elif language.find('日') != -1:
            language_tab_indices['日'] = language_tab_index
        elif language.find('英') != -1:
            language_tab_indices['英'] = language_tab_index
        elif language.find('韩') != -1:
            language_tab_indices['韩'] = language_tab_index
    language_voices = {
        '中': [],
        '日': [],
        '英': [],
        '韩': []
    }
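    # Walk each language's tab (matched via data-index) and collect the audio
    # URLs row by row, so list indices stay aligned across languages.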
    for lan, voice_list in language_voices.items():
        for soup_row in soup_voice_lists.select(f'li[data-index="{language_tab_indices[lan]}"] > table:nth-of-type(2) > tbody > tr'):
            soup_source = soup_row.select('audio > source')
            voice_list.append(soup_source[0].attrs.get('src') if len(soup_source) != 0 else '')
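    # Voice titles and transcript text are read from the first language tab
    # only; the per-language lists above supply the matching audio URLs.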
    info_list = []
    soup_title = soup_voice_lists.select('li:first-child > table:nth-of-type(2) > tbody > tr td:nth-child(1)')
    soup_text = soup_voice_lists.select('li:first-child > table:nth-of-type(2) > tbody > tr td:nth-child(2) > div > span')
    for index in range(len(soup_title)):
        info_list.append({
            'title': soup_title[index].text.strip(),
            'text': soup_text[index].text.strip(),
            '中': language_voices['中'][index],
            '日': language_voices['日'][index],
            '英': language_voices['英'][index],
            '韩': language_voices['韩'][index],
        })
    return info_list
# Download an audio file to a local path
async def download(url, path):
    res = await aiorequests.get(url=url, timeout=30)
@@ -102,7 +164,7 @@ async def update_voice_data():
    # Fetch the full character list
    char_list = await get_character_list()
    for char in char_list:
        info = await get_voice_info_mys(char)
        if not info:
            continue
        data = []
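
For context, here is a minimal usage sketch of the new route. It is not part of the commit: it assumes only the names from this diff (get_voice_info_mys, download, OUT_PUT), and the one-folder-per-character output layout in the snippet is a hypothetical example, not the plugin's actual scheme.

# Illustrative only: fetch one character's voice rows and save the Chinese audio.
import asyncio

async def demo(name: str):
    info = await get_voice_info_mys(name)
    if not info:
        return
    for row in info:
        url = row['中']  # one URL per language key: '中' / '日' / '英' / '韩'
        if url:  # rows with no audio for a language carry an empty string
            await download(url, OUT_PUT / name / (row['title'] + '.mp3'))

asyncio.run(demo('钟离'))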