diff --git a/about.md b/about.md
index d6c2ec83..eaee4b9e 100644
--- a/about.md
+++ b/about.md
@@ -221,6 +221,24 @@ Contribute to the project to help with its ongoing maintenance
 - M*u(LiuVfx) / 2024-4-23 捐助 6.66 元
 - *波(支付宝) / 2024-4-23 捐助 88.88 元
 - *华 / 2024-4-24 捐助 30 元
+- *OBJ / 2024-4-26 捐助 1 元
+- M*i / 2024-4-28 捐助 28 元
+- **彬(支付宝) / 2024-4-28 捐助 20 元
+- *籽 / 2024-4-29 捐助 1 元
+- *笑 / 2024-5-1 捐助 2 元
+- **豹(支付宝) / 2024-5-1 捐助 11 元
+- **豹(支付宝) / 2024-5-1 捐助 10 元
+- *磊 / 2024-5-4 捐助 2 元
+- Anthony Tran(ko-fi) / 2024-5-4 捐助 $20
+- *蟹 / 2024-5-6 捐助 20 元
+- x*y / 2024-5-6 捐助 0.5 元
+- *曹 / 2024-5-7 捐助 6 元
+- *维 / 2024-5-8 捐助 50 元
+- *林 / 2024-5-9 捐助 20 元
+- **璨(支付宝) / 2024-5-9 捐助 5 元
+- *易 / 2024-5-10 捐助 5 元
+- **文(支付宝) / 2024-5-10 捐助 100 元
+
 
 **未标注付款方式的即为微信支付,括号内标注为GitHub用户名,感谢所有支持者,软件的每一点进步都离不开您的支持和帮助。**
diff --git a/requirements.txt b/requirements.txt
index bc406674..82b2ab34 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -20,7 +20,7 @@ click==7.1.2
 colorama==0.4.6
 coloredlogs==15.0.1
 contourpy==1.1.1
-ctranslate2==4.0.0
+ctranslate2==3.24.0
 cycler==0.12.1
 decorator==4.4.2
 deepl==1.16.1
diff --git a/sp.py b/sp.py
index b2bb75df..c7244095 100644
--- a/sp.py
+++ b/sp.py
@@ -62,6 +62,7 @@ def run(self):
         et = time.time()
         self.close()
         print(f'启动用时:{et - st}')
+        print(f'代理='+(os.environ.get('http_proxy','') or os.environ.get('https_proxy','')))
         if not nostyle.exists():
             import videotrans.ui.dark.darkstyle_rc
             with open('./videotrans/styles/style.qss', 'r', encoding='utf-8') as f:
diff --git a/version.json b/version.json
index 35ed05eb..61338b62 100644
--- a/version.json
+++ b/version.json
@@ -1,4 +1,4 @@
 {
-  "version": "1.73",
-  "version_num": 11073
+  "version": "1.75",
+  "version_num": 11075
 }
diff --git a/videotrans/__init__.py b/videotrans/__init__.py
index 85c12004..1747bde5 100644
--- a/videotrans/__init__.py
+++ b/videotrans/__init__.py
@@ -1,4 +1,4 @@
 # -*- coding: utf-8 -*-
-VERSION="v1.74"
-VERSION_NUM=110704
\ No newline at end of file
+VERSION="v1.75"
+VERSION_NUM=110705
\ No newline at end of file
diff --git a/videotrans/box/win.py b/videotrans/box/win.py
index 1cf19b81..e9510487 100644
--- a/videotrans/box/win.py
+++ b/videotrans/box/win.py
@@ -573,7 +573,7 @@ def shibie_start_fun(self):
             if not allow:
                 self.is_cuda.setChecked(False)
                 return QMessageBox.critical(self, config.transobj['anerror'], config.transobj["nocudnn"])
-        if model_type == 'faster':
+        if model_type == 'faster' and model.find('/')==-1:
             file = f'{config.rootdir}/models/models--Systran--faster-whisper-{model}/snapshots'
             if model.startswith('distil'):
                 file = f'{config.rootdir}/models/models--Systran--faster-{model}/snapshots'
diff --git a/videotrans/configure/config.py b/videotrans/configure/config.py
index 1295dc2e..c18b0c7d 100644
--- a/videotrans/configure/config.py
+++ b/videotrans/configure/config.py
@@ -94,6 +94,7 @@ def parse_init():
     with file.open('r', encoding="utf-8") as f:
         # 遍历.ini文件中的每个section
         for it in f.readlines():
+            it = it.strip()
             if not it or it.startswith(';'):
                 continue
@@ -126,6 +127,8 @@
 # 初始化一个字典变量
 settings = parse_init()
+
+
 # default language 如果 ini中设置了,则直接使用,否则自动判断
 if settings['lang']:
     defaulelang = settings['lang'].lower()
@@ -164,24 +167,12 @@
 # box窗口
 queuebox_logs = Queue(1000)
+
+
+
 model_list=settings['model_list'].split(',')
-# model_list=[
-# "tiny",
-# "tiny.en",
-# "base",
-# "base.en",
-# "small",
-# "small.en",
-# "medium",
-# "medium.en",
-# "large-v1",
-# "large-v2",
-# "large-v3",
-# "distil-whisper-small.en",
-# "distil-whisper-medium.en",
-# "distil-whisper-large-v2",
-# "distil-whisper-large-v3"
-# ]
+
+
 # 开始按钮状态
 current_status = "stop"
diff --git a/videotrans/mainwin/secwin.py b/videotrans/mainwin/secwin.py
index cc3f57a4..ee5806d6 100644
--- a/videotrans/mainwin/secwin.py
+++ b/videotrans/mainwin/secwin.py
@@ -601,8 +601,6 @@ def open_url(self, title):
 本软件的所有解释权均属于开发者。谨请用户在理解、同意、遵守本免责声明的前提下使用本软件。
             """)
-        elif title == 'freechatgpt':
-            webbrowser.open_new_tab("https://apiskey.top")
         elif title == 'aihelp':
             webbrowser.open_new_tab("https://www.coze.cn/store/bot/7358853334134112296?panel=1")
@@ -666,10 +664,6 @@ def set_translate_type(self, name):
                 self.main.subform.set_transapi()
                 return
             config.params['translate_type'] = name
-            if name == translator.FREECHATGPT_NAME:
-                self.main.translate_label1.show()
-            else:
-                self.main.translate_label1.hide()
         except Exception as e:
             QMessageBox.critical(self.main, config.transobj['anerror'], str(e))
@@ -715,6 +709,8 @@ def model_type_change(self):
     def check_whisper_model(self, name):
         if self.main.model_type.currentIndex() in [2,3]:
             return True
+        if name.find('/')>0:
+            return True
         slang = self.main.source_language.currentText()
         if name.endswith('.en') and translator.get_code(show_text=slang) != 'en':
             QMessageBox.critical(self.main, config.transobj['anerror'], config.transobj['enmodelerror'])
diff --git a/videotrans/mainwin/spwin.py b/videotrans/mainwin/spwin.py
index 1c5886a4..e671450c 100644
--- a/videotrans/mainwin/spwin.py
+++ b/videotrans/mainwin/spwin.py
@@ -14,7 +14,7 @@
 from videotrans.task.get_role_list import GetRoleWorker
 from videotrans.util import tools
-from videotrans.translator import TRANSNAMES, FREECHATGPT_NAME
+from videotrans.translator import TRANSNAMES
 from videotrans.configure import config
 from videotrans import VERSION
 from videotrans.component.controlobj import TextGetdir
@@ -110,10 +110,7 @@ def initUI(self):
         # translation type
         self.translate_type.addItems(TRANSNAMES)
-        translate_name = config.params['translate_type'] if config.params['translate_type'] in TRANSNAMES else \
-            TRANSNAMES[0]
-        if translate_name == FREECHATGPT_NAME:
-            self.translate_label1.show()
+        translate_name = config.params['translate_type'] if config.params['translate_type'] in TRANSNAMES else TRANSNAMES[0]
         self.translate_type.setCurrentText(translate_name)
@@ -125,7 +122,8 @@
         d = {"all": 0, 'split': 1, "avg": 2, "": 0}
         self.whisper_type.setCurrentIndex(d[config.params['whisper_type']])
         self.whisper_model.addItems(config.model_list)
-        self.whisper_model.setCurrentText(config.params['whisper_model'])
+        if config.params['whisper_model'] in config.model_list:
+            self.whisper_model.setCurrentText(config.params['whisper_model'])
         if config.params['model_type'] == 'openai':
             self.model_type.setCurrentIndex(1)
         elif config.params['model_type'] == 'GoogleSpeech':
             self.model_type.setCurrentIndex(2)
@@ -249,7 +247,6 @@ def bind_action(self):
         self.export_sub.setToolTip(
             config.transobj['When subtitles exist, the subtitle content can be saved to a local SRT file'])
-        self.translate_label1.clicked.connect(lambda: self.util.open_url('freechatgpt'))
         self.set_line_role.clicked.connect(self.subform.set_line_role_fun)
         self.set_line_role.setCursor(Qt.PointingHandCursor)
@@ -444,8 +441,6 @@ def get_setting(self):
         config.clone_voicelist = self.settings.value("clone_voicelist", "").split(',')
         config.params["chatgpt_model"] = self.settings.value("chatgpt_model", config.params['chatgpt_model'])
-        if config.params["chatgpt_model"] == 'large':
-            config.params["chatgpt_model"] = 'large-v3'
         os.environ['OPENAI_API_KEY'] = config.params["chatgpt_key"]
         config.params["ttsapi_url"] = self.settings.value("ttsapi_url", "")
@@ -469,6 +464,8 @@
         config.params["elevenlabstts_key"] = self.settings.value("elevenlabstts_key", "")
         config.params['translate_type'] = self.settings.value("translate_type", config.params['translate_type'])
+        if config.params['translate_type']=='FreeChatGPT':
+            config.params['translate_type']='FreeGoogle'
         config.params['subtitle_type'] = self.settings.value("subtitle_type", config.params['subtitle_type'], int)
         config.proxy = self.settings.value("proxy", "", str)
         config.params['voice_rate'] = self.settings.value("voice_rate", config.params['voice_rate'].replace('%','').replace('+',''), str)
diff --git a/videotrans/recognition/all.py b/videotrans/recognition/all.py
index ba0ba5c0..de53f530 100644
--- a/videotrans/recognition/all.py
+++ b/videotrans/recognition/all.py
@@ -11,24 +11,31 @@ def recogn(*,
            detect_language=None,
            audio_file=None,
            cache_folder=None,
-           model_name="base",
+           model_name="tiny",
            set_p=True,
            inst=None,
            is_cuda=None):
     if config.exit_soft or (config.current_status != 'ing' and config.box_recogn != 'ing'):
         return False
-    if set_p:
-        tools.set_process(f"{config.transobj['kaishishibie']}",
-                          btnkey=inst.init['btnkey'] if inst else "")
-    down_root = os.path.normpath(config.rootdir + "/models")
+    down_root = config.rootdir + "/models"
+    if set_p and inst:
+        if model_name.find('/')>0:
+            if not os.path.isdir(down_root+'/models--'+model_name.replace('/','--')):
+                inst.parent.status_text='下载模型中,用时可能较久' if config.defaulelang=='zh'else 'Download model from huggingface'
+            else:
+                inst.parent.status_text='加载或下载模型中,用时可能较久' if config.defaulelang=='zh'else 'Load model from local or download model from huggingface'
+        else:
+            tools.set_process(f"{config.transobj['kaishishibie']}",btnkey=inst.init['btnkey'] if inst else "")
     model = None
     try:
         if model_name.startswith('distil-'):
-            com_type= "float32"
+            com_type= "default"
         elif is_cuda:
             com_type=config.settings['cuda_com_type']
         else:
-            com_type='int8'
+            com_type='default'
+        local_res=True if model_name.find('/')==-1 else False
+
         model = WhisperModel(model_name,
                              device="cuda" if is_cuda else "cpu",
                              compute_type=com_type,
@@ -36,7 +43,7 @@
                              num_workers=config.settings['whisper_worker'],
                              cpu_threads=os.cpu_count() if int(config.settings['whisper_threads']) < 1 else int(
                                  config.settings['whisper_threads']),
-                             local_files_only=True)
+                             local_files_only=local_res)
         if config.current_status != 'ing' and config.box_recogn != 'ing':
             return False
         if not tools.vail_file(audio_file):
diff --git a/videotrans/recognition/avg.py b/videotrans/recognition/avg.py
index a83c8a89..3f23141e 100644
--- a/videotrans/recognition/avg.py
+++ b/videotrans/recognition/avg.py
@@ -41,7 +41,7 @@ def recogn(*,
            detect_language=None,
            audio_file=None,
            cache_folder=None,
-           model_name="base",
+           model_name="tiny",
            set_p=True,
            inst=None,
            is_cuda=None):
@@ -72,17 +72,24 @@
     total_length = len(nonsilent_data)
     start_t = time.time()
     if model_name.startswith('distil-'):
-        com_type= "float32"
+        com_type= "default"
     elif is_cuda:
         com_type=config.settings['cuda_com_type']
     else:
-        com_type='int8'
+        com_type='default'
+    local_res=True if model_name.find('/')==-1 else False
+    down_root=config.rootdir + "/models"
+    if set_p and inst and model_name.find('/')>0:
+        if not os.path.isdir(down_root+'/models--'+model_name.replace('/','--')):
+            inst.parent.status_text='下载模型中,用时可能较久' if config.defaulelang=='zh'else 'Download model from huggingface'
+        else:
+            inst.parent.status_text='加载或下载模型中,用时可能较久' if config.defaulelang=='zh'else 'Load model from local or download model from huggingface'
     model = WhisperModel(
         model_name,
         device="cuda" if config.params['cuda'] else "cpu",
         compute_type=com_type,
-        download_root=config.rootdir + "/models",
-        local_files_only=True)
+        download_root=down_root,
+        local_files_only=local_res)
     for i, duration in enumerate(nonsilent_data):
         if config.exit_soft or (config.current_status != 'ing' and config.box_recogn != 'ing'):
             del model
diff --git a/videotrans/recognition/openai.py b/videotrans/recognition/openai.py
index ab21dfd0..7d7a4a4b 100644
--- a/videotrans/recognition/openai.py
+++ b/videotrans/recognition/openai.py
@@ -37,7 +37,7 @@ def recogn(*,
            detect_language=None,
            audio_file=None,
            cache_folder=None,
-           model_name="base",
+           model_name="tiny",
            set_p=True,
            inst=None,
            is_cuda=None):
diff --git a/videotrans/recognition/yuxian.py b/videotrans/recognition/yuxian.py
index ffe5acb2..9286221e 100644
--- a/videotrans/recognition/yuxian.py
+++ b/videotrans/recognition/yuxian.py
@@ -40,10 +40,10 @@
            set_p=True,
            inst=None,
            is_cuda=None):
-    if set_p:
-        tools.set_process(config.transobj['fengeyinpinshuju'], btnkey=inst.init['btnkey'] if inst else "")
     if config.exit_soft or (config.current_status != 'ing' and config.box_recogn != 'ing'):
         return False
+    if set_p:
+        tools.set_process(config.transobj['fengeyinpinshuju'], btnkey=inst.init['btnkey'] if inst else "")
     noextname = os.path.basename(audio_file)
     tmp_path = f'{cache_folder}/{noextname}_tmp'
     if not os.path.isdir(tmp_path):
@@ -69,17 +69,24 @@
     raw_subtitles = []
     total_length = len(nonsilent_data)
     if model_name.startswith('distil-'):
-        com_type= "float32"
+        com_type= "default"
     elif is_cuda:
         com_type=config.settings['cuda_com_type']
     else:
-        com_type='int8'
+        com_type='default'
+    local_res=True if model_name.find('/')==-1 else False
+    down_root=config.rootdir + "/models"
+    if set_p and inst and model_name.find('/')>0:
+        if not os.path.isdir(down_root+'/models--'+model_name.replace('/','--')):
+            inst.parent.status_text='下载模型中,用时可能较久' if config.defaulelang=='zh'else 'Download model from huggingface'
+        else:
+            inst.parent.status_text='加载或下载模型中,用时可能较久' if config.defaulelang=='zh'else 'Load model from local or download model from huggingface'
     model = WhisperModel(
         model_name,
         device="cuda" if is_cuda else "cpu",
         compute_type=com_type,
-        download_root=config.rootdir + "/models",
-        local_files_only=True)
+        download_root=down_root,
+        local_files_only=local_res)
     for i, duration in enumerate(nonsilent_data):
         if config.exit_soft or (config.current_status != 'ing' and config.box_recogn != 'ing'):
             del model
diff --git a/videotrans/task/check_update.py b/videotrans/task/check_update.py
index d9dbc37c..18059059 100644
--- a/videotrans/task/check_update.py
+++ b/videotrans/task/check_update.py
@@ -23,7 +23,7 @@ def get(self):
         if proxy:
             proxies={"http":proxy,"https":proxy}
-        res=requests.get("https://raw.githubusercontent.com/jianchang512/pyvideotrans/main/version.json",proxies=proxies)
+        res=requests.get("https://pyvideotrans.com/version.json",proxies=proxies)
         if res.status_code==200:
             d=res.json()
             if d['version_num']>videotrans.VERSION_NUM:
diff --git a/videotrans/task/trans_create.py b/videotrans/task/trans_create.py
index f56327c1..86e7d503 100644
--- a/videotrans/task/trans_create.py
+++ b/videotrans/task/trans_create.py
@@ -51,16 +51,7 @@ def __init__(self, config_params: dict = None, obj=None):
         self.obj = obj
         # 配置信息
         self.config_params = config_params
-
-
-        # 识别是否结束
-        # self.regcon_end = False
-        # # 翻译是否结束
-        # self.trans_end = False
-        # # 配音是否结束
-        # self.dubb_end = False
-        # # 合并是否结束
-        # self.compose_end = False
+
         # 进度
         self.step_inst=None
         self.hasend=False
@@ -178,8 +169,6 @@
         # 检测字幕原始语言
         if self.config_params['source_language'] != '-':
             self.init['detect_language'] = get_audio_code(show_source=self.config_params['source_language'])
-        # if self.config_params['target_language'] != '-':
-        #     self.init['subtitle_language'] = get_subtitle_code(show_target=self.config_params['target_language'])
         self.init['novoice_mp4'] = f"{self.init['target_dir']}/novoice.mp4"
         self.init['source_sub'] = f"{self.init['target_dir']}/{self.init['source_language_code']}.srt"
@@ -221,15 +210,31 @@
             with open(sub_file, 'w', encoding="utf-8", errors="ignore") as f:
                 f.write(self.config_params['subtitles'].strip())
         # 如何名字不合规迁移了,并且存在原语言或目标语言字幕
-        if self.config_params['app_mode']!='peiyin' and self.obj['output'] != self.obj['linshi_output']:
+        if self.config_params['app_mode']!='peiyin':
+            # 判断是否存在原始视频同名同目录的srt字幕文件
+            raw_srt=Path(self.obj['raw_dirname']+f"/{self.obj['raw_noextname']}.srt")
+            if Path(raw_srt).is_file() and Path(raw_srt).stat().st_size>0:
+                config.logger.info(f'使用原始视频同目录下同名字幕文件:{raw_srt.as_posix()}')
+                shutil.copy2(raw_srt.as_posix(),self.init['source_sub'])
+
             raw_source_srt=self.obj['output']+f"/{self.init['source_language_code']}.srt"
-
-            if Path(raw_source_srt).is_file():
-                shutil.copy2(raw_source_srt,self.init['source_sub'])
+            raw_source_srt_path=Path(raw_source_srt)
+            if raw_source_srt_path.is_file():
+                if raw_source_srt_path.stat().st_size==0:
+                    raw_source_srt_path.unlink(missing_ok=True)
+                elif self.obj['output']!=self.obj['linshi_output']:
+                    config.logger.info(f'使用已放置到目标文件夹下的原语言字幕:{raw_source_srt}')
+                    shutil.copy2(raw_source_srt,self.init['source_sub'])
+
             raw_target_srt=self.obj['output']+f"/{self.init['target_language_code']}.srt"
+            raw_target_srt_path=Path(raw_target_srt)
             if Path(raw_target_srt).is_file():
-                shutil.copy2(raw_target_srt,self.init['target_sub'])
+                if raw_target_srt_path.stat().st_size==0:
+                    raw_target_srt_path.unlink(missing_ok=True)
+                elif self.obj['output']!=self.obj['linshi_output']:
+                    config.logger.info(f'使用已放置到目标文件夹下的目标语言字幕:{raw_target_srt}')
+                    shutil.copy2(raw_target_srt,self.init['target_sub'])
@@ -250,7 +255,7 @@ def runing():
             while not self.hasend:
                 time.sleep(2)
                 t+=2
-                tools.set_process(f"{self.status_text} {t}s",btnkey=self.init['btnkey'])
+                tools.set_process(f"{self.status_text} {t}s",btnkey=self.init['btnkey'],nologs=True)
         if self.config_params['app_mode'] not in ['peiyin','tiqu']:
             threading.Thread(target=runing).start()
diff --git a/videotrans/ui/en.py b/videotrans/ui/en.py
index 6dd691bb..e1865d5d 100644
--- a/videotrans/ui/en.py
+++ b/videotrans/ui/en.py
@@ -104,14 +104,7 @@ def setupUi(self, MainWindow):
         self.layout_translate_type.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.translate_type)
         self.horizontalLayout_5.addLayout(self.layout_translate_type)
-        self.translate_label1 = QtWidgets.QPushButton(self.layoutWidget)
-        self.translate_label1.setCursor(Qt.PointingHandCursor)
-        self.translate_label1.setText(config.transobj['freechatgpt_tips'])
-        self.translate_label1.setToolTip(config.transobj['freechatgpt_tips'])
-        self.translate_label1.setStyleSheet("""border:0;background-color:transparent;font-size:12px;color:#999""")
-        self.horizontalLayout_5.addWidget(self.translate_label1)
-
-        self.translate_label1.hide()
+
         self.horizontalLayout_5.addStretch()
         self.layout_proxy = QtWidgets.QFormLayout()
diff --git a/videotrans/util/tools.py b/videotrans/util/tools.py
index 288162cd..67fe368e 100644
--- a/videotrans/util/tools.py
+++ b/videotrans/util/tools.py
@@ -87,6 +87,9 @@ def set_proxy(set_val=''):
         if not set_val.startswith("http") and not set_val.startswith('sock'):
             set_val = f"http://{set_val}"
         config.proxy = set_val
+        os.environ['http_proxy']=set_val
+        os.environ['https_proxy']=set_val
+        os.environ['all_proxy']=set_val
         return set_val
     # 获取代理
@@ -890,13 +893,14 @@ def set_process_box(text, type='logs', *, func_name=""):
 # 综合写入日志,默认sp界面
-def set_process(text, type="logs", *, qname='sp', func_name="", btnkey=""):
+def set_process(text, type="logs", *, qname='sp', func_name="", btnkey="",nologs=False):
     try:
         if text:
-            if text.startswith("[error]") or type == 'error':
-                config.logger.error(text)
-            else:
-                config.logger.info(text)
+            if not nologs:
+                if type == 'error':
+                    config.logger.error(text)
+                else:
+                    config.logger.info(text)
             # 移除html
         if type == 'error':
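
The recognition changes above (all.py, avg.py, yuxian.py) share one rule: a whisper model name containing "/" is treated as a HuggingFace repo id that may be downloaded on first use, while a plain short name is expected to exist locally, and compute_type now falls back to "default". The snippet below is not part of the patch; it is a minimal sketch of that loading rule using the public faster-whisper API, with ROOT_DIR, the model name, and the audio path as placeholder assumptions.

```python
# Illustrative sketch only (not part of the patch): the model-loading rule the
# recognition modules above apply, expressed with the public faster-whisper API.
import os
from faster_whisper import WhisperModel

ROOT_DIR = os.getcwd()                        # placeholder for config.rootdir
model_name = "Systran/faster-whisper-tiny"    # hypothetical "user/model" repo id

down_root = ROOT_DIR + "/models"
# A plain short name ("tiny", "base", ...) must already be downloaded, so stay offline;
# a name containing "/" is a repo id that faster-whisper may fetch from HuggingFace.
local_res = model_name.find('/') == -1

model = WhisperModel(
    model_name,
    device="cpu",                 # "cuda" when GPU recognition is enabled
    compute_type="default",       # the patch replaces int8/float32 with "default"
    download_root=down_root,
    local_files_only=local_res,
)
segments, info = model.transcribe("audio.wav")  # placeholder audio file
for seg in segments:
    print(seg.start, seg.end, seg.text)
```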