fix: bugs and update
jianchang512 committed May 12, 2024
2 parents 5e00e52 + 9a14089 commit a5cdbe0
Showing 17 changed files with 116 additions and 90 deletions.
18 changes: 18 additions & 0 deletions about.md
@@ -221,6 +221,24 @@ Contribute to the project to help with its ongoing maintenance
- M*u(LiuVfx) / 2024-4-23 捐助 6.66 元
- *波(支付宝) / 2024-4-23 捐助 88.88 元
- *华 / 2024-4-24 捐助 30 元
- *OBJ / 2024-4-26 捐助 1 元
- M*i / 2024-4-28 捐助 28 元
- **彬(支付宝) / 2024-4-28 捐助 20 元
- *籽 / 2024-4-29 捐助 1 元
- *笑 / 2024-5-1 捐助 2 元
- **豹(支付宝) / 2024-5-1 捐助 11 元
- **豹(支付宝) / 2024-5-1 捐助 10 元
- *磊 / 2024-5-4 捐助 2 元
- Anthony Tran(ko-fi) / 2024-5-4 捐助 $20
- *蟹 / 2024-5-6 捐助 20 元
- x*y / 2024-5-6 捐助 0.5 元
- *曹 / 2024-5-7 捐助 6 元
- *维 / 2024-5-8 捐助 50 元
- *林 / 2024-5-9 捐助 20 元
- **璨(支付宝) / 2024-5-9 捐助 5 元
- *易 / 2024-5-10 捐助 5 元
- **文(支付宝) / 2024-5-10 捐助 100 元



**未标注付款方式的即为微信支付,括号内标注为GitHub用户名,感谢所有支持者,软件的每一点进步都离不开您的支持和帮助。**
2 changes: 1 addition & 1 deletion requirements.txt
@@ -20,7 +20,7 @@ click==7.1.2
colorama==0.4.6
coloredlogs==15.0.1
contourpy==1.1.1
ctranslate2==4.0.0
ctranslate2==3.24.0
cycler==0.12.1
decorator==4.4.2
deepl==1.16.1
1 change: 1 addition & 0 deletions sp.py
@@ -62,6 +62,7 @@ def run(self):
et = time.time()
self.close()
print(f'启动用时:{et - st}')
print(f'代理='+(os.environ.get('http_proxy','') or os.environ.get('https_proxy','')))
if not nostyle.exists():
import videotrans.ui.dark.darkstyle_rc
with open('./videotrans/styles/style.qss', 'r', encoding='utf-8') as f:
4 changes: 2 additions & 2 deletions version.json
@@ -1,4 +1,4 @@
{
"version": "1.73",
"version_num": 11073
"version": "1.75",
"version_num": 11075
}
4 changes: 2 additions & 2 deletions videotrans/__init__.py
@@ -1,4 +1,4 @@
# -*- coding: utf-8 -*-

VERSION="v1.74"
VERSION_NUM=110704
VERSION="v1.75"
VERSION_NUM=110705
2 changes: 1 addition & 1 deletion videotrans/box/win.py
@@ -573,7 +573,7 @@ def shibie_start_fun(self):
if not allow:
self.is_cuda.setChecked(False)
return QMessageBox.critical(self, config.transobj['anerror'], config.transobj["nocudnn"])
if model_type == 'faster':
if model_type == 'faster' and model.find('/')==-1:
file = f'{config.rootdir}/models/models--Systran--faster-whisper-{model}/snapshots'
if model.startswith('distil'):
file = f'{config.rootdir}/models/models--Systran--faster-{model}/snapshots'
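
The hunk above makes the local-path check conditional: the bundled snapshot directory is only looked up when the model name contains no '/', so entries that look like Hugging Face repo ids are left for faster-whisper to resolve. A minimal sketch of that path logic, following the directory layout shown in the diff; the helper function name is hypothetical:

```python
def local_faster_whisper_dir(root_dir: str, model: str):
    """Hypothetical helper mirroring the check added above: return the expected
    local snapshot directory for a bundled faster-whisper model, or None when
    the name contains '/' (treated as a Hugging Face repo id)."""
    if model.find('/') != -1:
        return None
    if model.startswith('distil'):
        return f'{root_dir}/models/models--Systran--faster-{model}/snapshots'
    return f'{root_dir}/models/models--Systran--faster-whisper-{model}/snapshots'


# Example: plain names map to a local snapshot path, repo ids do not.
print(local_faster_whisper_dir('.', 'large-v3'))
# ./models/models--Systran--faster-whisper-large-v3/snapshots
print(local_faster_whisper_dir('.', 'Systran/faster-whisper-large-v3'))
# None
```
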
25 changes: 8 additions & 17 deletions videotrans/configure/config.py
@@ -94,6 +94,7 @@ def parse_init():
with file.open('r', encoding="utf-8") as f:
# 遍历.ini文件中的每个section
for it in f.readlines():

it = it.strip()
if not it or it.startswith(';'):
continue
@@ -126,6 +127,8 @@ def parse_init():

# 初始化一个字典变量
settings = parse_init()


# default language 如果 ini中设置了,则直接使用,否则自动判断
if settings['lang']:
defaulelang = settings['lang'].lower()
@@ -164,24 +167,12 @@ def parse_init():
# box窗口
queuebox_logs = Queue(1000)




model_list=settings['model_list'].split(',')
# model_list=[
# "tiny",
# "tiny.en",
# "base",
# "base.en",
# "small",
# "small.en",
# "medium",
# "medium.en",
# "large-v1",
# "large-v2",
# "large-v3",
# "distil-whisper-small.en",
# "distil-whisper-medium.en",
# "distil-whisper-large-v2",
# "distil-whisper-large-v3"
# ]



# 开始按钮状态
current_status = "stop"
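
The config.py hunk replaces the hard-coded whisper model list with a value read from the parsed ini settings. A small sketch of the assumed format, a comma-separated `model_list` string whose example values follow the names that were previously hard-coded:

```python
# Assumed ini format: model_list is a plain comma-separated string.
settings = {'model_list': 'tiny,base,small,medium,large-v2,large-v3,distil-whisper-large-v3'}
model_list = settings['model_list'].split(',')
print(model_list)
# ['tiny', 'base', 'small', 'medium', 'large-v2', 'large-v3', 'distil-whisper-large-v3']
```
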
8 changes: 2 additions & 6 deletions videotrans/mainwin/secwin.py
@@ -601,8 +601,6 @@ def open_url(self, title):
本软件的所有解释权均属于开发者。谨请用户在理解、同意、遵守本免责声明的前提下使用本软件。
""")

elif title == 'freechatgpt':
webbrowser.open_new_tab("https://apiskey.top")
elif title == 'aihelp':
webbrowser.open_new_tab("https://www.coze.cn/store/bot/7358853334134112296?panel=1")

@@ -666,10 +664,6 @@ def set_translate_type(self, name):
self.main.subform.set_transapi()
return
config.params['translate_type'] = name
if name == translator.FREECHATGPT_NAME:
self.main.translate_label1.show()
else:
self.main.translate_label1.hide()
except Exception as e:
QMessageBox.critical(self.main, config.transobj['anerror'], str(e))

@@ -715,6 +709,8 @@ def model_type_change(self):
def check_whisper_model(self, name):
if self.main.model_type.currentIndex() in [2,3]:
return True
if name.find('/')>0:
return True
slang = self.main.source_language.currentText()
if name.endswith('.en') and translator.get_code(show_text=slang) != 'en':
QMessageBox.critical(self.main, config.transobj['anerror'], config.transobj['enmodelerror'])
15 changes: 6 additions & 9 deletions videotrans/mainwin/spwin.py
@@ -14,7 +14,7 @@
from videotrans.task.get_role_list import GetRoleWorker
from videotrans.util import tools

from videotrans.translator import TRANSNAMES, FREECHATGPT_NAME
from videotrans.translator import TRANSNAMES
from videotrans.configure import config
from videotrans import VERSION
from videotrans.component.controlobj import TextGetdir
@@ -110,10 +110,7 @@ def initUI(self):

# translation type
self.translate_type.addItems(TRANSNAMES)
translate_name = config.params['translate_type'] if config.params['translate_type'] in TRANSNAMES else \
TRANSNAMES[0]
if translate_name == FREECHATGPT_NAME:
self.translate_label1.show()
translate_name = config.params['translate_type'] if config.params['translate_type'] in TRANSNAMES else TRANSNAMES[0]

self.translate_type.setCurrentText(translate_name)

@@ -125,7 +122,8 @@
d = {"all": 0, 'split': 1, "avg": 2, "": 0}
self.whisper_type.setCurrentIndex(d[config.params['whisper_type']])
self.whisper_model.addItems(config.model_list)
self.whisper_model.setCurrentText(config.params['whisper_model'])
if config.params['whisper_model'] in config.model_list:
self.whisper_model.setCurrentText(config.params['whisper_model'])
if config.params['model_type'] == 'openai':
self.model_type.setCurrentIndex(1)
elif config.params['model_type'] == 'GoogleSpeech':
@@ -249,7 +247,6 @@ def bind_action(self):
self.export_sub.setToolTip(
config.transobj['When subtitles exist, the subtitle content can be saved to a local SRT file'])

self.translate_label1.clicked.connect(lambda: self.util.open_url('freechatgpt'))

self.set_line_role.clicked.connect(self.subform.set_line_role_fun)
self.set_line_role.setCursor(Qt.PointingHandCursor)
@@ -444,8 +441,6 @@ def get_setting(self):
config.clone_voicelist = self.settings.value("clone_voicelist", "").split(',')

config.params["chatgpt_model"] = self.settings.value("chatgpt_model", config.params['chatgpt_model'])
if config.params["chatgpt_model"] == 'large':
config.params["chatgpt_model"] = 'large-v3'
os.environ['OPENAI_API_KEY'] = config.params["chatgpt_key"]

config.params["ttsapi_url"] = self.settings.value("ttsapi_url", "")
@@ -469,6 +464,8 @@
config.params["elevenlabstts_key"] = self.settings.value("elevenlabstts_key", "")

config.params['translate_type'] = self.settings.value("translate_type", config.params['translate_type'])
if config.params['translate_type']=='FreeChatGPT':
config.params['translate_type']='FreeGoogle'
config.params['subtitle_type'] = self.settings.value("subtitle_type", config.params['subtitle_type'], int)
config.proxy = self.settings.value("proxy", "", str)
config.params['voice_rate'] = self.settings.value("voice_rate", config.params['voice_rate'].replace('%','').replace('+',''), str)
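
Two guards are added in spwin.py: a saved whisper model is only restored when it is still present in the configurable model list, and a previously saved `FreeChatGPT` translator (removed in this release) is mapped to `FreeGoogle`. A minimal sketch of that settings migration; the standalone function is illustrative, while the real code works on QSettings values inside `initUI` and `get_setting`:

```python
def migrate_saved_settings(saved: dict, model_list: list) -> dict:
    """Illustrative reduction of the guards added above."""
    out = dict(saved)

    # The removed FreeChatGPT translator falls back to FreeGoogle.
    if out.get('translate_type') == 'FreeChatGPT':
        out['translate_type'] = 'FreeGoogle'

    # Only restore a whisper model that still exists in the configured list;
    # otherwise keep the UI default.
    if out.get('whisper_model') not in model_list:
        out.pop('whisper_model', None)

    return out


print(migrate_saved_settings({'translate_type': 'FreeChatGPT', 'whisper_model': 'large'},
                             ['tiny', 'base', 'small', 'large-v3']))
# {'translate_type': 'FreeGoogle'}
```
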
23 changes: 15 additions & 8 deletions videotrans/recognition/all.py
@@ -11,32 +11,39 @@ def recogn(*,
detect_language=None,
audio_file=None,
cache_folder=None,
model_name="base",
model_name="tiny",
set_p=True,
inst=None,
is_cuda=None):
if config.exit_soft or (config.current_status != 'ing' and config.box_recogn != 'ing'):
return False
if set_p:
tools.set_process(f"{config.transobj['kaishishibie']}",
btnkey=inst.init['btnkey'] if inst else "")
down_root = os.path.normpath(config.rootdir + "/models")
down_root = config.rootdir + "/models"
if set_p and inst:
if model_name.find('/')>0:
if not os.path.isdir(down_root+'/models--'+model_name.replace('/','--')):
inst.parent.status_text='下载模型中,用时可能较久' if config.defaulelang=='zh'else 'Download model from huggingface'
else:
inst.parent.status_text='加载或下载模型中,用时可能较久' if config.defaulelang=='zh'else 'Load model from local or download model from huggingface'
else:
tools.set_process(f"{config.transobj['kaishishibie']}",btnkey=inst.init['btnkey'] if inst else "")
model = None
try:
if model_name.startswith('distil-'):
com_type= "float32"
com_type= "default"
elif is_cuda:
com_type=config.settings['cuda_com_type']
else:
com_type='int8'
com_type='default'
local_res=True if model_name.find('/')==-1 else False

model = WhisperModel(model_name,
device="cuda" if is_cuda else "cpu",
compute_type=com_type,
download_root=down_root,
num_workers=config.settings['whisper_worker'],
cpu_threads=os.cpu_count() if int(config.settings['whisper_threads']) < 1 else int(
config.settings['whisper_threads']),
local_files_only=True)
local_files_only=local_res)
if config.current_status != 'ing' and config.box_recogn != 'ing':
return False
if not tools.vail_file(audio_file):
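
The loading changes in recognition/all.py (mirrored in avg.py and yuxian.py below) come down to three points: distil-* models and CPU runs now use compute_type "default" instead of forcing float32/int8, a model name containing '/' is treated as a Hugging Face repo id that may be downloaded, and only plain names are restricted to local files. A minimal sketch, assuming the faster-whisper `WhisperModel` API; the wrapper function itself is illustrative:

```python
from faster_whisper import WhisperModel


def load_whisper_model(model_name: str, root_dir: str, is_cuda: bool,
                       cuda_com_type: str = 'float16') -> WhisperModel:
    # distil-* models and CPU runs fall back to faster-whisper's "default"
    # compute type instead of the previously forced float32 / int8.
    if model_name.startswith('distil-'):
        com_type = 'default'
    elif is_cuda:
        com_type = cuda_com_type  # config.settings['cuda_com_type'] in the real code
    else:
        com_type = 'default'

    # Names containing '/' are Hugging Face repo ids and may be downloaded;
    # plain names must already exist under <root_dir>/models.
    local_only = model_name.find('/') == -1

    return WhisperModel(
        model_name,
        device='cuda' if is_cuda else 'cpu',
        compute_type=com_type,
        download_root=root_dir + '/models',
        local_files_only=local_only,
    )
```
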
17 changes: 12 additions & 5 deletions videotrans/recognition/avg.py
@@ -41,7 +41,7 @@ def recogn(*,
detect_language=None,
audio_file=None,
cache_folder=None,
model_name="base",
model_name="tiny",
set_p=True,
inst=None,
is_cuda=None):
@@ -72,17 +72,24 @@
total_length = len(nonsilent_data)
start_t = time.time()
if model_name.startswith('distil-'):
com_type= "float32"
com_type= "default"
elif is_cuda:
com_type=config.settings['cuda_com_type']
else:
com_type='int8'
com_type='default'
local_res=True if model_name.find('/')==-1 else False
down_root=config.rootdir + "/models"
if set_p and inst and model_name.find('/')>0:
if not os.path.isdir(down_root+'/models--'+model_name.replace('/','--')):
inst.parent.status_text='下载模型中,用时可能较久' if config.defaulelang=='zh'else 'Download model from huggingface'
else:
inst.parent.status_text='加载或下载模型中,用时可能较久' if config.defaulelang=='zh'else 'Load model from local or download model from huggingface'
model = WhisperModel(
model_name,
device="cuda" if config.params['cuda'] else "cpu",
compute_type=com_type,
download_root=config.rootdir + "/models",
local_files_only=True)
download_root=down_root,
local_files_only=local_res)
for i, duration in enumerate(nonsilent_data):
if config.exit_soft or (config.current_status != 'ing' and config.box_recogn != 'ing'):
del model
2 changes: 1 addition & 1 deletion videotrans/recognition/openai.py
@@ -37,7 +37,7 @@ def recogn(*,
detect_language=None,
audio_file=None,
cache_folder=None,
model_name="base",
model_name="tiny",
set_p=True,
inst=None,
is_cuda=None):
19 changes: 13 additions & 6 deletions videotrans/recognition/yuxian.py
@@ -40,10 +40,10 @@ def recogn(*,
set_p=True,
inst=None,
is_cuda=None):
if set_p:
tools.set_process(config.transobj['fengeyinpinshuju'], btnkey=inst.init['btnkey'] if inst else "")
if config.exit_soft or (config.current_status != 'ing' and config.box_recogn != 'ing'):
return False
if set_p:
tools.set_process(config.transobj['fengeyinpinshuju'], btnkey=inst.init['btnkey'] if inst else "")
noextname = os.path.basename(audio_file)
tmp_path = f'{cache_folder}/{noextname}_tmp'
if not os.path.isdir(tmp_path):
@@ -69,17 +69,24 @@
raw_subtitles = []
total_length = len(nonsilent_data)
if model_name.startswith('distil-'):
com_type= "float32"
com_type= "default"
elif is_cuda:
com_type=config.settings['cuda_com_type']
else:
com_type='int8'
com_type='default'
local_res=True if model_name.find('/')==-1 else False
down_root=config.rootdir + "/models"
if set_p and inst and model_name.find('/')>0:
if not os.path.isdir(down_root+'/models--'+model_name.replace('/','--')):
inst.parent.status_text='下载模型中,用时可能较久' if config.defaulelang=='zh'else 'Download model from huggingface'
else:
inst.parent.status_text='加载或下载模型中,用时可能较久' if config.defaulelang=='zh'else 'Load model from local or download model from huggingface'
model = WhisperModel(
model_name,
device="cuda" if is_cuda else "cpu",
compute_type=com_type,
download_root=config.rootdir + "/models",
local_files_only=True)
download_root=down_root,
local_files_only=local_res)
for i, duration in enumerate(nonsilent_data):
if config.exit_soft or (config.current_status != 'ing' and config.box_recogn != 'ing'):
del model
2 changes: 1 addition & 1 deletion videotrans/task/check_update.py
@@ -23,7 +23,7 @@ def get(self):
if proxy:
proxies={"http":proxy,"https":proxy}

res=requests.get("https://raw.githubusercontent.com/jianchang512/pyvideotrans/main/version.json",proxies=proxies)
res=requests.get("https://pyvideotrans.com/version.json",proxies=proxies)
if res.status_code==200:
d=res.json()
if d['version_num']>videotrans.VERSION_NUM:
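
check_update.py now fetches version.json from pyvideotrans.com instead of raw.githubusercontent.com. A short sketch of the comparison it performs, using the fields shown in version.json above; the standalone function and the timeout value are illustrative:

```python
from typing import Optional

import requests


def fetch_newer_version(current_num: int, proxy: str = '') -> Optional[dict]:
    """Return the remote version info when it is newer than current_num, else None."""
    proxies = {'http': proxy, 'https': proxy} if proxy else None
    res = requests.get('https://pyvideotrans.com/version.json', proxies=proxies, timeout=15)
    if res.status_code == 200:
        data = res.json()  # e.g. {"version": "1.75", "version_num": 11075}
        if data['version_num'] > current_num:
            return data
    return None
```
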