Figuring out multiprocessing with RVC

56 views Asked by At

Currently I am trying to find a way to make my script to launch a series of functions in parallel but errors keep coming up. At the moment I am using RVC-CLI, but I can change to original RVC repo and use infer.py;

The chain of functions looks like this at the moment:

echo_message -> synthesize -> Apply_RVC -> send_voice.

I am having trouble with function Apply_RVC, i want to run this chain in a new process??? When doing it (def Apply_RVC + await Apply_RVC) everything works perfectly fine, however when i try to use multiprocessing.Pool + Pool.map I get error from PyTorch about insufficient resources.

from telebot.async_telebot import AsyncTeleBot
from telebot.types import InputFile
from marina_neutral import synthesize
from apply_rvc import Apply_RVC
from multiprocessing import Pool
import time
import os
from chat_gpt import Get_Gpt

#Initializion of global variables and objects
pool=Pool()
procs=[]
cur_path = os.getcwd()
bot = AsyncTeleBot('_____')


def check_message(message):
    mes = message.lower()
    pat = r'\!./&><:"=_+,>;@#$%^*()-|'
    for e in pat:
        mes = mes.replace(f'{e}', '')
    with open(cur_path + '/ban_list.txt', 'r', encoding='utf-8') as f:
        for e in f.readlines():
            word = e.rstrip().lower()
            if word == 'сво':
                if word in mes.split():
                    return False
            else:
                if word in mes:
                    return False
    return True


@bot.message_handler(commands=['help', 'start'])
async def send_welcome(message):
    await bot.reply_to(message, """\
__________.\
""")


@bot.message_handler(func=lambda message: True)
async def echo_message(message):
    if check_message(message.text):
        ind=str(time.time_ns())
        await synthesize(Get_Gpt(message.text), cur_path+'/Sound_temp/'+str(message.chat.id)+ind+'.ogg','marina','neutral')
        pool.starmap(Apply_RVC,zip(cur_path+'/Sound_temp/'+str(message.chat.id)+ind+'.ogg',cur_path+'/Sound_final/'+str(message.chat.id)+ind+'_f.ogg'),chunksize=None)
        with open(cur_path+'/Sound_final/'+str(message.chat.id)+ind+'_f.ogg', 'rb') as f:
            await bot.send_voice(message.chat.id, InputFile(f))
    else:
        await bot.send_message(message.chat.id, '--------')



import asyncio

asyncio.run(bot.polling())

import os
import subprocess
import multiprocessing

cur_path=os.getcwd()
pth_path=cur_path.replace("/RVC_CLI-1.0.2","")+"/Config/cfg_RVC/roigh.pth"
index_path=cur_path.replace("/RVC_CLI-1.0.2","")+"/Config/cfg_RVC/F_index.index"

def Apply_RVC(input_path, output_path):
    cur_path=os.getcwd()
    if cur_path.count("/RVC_CLI-1.0.2")==0: os.chdir(cur_path+"/RVC_CLI-1.0.2")
    #subprocess.Popen(["python3", f"{os.getcwd()}/main.py", "infer", "0" , "0", "0", "256", "rmvpe", input_path, output_path, pth_path, index_path, "False"]).wait()
    os.system(f'python3 main.py infer 0 0 0 256 rmvpe "{input_path}" "{output_path}" "{pth_path}" "{index_path}" False')

  File "/home/user15096/.local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1143, in convert
    return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 3.79 GiB total capacity; 38.45 MiB already allocated; 4.50 MiB free; 46.00 MiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation.  See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
Traceback (most recent call last):
  File "/home/user15096/Documents/Projects/ai_anna/RVC_CLI-1.0.2/rvc/infer/infer.py", line 240, in <module>
    get_vc(model_path, 0)
  File "/home/user15096/Documents/Projects/ai_anna/RVC_CLI-1.0.2/rvc/infer/infer.py", line 205, in get_vc
    net_g.eval().to(config.device)
  File "/home/user15096/.local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1145, in to
    return self._apply(convert)
  File "/home/user15096/.local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 797, in _apply
    module._apply(fn)
  File "/home/user15096/.local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 797, in _apply
    module._apply(fn)
  File "/home/user15096/.local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 820, in _apply
    param_applied = fn(param)
  File "/home/user15096/.local/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1143, in convert
    return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 2.00 MiB (GPU 0; 3.79 GiB total capacity; 81.06 MiB already allocated; 4.50 MiB free; 100.00 MiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation.  See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
Failed to load audio: ffmpeg error (see stderr output for detail)
Voice conversion failed: cannot unpack non-iterable NoneType object

0

There are 0 answers