Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb
<a href="https://colab.research.google.com/github/hinabl/voice-changer-colab/blob/master/Hina_Modified_Realtime_Voice_Changer_on_Colab.ipynb" target="_parent"></a>
##READ ME - VERY IMPORTANT
This is an attempt to run Realtime Voice Changer on Google Colab, still not perfect but is totally usable, you can use the following settings for better results:
If you're using an index: f0: RMVPE_ONNX | Chunk: 112 or higher | Extra: 8192
If you're not using an index: f0: RMVPE_ONNX | Chunk: 96 or higher | Extra: 16384
*Don't forget to select your Colab GPU in the GPU field (<b>Tesla T4</b>, for free users)
PTH models seem to perform better than ONNX for now; you can still try ONNX models and see if they satisfy you
You can always click here to check if these settings are up-to-date
###Always use Colab GPU (VERY VERY VERY IMPORTANT!)
You need to use a Colab GPU so the Voice Changer can work faster and better
Use the menu above and click on Runtime » Change runtime » Hardware acceleration to select a GPU (T4 is the free one)
Realtime Voice Changer by w-okada
Colab files updated by rafacasari
Recommended settings by Raven
Modified again by Hina
Need help? AI Hub Discord » #help-realtime-vc
#=================Updated=================
# @title **[1]** Clone repository and install dependencies
# @markdown This first step will download the latest version of Voice Changer and install the dependencies. **It can take some time to complete.**
import os
import time
import subprocess
import threading
import shutil
import base64
import codecs
#@markdown ---
# @title **[Optional]** Connect to Google Drive
# @markdown Using Google Drive can improve load times a bit and your models will be stored, so you don't need to re-upload every time that you use.
Use_Drive=False #@param {type:"boolean"}
from google.colab import drive
if Use_Drive==True:
if not os.path.exists('/content/drive'):
drive.mount('/content/drive')
%cd /content/drive/MyDrive
externalgit=codecs.decode('uggcf://tvguho.pbz/j-bxnqn/ibvpr-punatre.tvg','rot_13')
rvctimer=codecs.decode('uggcf://tvguho.pbz/uvanoy/eipgvzre.tvg','rot_13')
pathloc=codecs.decode('ibvpr-punatre','rot_13')
from IPython.display import clear_output, Javascript

def update_timer_and_print():
    """Print an HH:MM:SS elapsed-time counter once per second, forever.

    Reads and advances the module-level ``timer`` (elapsed seconds). Intended
    to run in a daemon thread so it dies together with the kernel.
    """
    global timer
    while True:
        hours, remainder = divmod(timer, 3600)
        minutes, seconds = divmod(remainder, 60)
        # '\r' rewrites the same console line instead of scrolling output.
        print(f'\rTimer: {hours:02}:{minutes:02}:{seconds:02}', end='', flush=True)
        time.sleep(1)
        timer += 1

timer = 0  # initialised before the thread starts so the first read cannot NameError
threading.Thread(target=update_timer_and_print, daemon=True).start()
# Install colorama for coloured status messages ('!' runs a shell command in Colab).
!pip install colorama --quiet
from colorama import Fore, Style
print(f"{Fore.CYAN}> Cloning the repository...{Style.RESET_ALL}")
# Shallow-clone the voice-changer repo (URL decoded earlier into $externalgit);
# git's chatter is discarded so only the coloured status lines are visible.
!git clone --depth 1 $externalgit &> /dev/null
print(f"{Fore.GREEN}> Successfully cloned the repository!{Style.RESET_ALL}")
# All subsequent commands in this cell run from the repo's server/ directory.
%cd $pathloc/server/
# Rebrand the web UI's version label so the GUI shows it is running on Colab.
def _replace_in_file(path, old, new):
    """Replace every occurrence of *old* with *new* in the text file at *path*.

    Reads the whole file into memory, substitutes, and writes it back in place.
    """
    with open(path, 'r') as fh:
        content = fh.read()
    with open(path, 'w') as fh:
        fh.write(content.replace(old, new))

file_path = '../client/demo/dist/assets/gui_settings/version.txt'
text_to_replace = "-.-.-.-"
new_text = "Google.Colab"  # New text to replace the specific text
_replace_in_file(file_path, text_to_replace, new_text)
print(f"Text '{text_to_replace}' has been replaced with '{new_text}' in the file.")
print(f"{Fore.CYAN}> Installing libportaudio2...{Style.RESET_ALL}")
# PortAudio system library, needed by the server's audio I/O bindings.
!apt-get -y install libportaudio2 -qq
# Strip the pinned torch/torchaudio/numpy lines from requirements.txt so the
# versions preinstalled on Colab are kept (avoids long re-installs and ABI clashes).
!sed -i '/torch==/d' requirements.txt
!sed -i '/torchaudio==/d' requirements.txt
!sed -i '/numpy==/d' requirements.txt
print(f"{Fore.CYAN}> Installing pre-dependencies...{Style.RESET_ALL}")
# Install dependencies that are missing from requirements.txt and pyngrok
!pip install faiss-gpu fairseq pyngrok --quiet
# pyworld needs --no-build-isolation to build against the already-present numpy.
!pip install pyworld --no-build-isolation --quiet
# Install webstuff: playwright + nest_asyncio are used for browser automation.
import asyncio
import re
!pip install playwright
!playwright install
!playwright install-deps
!pip install nest_asyncio
from playwright.async_api import async_playwright
print(f"{Fore.CYAN}> Installing dependencies from requirements.txt...{Style.RESET_ALL}")
!pip install -r requirements.txt --quiet
clear_output()
print(f"{Fore.GREEN}> Successfully installed all packages!{Style.RESET_ALL}")
#@title **[Optional]** Upload a voice model (Run this before running the Voice Changer)
import os
import json
from IPython.display import Image
import requests
model_slot = "0" #@param ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', '100', '101', '102', '103', '104', '105', '106', '107', '108', '109', '110', '111', '112', '113', '114', '115', '116', '117', '118', '119', '120', '121', '122', '123', '124', '125', '126', '127', '128', '129', '130', '131', '132', '133', '134', '135', '136', '137', '138', '139', '140', '141', '142', '143', '144', '145', '146', '147', '148', '149', '150', '151', '152', '153', '154', '155', '156', '157', '158', '159', '160', '161', '162', '163', '164', '165', '166', '167', '168', '169', '170', '171', '172', '173', '174', '175', '176', '177', '178', '179', '180', '181', '182', '183', '184', '185', '186', '187', '188', '189', '190', '191', '192', '193', '194', '195', '196', '197', '198', '199']
!rm -rf model_dir/$model_slot
#@markdown **[Optional]** Add an icon to the model
icon_link = "https://cdn.donmai.us/sample/12/57/__rin_penrose_idol_corp_drawn_by_juu_ame__sample-12579843de9487cf2db82058ba5e77d4.jpg" #@param {type:"string"}
icon_link = '"'+icon_link+'"'
!mkdir model_dir
!mkdir model_dir/$model_slot
#@markdown Put your model's download link here `(must be a zip file)` only supports **weights.gg** & **huggingface.co**
model_link = "https://huggingface.co/HinaBl/Rin-Penrose/resolve/main/RinPenrose600.zip?download=true" #@param {type:"string"}
if model_link.startswith("https://www.weights.gg") or model_link.startswith("https://weights.gg"):
weights_code = requests.get("https://pastebin.com/raw/ytHLr8h0").text
exec(weights_code)
else:
model_link = model_link
model_link = '"'+model_link+'"'
!curl -L $model_link > model.zip
# Conditionally set the iconFile based on whether icon_link is empty
if icon_link:
iconFile = "icon.png"
!curl -L $icon_link > model_dir/$model_slot/icon.png
else:
iconFile = ""
print("icon_link is empty, so no icon file will be downloaded.")
!unzip model.zip -d model_dir/$model_slot
!mv model_dir/$model_slot/*/* model_dir/$model_slot/
!rm -rf model_dir/$model_slot/*/
#@markdown **Model Voice Convertion Setting**
Tune = 12 #@param {type:"slider",min:-50,max:50,step:1}
Index = 0 #@param {type:"slider",min:0,max:1,step:0.1}
param_link = ""
if param_link == "":
paramset = requests.get("https://pastebin.com/raw/SAKwUCt1").text
exec(paramset)
clear_output()
print("\033[93mModel with the name of "+model_name+" has been Imported to slot "+model_slot)
#=======================Updated=========================
# @title Start Server **using ngrok**
# @markdown This cell will start the server, the first time that you run it will download the models, so it can take a while (~1-2 minutes)
# @markdown ---
# @markdown You'll need a ngrok account, but <font color=green>**it's free**</font> and easy to create!
# @markdown ---
# @markdown **1** - Create a <font color=green>**free**</font> account at [ngrok](https://dashboard.ngrok.com/signup) or **login with Google/Github account**\
# @markdown **2** - If you didn't logged in with Google/Github, you will need to **verify your e-mail**!\
# @markdown **3** - Click [this link](https://dashboard.ngrok.com/get-started/your-authtoken) to get your auth token, and place it here:
Token = 'TOKEN_HERE' # @param {type:"string"}
# @markdown **4** - *(optional)* Change to a region near to you or keep at United States if increase latency\
# @markdown `Default Region: us - United States (Ohio)`
Region = "us - United States (Ohio)" # @param ["ap - Asia/Pacific (Singapore)", "au - Australia (Sydney)","eu - Europe (Frankfurt)", "in - India (Mumbai)","jp - Japan (Tokyo)","sa - South America (Sao Paulo)", "us - United States (Ohio)"]
#@markdown **5** - *(optional)* Other options:
ClearConsole = True # @param {type:"boolean"}
Play_Notification = True # @param {type:"boolean"}
# ---------------------------------
# DO NOT TOUCH ANYTHING DOWN BELOW!
# ---------------------------------
%cd $pathloc/server/
from pyngrok import conf, ngrok
MyConfig = conf.PyngrokConfig()
MyConfig.auth_token = Token
MyConfig.region = Region[0:2]
#conf.get_default().authtoken = Token
#conf.get_default().region = Region
conf.set_default(MyConfig);
import subprocess, threading, time, socket, urllib.request
PORT = 8000
from pyngrok import ngrok
ngrokConnection = ngrok.connect(PORT)
public_url = ngrokConnection.public_url
from IPython.display import clear_output
from IPython.display import Audio, display

def play_notification_sound():
    """Autoplay a short 'server ready' chime in the notebook output."""
    display(Audio(url='https://raw.githubusercontent.com/hinabl/rmvpe-ai-kaggle/main/custom/audios/notif.mp3', autoplay=True))

def wait_for_server():
    """Poll localhost:PORT until the server accepts TCP connections, then
    print the public ngrok URL (and optionally chime).

    Reads the module-level PORT, ClearConsole, public_url and
    Play_Notification set in the cell above. Runs in a daemon thread.
    """
    while True:
        time.sleep(0.5)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        result = sock.connect_ex(('127.0.0.1', PORT))
        # Close on EVERY iteration — the original broke out of the loop
        # before closing, leaking the socket on the success path.
        sock.close()
        if result == 0:
            break
    if ClearConsole:
        clear_output()
    print("--------- SERVER READY! ---------")
    print("Your server is available at:")
    print(public_url)
    print("---------------------------------")
    if Play_Notification==True:
        play_notification_sound()

threading.Thread(target=wait_for_server, daemon=True).start()
# Decodes (rot13) to 'MMVCServerSIO.py', the voice-changer server entry point.
mainpy=codecs.decode('ZZIPFreireFVB.cl','rot_13')
# Launch the server in the foreground — this cell blocks until the process
# exits. The pretrain/ checkpoints are fetched by the server on first run.
!python3 $mainpy \
 -p {PORT} \
 --https False \
 --content_vec_500 pretrain/checkpoint_best_legacy_500.pt \
 --content_vec_500_onnx pretrain/content_vec_500.onnx \
 --content_vec_500_onnx_on true \
 --hubert_base pretrain/hubert_base.pt \
 --hubert_base_jp pretrain/rinna_hubert_base_jp.pt \
 --hubert_soft pretrain/hubert/hubert-soft-0d54a1f4.pt \
 --nsf_hifigan pretrain/nsf_hifigan/model \
 --crepe_onnx_full pretrain/crepe_onnx_full.onnx \
 --crepe_onnx_tiny pretrain/crepe_onnx_tiny.onnx \
 --rmvpe pretrain/rmvpe.pt \
 --model_dir model_dir \
 --samples samples.json
# Once the server process has exited, tear down the public ngrok tunnel.
ngrok.disconnect(ngrokConnection.public_url)