Merge pull request #13 from cyberofficial/dev-testing
Ram Check & Mic Table
cyberofficial authored Apr 2, 2023
2 parents 7755a92 + f73499f commit 615ae33
Showing 1 changed file with 41 additions and 35 deletions.
76 changes: 41 additions & 35 deletions transcribe_audio.py
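In short, the commit makes two changes to transcribe_audio.py: the --ram flag is lower-cased and checked against the detected CUDA VRAM (stepping down to a smaller model when the GPU cannot fit the requested one), and the microphone listing is rendered as a PrettyTable instead of one print per device. A minimal, self-contained sketch of the new listing, assuming the prettytable, SpeechRecognition, and PyAudio packages are installed; the is_input_device helper below is a hypothetical stand-in for the repository's own helper, whose body is not shown in this diff:

import speech_recognition as sr
import pyaudio
from prettytable import PrettyTable

pa = pyaudio.PyAudio()

def is_input_device(device_index):
    # Hypothetical stand-in for the repository's helper: treat a device
    # as an input if PyAudio reports at least one input channel for it.
    info = pa.get_device_info_by_index(device_index)
    return info.get("maxInputChannels", 0) > 0

mic_table = PrettyTable()
mic_table.field_names = ["Index", "Microphone Name"]
for index, name in enumerate(sr.Microphone.list_microphone_names()):
    if is_input_device(index):
        mic_table.add_row([index, name])
print(mic_table)
pa.terminate()

PrettyTable draws an ASCII grid, so the device indexes line up in one column instead of being buried in per-device sentences.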
@@ -44,6 +44,7 @@
from tqdm import tqdm
from datetime import datetime
from numba import cuda
from prettytable import PrettyTable
try:
    from dateutil.tz import tzlocal
except:
@@ -244,6 +245,9 @@ def is_input_device(device_index):
print("@DaniruKun from https://watsonindustries.live")
exit()

# set args.ram to lowercase
args.ram = args.ram.lower()

if args.ram == "1gb":
    model = "tiny"
elif args.ram == "2gb":
@@ -306,9 +310,14 @@ def is_input_device(device_index):
# list all available microphones, then set the source to the desired microphone
if args.list_microphones:
    print("Available microphone devices are: ")
    mic_table = PrettyTable()
    mic_table.field_names = ["Index", "Microphone Name"]

    for index, name in enumerate(sr.Microphone.list_microphone_names()):
        if is_input_device(index):
            print(f"Microphone with name \"{name}\" found, the device index is {index}")
            mic_table.add_row([index, name])

    print(mic_table)
    # exit the program
    sys.exit(0)

@@ -340,45 +349,42 @@ def is_input_device(device_index):
# Final check before loading or downloading the model: if CUDA was chosen and the requested
# ram flag needs more VRAM than the device has, step the flag down one size to avoid
# out-of-memory errors. The valid ram flags are "1gb", "2gb", "4gb", "6gb", and "12gb".
def print_warning(old_ram_flag, new_ram_flag, needed_vram, detected_vram):
    print(f"WARNING: CUDA was chosen, but the VRAM available is less than {old_ram_flag}. You have {detected_vram:.2f} MB available, and {needed_vram - detected_vram:.2f} MB additional overhead is needed. Setting ram flag to avoid out of memory errors. New Flag: {new_ram_flag}")
    operating_system = platform.system()
    print(f"Remember that {operating_system} will use VRAM for other processes, so you may need to lower the ram flag even more to avoid out of memory errors.")

if device.type == "cuda":
    cuda_vram = torch.cuda.get_device_properties(torch.cuda.current_device()).total_memory / 1024 / 1024

    #################
    # hard-set VRAM value for testing
    # cuda_vram = 4096
    # print(f"VRAM available: {cuda_vram:.2f} MB")
    #################

    overhead_buffer = 200

    def print_warning(ram_flag, needed_vram, detected_vram):
        print(f"WARNING: CUDA was chosen but the VRAM available is less than {ram_flag}. You have {detected_vram:.2f} MB available, and {needed_vram - detected_vram:.2f} MB additional overhead is needed. Setting ram flag to avoid out of memory errors. New Flag: {args.ram}")

if args.ram == "12gb" and cuda_vram < 12000 + overhead_buffer:
args.ram = "6gb"
print_warning("12 GB", 12288 + overhead_buffer, cuda_vram)
elif args.ram == "6gb" and cuda_vram < 6144 + overhead_buffer:
args.ram = "4gb"
print_warning("6 GB", 6144 + overhead_buffer, cuda_vram)
elif args.ram == "4gb" and cuda_vram < 4096 + overhead_buffer:
args.ram = "2gb"
print_warning("4 GB", 4096 + overhead_buffer, cuda_vram)
elif args.ram == "2gb" and cuda_vram < 2048 + overhead_buffer:
args.ram = "1gb"
print_warning("2 GB", 2048 + overhead_buffer, cuda_vram)
elif args.ram == "1gb" and cuda_vram < 1024 + overhead_buffer:
args.ram = "1gb"
print_warning("1 GB", 1024 + overhead_buffer, cuda_vram)
device = torch.device("cpu")
print("WARNING: CUDA was chosen but the VRAM available is less than 1 GB. Falling back to CPU.")



    ram_options = [("12gb", 12000), ("6gb", 6144), ("4gb", 4096), ("2gb", 2048), ("1gb", 1024)]

    found = False
    old_ram_flag = args.ram
    for i, (ram_option, required_vram) in enumerate(ram_options):
        if args.ram == ram_option and cuda_vram < required_vram + overhead_buffer:
            if i + 1 < len(ram_options):
                args.ram = ram_options[i + 1][0]
            else:
                args.ram = ram_option
                device = torch.device("cpu")
                print("WARNING: CUDA was chosen, but the VRAM available is less than 1 GB. Falling back to CPU.")
            break
        else:
            found = True
            break

    if not found:
        device = torch.device("cpu")
        print("WARNING: No suitable RAM setting was found. Falling back to CPU.")
    elif old_ram_flag != args.ram:
        print_warning(old_ram_flag, args.ram, required_vram + overhead_buffer, cuda_vram)

# check if the ram size is set to 1gb, 2gb, or 4gb; if so, download the compressed model, otherwise download the full fine-tuned model
if args.ram == "1gb" or args.ram == "2gb" or args.ram == "4gb":
    red_text = Style.BRIGHT + Fore.RED
    reset_text = Style.RESET_ALL
    if not os.path.exists("models/fine_tuned_model_compressed.pt"):
        print("Warning - Since you have chosen a low amount of RAM, the fine-tuned model will be downloaded in a compressed format.\nThis will result in a somewhat faster startup time and a slower inference time, but will also cause a slight reduction in accuracy.")
        print("Compressed fine-tuned model not found. Downloading compressed fine-tuned model... [Via OneDrive (Public)]")
@@ -393,7 +399,7 @@ def print_warning(ram_flag, needed_vram, detected_vram):
        except Exception as e:
            print("Failed to load fine-tuned model. Results may be inaccurate. If you experience issues, please delete the fine-tuned model from the models folder and restart the program. If you still experience issues, please open an issue on GitHub.")
            red_text = Fore.RED + Back.BLACK
            print(f"{red_text}Error: {e}{style.RESET_ALL}")
            print(f"{red_text}Error: {e}{reset_text}")
            pass
    else:
        # load the fine-tuned model into memory
@@ -406,7 +412,7 @@ def print_warning(ram_flag, needed_vram, detected_vram):
        except Exception as e:
            print("Failed to load fine-tuned model. Results may be inaccurate. If you experience issues, please delete the fine-tuned model from the models folder and restart the program. If you still experience issues, please open an issue on GitHub.")
            red_text = Fore.RED + Back.BLACK
            print(f"{red_text}Error: {e}{style.RESET_ALL}")
            print(f"{red_text}Error: {e}{reset_text}")
            pass
else:
    if not os.path.exists("models/fine_tuned_model.pt"):
@@ -422,7 +428,7 @@ def print_warning(ram_flag, needed_vram, detected_vram):
        except Exception as e:
            print("Failed to load fine-tuned model. Results may be inaccurate. If you experience issues, please delete the fine-tuned model from the models folder and restart the program. If you still experience issues, please open an issue on GitHub.")
            red_text = Fore.RED + Back.BLACK
            print(f"{red_text}Error: {e}{style.RESET_ALL}")
            print(f"{red_text}Error: {e}{reset_text}")
            pass
    else:
        # load the fine-tuned model into memory
@@ -432,7 +438,7 @@ def print_warning(ram_flag, needed_vram, detected_vram):
        except Exception as e:
            print("Failed to load fine-tuned model. Results may be inaccurate. If you experience issues, please delete the fine-tuned model from the models folder and restart the program. If you still experience issues, please open an issue on GitHub.")
            red_text = Fore.RED + Back.BLACK
            print(f"{red_text}Error: {e}{style.RESET_ALL}")
            print(f"{red_text}Error: {e}{reset_text}")
            pass


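For context on the refactored RAM check above, here is a self-contained sketch of the intended step-down behavior. It is not a line-for-line copy of the commit's loop: the thresholds and the 200 MB overhead buffer are taken from the diff, while pick_ram_flag is an illustrative name, not part of the repository.

import torch

# Thresholds as they appear in the diff (note the 12gb entry uses 12000 MB).
RAM_OPTIONS = [("12gb", 12000), ("6gb", 6144), ("4gb", 4096), ("2gb", 2048), ("1gb", 1024)]
OVERHEAD_BUFFER = 200  # MB left free for other processes using the GPU

def pick_ram_flag(requested, cuda_vram_mb):
    # Walk from the requested flag toward smaller ones until one fits the
    # detected VRAM plus the overhead buffer; None signals a CPU fallback.
    start = next(i for i, (flag, _) in enumerate(RAM_OPTIONS) if flag == requested)
    for flag, needed in RAM_OPTIONS[start:]:
        if cuda_vram_mb >= needed + OVERHEAD_BUFFER:
            return flag
    return None

if torch.cuda.is_available():
    vram_mb = torch.cuda.get_device_properties(0).total_memory / 1024 / 1024
    print(pick_ram_flag("6gb", vram_mb))  # e.g. "6gb" on an 8 GB card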
