parser.add_argument("--model-path", default="models/deepspeech_final.pth",
help="Path to model file created by training")
parser.add_argument("--logits", default="", type=str, help="Path to logits from test.py")
parser.add_argument("--test-manifest", metavar="DIR",
help="path to validation manifest csv", default="data/test_manifest.csv")
parser.add_argument("--num-workers", default=16, type=int, help="Number of parallel decodes to run")
parser.add_argument("--output-path", default="tune_results.json", help="Where to save tuning results")
parser.add_argument("--lm-alpha-from", default=1, type=float, help="Language model weight start tuning")
parser.add_argument("--lm-alpha-to", default=3.2, type=float, help="Language model weight end tuning")
After Change
parser = add_decoder_args(parser)
args = parser.parse_args()
if args.lm_path is None:
    print("error: LM must be provided for tuning")
    sys.exit(1)
model = DeepSpeech.load_model(args.model_path)  # load the trained acoustic model
saved_output = np.load(args.saved_output)  # network outputs saved by test.py
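The remainder of the script (not shown here) sweeps decoder parameters over the saved network outputs. A rough sketch of how such a sweep could be parallelised with --num-workers; evaluate_pair and its returned WER are hypothetical placeholders rather than the project's actual decoding API:

import itertools
from multiprocessing import Pool

import numpy as np

def evaluate_pair(params):
    # Hypothetical worker: run the beam-search decoder over the saved logits
    # with this (alpha, beta) pair and return the resulting word error rate.
    alpha, beta = params
    wer = 0.0  # placeholder result
    return alpha, beta, wer

if __name__ == "__main__":
    alphas = np.linspace(1.0, 3.2, 15)   # bounds from the --lm-alpha-from/--lm-alpha-to defaults
    betas = np.linspace(0.0, 0.45, 8)    # assumed beta range; not defined in the snippet above
    grid = list(itertools.product(alphas, betas))
    with Pool(processes=16) as pool:     # 16 mirrors the --num-workers default
        results = pool.map(evaluate_pair, grid)
    best = min(results, key=lambda r: r[2])
    print("best alpha/beta:", best[0], best[1])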