Fangjun Kuang
Committed by GitHub

Fix running MeloTTS models on GPU. (#1379)

We need to use opset 18 to export the model to ONNX.
@@ -229,7 +229,7 @@ def main():
 
     torch_model = ModelWrapper(model)
 
-    opset_version = 13
+    opset_version = 18
     x = torch.randint(low=0, high=10, size=(60,), dtype=torch.int64)
     print(x.shape)
     x_lengths = torch.tensor([x.size(0)], dtype=torch.int64)