I'm currently trying to import a neural network model I made in PyTorch into Unity. This is the code of my NN:
import torch
import torch.nn as nn
import onnx

device = "cuda" if torch.cuda.is_available() else "cpu"

class EnhancedMLP(nn.Module):
    def __init__(self, input_size, output_size):
        super(EnhancedMLP, self).__init__()
        # Doubling the number of layers and units
        self.fc1 = nn.Linear(input_size, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 256)
        self.fc4 = nn.Linear(256, 256)
        self.fc5 = nn.Linear(256, output_size)
        self.activation = nn.ReLU()

    def forward(self, x):
        x = self.activation(self.fc1(x))
        x = self.activation(self.fc2(x))
        x = self.activation(self.fc3(x))
        x = self.activation(self.fc4(x))
        output = self.fc5(x)
        continuous_action_shape = torch.tensor([2], dtype=torch.int64)  # Assuming 2 continuous actions
        return output, continuous_action_shape
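# Quick smoke test of the forward pass (my addition; the expected shapes are an
# assumption based on the 96 observations and 2 continuous actions used below)
_test_actions, _test_shape = EnhancedMLP(96, 2)(torch.randn(1, 96))
print(_test_actions.shape, _test_shape)  # expected: torch.Size([1, 2]) tensor([2])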
model = EnhancedMLP(input_size=96, output_size=2).to(device)

# Set the model to inference mode
model.eval()

# Create a dummy input tensor
dummy_input = torch.randn(1, 96).to(device)

# Export the model to ONNX format
torch.onnx.export(model,                     # model being run
                  dummy_input,               # model input (or a tuple for multiple inputs)
                  "Albert.onnx",             # where to save the model (can be a file or file-like object)
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=10,          # the ONNX opset version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['vector_observation'],  # the model's input name
                  output_names=['continuous_actions', 'continuous_action_output_shape'],  # the model's output names
                  dynamic_axes={'vector_observation': {0: 'batch_size'},  # dynamic axis for the input tensor
                                'continuous_actions': {0: 'batch_size'},
                                'continuous_action_output_shape': {0: 'batch_size'}})
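# My addition: a minimal sanity check of the raw export before patching it;
# onnx.checker raises if the graph is structurally invalid
onnx.checker.check_model(onnx.load("Albert.onnx"))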
# Load the exported ONNX model
torchmodel = onnx.load('Albert.onnx')
graph = torchmodel.graph
# Add the version_number field
version_number = onnx.helper.make_tensor("version_number", onnx.TensorProto.INT64, [1], [3])
graph.initializer.append(version_number)
# Add the version_number to the model's outputs
version_number_info = onnx.helper.make_tensor_value_info("version_number", onnx.TensorProto.INT64, shape=[])
graph.output.append(version_number_info)
# Add the memory_size field
memory_size = onnx.helper.make_tensor("memory_size", onnx.TensorProto.INT64, [1], [0])
graph.initializer.append(memory_size)
# Add the memory_size to the model's outputs
memory_size_info = onnx.helper.make_tensor_value_info("memory_size", onnx.TensorProto.INT64, shape=[])
graph.output.append(memory_size_info)
# Define the continuous_action_output_shape tensor value info
continuous_action_shape_info = onnx.helper.make_tensor_value_info(
    "continuous_action_output_shape",
    onnx.TensorProto.INT64,
    [1]
)

# Add the continuous_action_output_shape to the model's outputs
graph.output.append(continuous_action_shape_info)

# Define and add the actual tensor value for continuous_action_output_shape
continuous_action_shape_tensor = onnx.helper.make_tensor(
    "continuous_action_output_shape",
    onnx.TensorProto.INT64,
    [1],
    [2]  # Assuming 2 continuous actions
)
graph.initializer.append(continuous_action_shape_tensor)
# Save the modified ONNX model
onnx.save(torchmodel, 'ModifiedAlbert.onnx')
print("Model has been converted to ONNX and modified.")
model = onnx.load("Albert.onnx")
print(model.graph.input) # Check the input tensor names
print(model.graph.output) # Check the output tensor names
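A minimal way to sanity-check the patched file outside Unity, assuming onnxruntime is installed (the feed name matches the 'vector_observation' input declared in the export above):

import onnxruntime as ort

patched = onnx.load("ModifiedAlbert.onnx")
onnx.checker.check_model(patched)  # raises if the patched graph is invalid
sess = ort.InferenceSession("ModifiedAlbert.onnx", providers=["CPUExecutionProvider"])
results = sess.run(None, {"vector_observation": dummy_input.cpu().numpy()})
for out, value in zip(sess.get_outputs(), results):
    print(out.name, value)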
This is how it shows up in Unity: [screenshot: the NN in the Unity editor]. I can't figure out the cause of the error; I hope some of you can.
The version of the ML-Agents package in Unity is 3.0.0, and these are the versions in Python: [screenshot: package versions in cmd], so everything is as up to date as it gets. Am I missing something like, for example, gym-unity? On the other hand, when I take the default brain in Unity and turn off inference, the training and everything works fine.
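For reference, the same version information can be printed from Python rather than read off a screenshot (importlib.metadata ships with Python 3.8+; the package names below are the PyPI distribution names):

from importlib.metadata import version, PackageNotFoundError

for pkg in ("torch", "onnx", "mlagents", "mlagents-envs", "gym-unity"):
    try:
        print(pkg, version(pkg))
    except PackageNotFoundError:
        print(pkg, "not installed")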