update server data
		| @ -0,0 +1,82 @@ | ||||
| # train.py | ||||
| # | ||||
| # author: deng | ||||
| # date  : 20230221 | ||||
|  | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from torch.optim import SGD | ||||
| from tqdm import tqdm | ||||
| import mlflow | ||||
|  | ||||
|  | ||||
| class Net(nn.Module): | ||||
|     """ define a simple neural network model """ | ||||
|     def __init__(self): | ||||
|         super(Net, self).__init__() | ||||
|         self.fc1 = nn.Linear(10, 5) | ||||
|         self.fc2 = nn.Linear(5, 1) | ||||
|  | ||||
|     def forward(self, x): | ||||
|         x = self.fc1(x) | ||||
|         x = torch.relu(x) | ||||
|         x = self.fc2(x) | ||||
|         return x | ||||
|  | ||||
|  | ||||
| def train(model, dataloader, criterion, optimizer, epochs): | ||||
|     """ define the training function """ | ||||
|     for epoch in tqdm(range(epochs), 'Epochs'): | ||||
|  | ||||
|         for i, (inputs, labels) in enumerate(dataloader): | ||||
|  | ||||
|             # forwarding | ||||
|             outputs = model(inputs) | ||||
|             loss = criterion(outputs, labels) | ||||
|  | ||||
|             # update gradient | ||||
|             optimizer.zero_grad() | ||||
|             loss.backward() | ||||
|             optimizer.step() | ||||
|  | ||||
|             # log loss | ||||
|             mlflow.log_metric('train_loss', loss.item(), step=i) | ||||
|  | ||||
|     return loss | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     # set hyper parameters | ||||
|     learning_rate = 1e-2 | ||||
|     epochs = 20 | ||||
|  | ||||
|     # create a dataloader with fake data | ||||
|     dataloader = [(torch.randn(10), torch.randn(1)) for _ in range(100)] | ||||
|     dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10) | ||||
|  | ||||
|     # create the model, criterion, and optimizer | ||||
|     model = Net() | ||||
|     criterion = nn.MSELoss() | ||||
|     optimizer = SGD(model.parameters(), lr=learning_rate) | ||||
|  | ||||
|     # set the tracking URI to the model registry | ||||
|     mlflow.set_tracking_uri('http://127.0.0.1:5000') | ||||
|  | ||||
|     # start the MLflow run | ||||
|     with mlflow.start_run(): | ||||
|  | ||||
|         # train the model and log the loss | ||||
|         loss = train(model, dataloader, criterion, optimizer, epochs) | ||||
|  | ||||
|         # log some additional metrics | ||||
|         mlflow.log_metric('final_loss', loss.item()) | ||||
|         mlflow.log_param('learning_rate', learning_rate) | ||||
|  | ||||
|         # log trained model | ||||
|         mlflow.pytorch.log_model(model, 'model') | ||||
|  | ||||
|         # log training code | ||||
|         mlflow.log_artifact('./train.py', 'code') | ||||
|  | ||||
|     print('Completed.') | ||||
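As a quick reference, the model logged by this script (under the artifact path 'model') can be loaded back from the tracking server by run ID. This is a minimal sketch, assuming the same local tracking server; RUN_ID is a placeholder for an actual run ID taken from the MLflow UI:

```python
import torch
import mlflow.pytorch

# same tracking server the training script points at
mlflow.set_tracking_uri('http://127.0.0.1:5000')

# RUN_ID is a placeholder; 'model' matches the artifact_path passed to log_model
model = mlflow.pytorch.load_model('runs:/<RUN_ID>/model')
model.eval()

# this version of Net expects 10 input features
with torch.no_grad():
    print(model(torch.randn(1, 10)))
```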
| @ -0,0 +1,16 @@ | ||||
| artifact_path: model | ||||
| flavors: | ||||
|   python_function: | ||||
|     data: data | ||||
|     env: conda.yaml | ||||
|     loader_module: mlflow.pytorch | ||||
|     pickle_module_name: mlflow.pytorch.pickle_module | ||||
|     python_version: 3.10.9 | ||||
|   pytorch: | ||||
|     code: null | ||||
|     model_data: data | ||||
|     pytorch_version: 1.13.1 | ||||
| mlflow_version: 1.30.0 | ||||
| model_uuid: 18b69aa38c064c579c9b465d7a826081 | ||||
| run_id: 410d85525e5f4cfe9839a432d35f9ad2 | ||||
| utc_time_created: '2023-02-22 00:42:48.668457' | ||||
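Because this MLmodel records a python_function flavor (loader_module: mlflow.pytorch), the same artifact can also be loaded through the generic pyfunc API without touching torch directly. A hedged sketch, reusing the run_id recorded above and assuming the local tracking server is still reachable:

```python
import numpy as np
import pandas as pd
import mlflow.pyfunc

mlflow.set_tracking_uri('http://127.0.0.1:5000')

# run_id taken from the MLmodel file above
model = mlflow.pyfunc.load_model('runs:/410d85525e5f4cfe9839a432d35f9ad2/model')

# the pyfunc wrapper accepts a pandas DataFrame; this model takes 10 features
batch = pd.DataFrame(np.random.randn(2, 10).astype(np.float32))
print(model.predict(batch))
```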
| @ -0,0 +1,11 @@ | ||||
| channels: | ||||
| - conda-forge | ||||
| dependencies: | ||||
| - python=3.10.9 | ||||
| - pip<=23.0.1 | ||||
| - pip: | ||||
|   - mlflow | ||||
|   - cloudpickle==2.2.1 | ||||
|   - torch==1.13.1 | ||||
|   - tqdm==4.64.1 | ||||
| name: mlflow-env | ||||
Binary file not shown.
							| @ -0,0 +1 @@ | ||||
| mlflow.pytorch.pickle_module | ||||
| @ -0,0 +1,7 @@ | ||||
| python: 3.10.9 | ||||
| build_dependencies: | ||||
| - pip==23.0.1 | ||||
| - setuptools==67.3.2 | ||||
| - wheel==0.38.4 | ||||
| dependencies: | ||||
| - -r requirements.txt | ||||
| @ -0,0 +1,4 @@ | ||||
| mlflow | ||||
| cloudpickle==2.2.1 | ||||
| torch==1.13.1 | ||||
| tqdm==4.64.1 | ||||
| @ -0,0 +1,16 @@ | ||||
| artifact_path: model | ||||
| flavors: | ||||
|   python_function: | ||||
|     data: data | ||||
|     env: conda.yaml | ||||
|     loader_module: mlflow.pytorch | ||||
|     pickle_module_name: mlflow.pytorch.pickle_module | ||||
|     python_version: 3.10.9 | ||||
|   pytorch: | ||||
|     code: null | ||||
|     model_data: data | ||||
|     pytorch_version: 1.13.1 | ||||
| mlflow_version: 1.30.0 | ||||
| model_uuid: 957e2f6e4fd048c99aee3150c73c4078 | ||||
| run_id: c4fd84a025e1474d87cdc2919874b88c | ||||
| utc_time_created: '2023-02-22 00:41:33.282088' | ||||
| @ -0,0 +1,11 @@ | ||||
| channels: | ||||
| - conda-forge | ||||
| dependencies: | ||||
| - python=3.10.9 | ||||
| - pip<=23.0.1 | ||||
| - pip: | ||||
|   - mlflow | ||||
|   - cloudpickle==2.2.1 | ||||
|   - torch==1.13.1 | ||||
|   - tqdm==4.64.1 | ||||
| name: mlflow-env | ||||
Binary file not shown.
							| @ -0,0 +1 @@ | ||||
| mlflow.pytorch.pickle_module | ||||
| @ -0,0 +1,7 @@ | ||||
| python: 3.10.9 | ||||
| build_dependencies: | ||||
| - pip==23.0.1 | ||||
| - setuptools==67.3.2 | ||||
| - wheel==0.38.4 | ||||
| dependencies: | ||||
| - -r requirements.txt | ||||
| @ -0,0 +1,4 @@ | ||||
| mlflow | ||||
| cloudpickle==2.2.1 | ||||
| torch==1.13.1 | ||||
| tqdm==4.64.1 | ||||
| @ -0,0 +1,82 @@ | ||||
| # train.py | ||||
| # | ||||
| # author: deng | ||||
| # date  : 20230221 | ||||
|  | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from torch.optim import SGD | ||||
| from tqdm import tqdm | ||||
| import mlflow | ||||
|  | ||||
|  | ||||
| class Net(nn.Module): | ||||
|     """ define a simple neural network model """ | ||||
|     def __init__(self): | ||||
|         super(Net, self).__init__() | ||||
|         self.fc1 = nn.Linear(10, 5) | ||||
|         self.fc2 = nn.Linear(5, 1) | ||||
|  | ||||
|     def forward(self, x): | ||||
|         x = self.fc1(x) | ||||
|         x = torch.relu(x) | ||||
|         x = self.fc2(x) | ||||
|         return x | ||||
|  | ||||
|  | ||||
| def train(model, dataloader, criterion, optimizer, epochs): | ||||
|     """ define the training function """ | ||||
|     for epoch in tqdm(range(epochs), 'Epochs'): | ||||
|  | ||||
|         for i, (inputs, labels) in enumerate(dataloader): | ||||
|  | ||||
|             # forwarding | ||||
|             outputs = model(inputs) | ||||
|             loss = criterion(outputs, labels) | ||||
|  | ||||
|             # update gradient | ||||
|             optimizer.zero_grad() | ||||
|             loss.backward() | ||||
|             optimizer.step() | ||||
|  | ||||
|             # log loss | ||||
|             mlflow.log_metric('train_loss', loss.item(), step=i) | ||||
|  | ||||
|     return loss | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     # set hyper parameters | ||||
|     learning_rate = 1e-2 | ||||
|     epochs = 20 | ||||
|  | ||||
|     # create a dataloader with fake data | ||||
|     dataloader = [(torch.randn(10), torch.randn(1)) for _ in range(100)] | ||||
|     dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10) | ||||
|  | ||||
|     # create the model, criterion, and optimizer | ||||
|     model = Net() | ||||
|     criterion = nn.MSELoss() | ||||
|     optimizer = SGD(model.parameters(), lr=learning_rate) | ||||
|  | ||||
|     # set the tracking URI to the model registry | ||||
|     mlflow.set_tracking_uri('http://127.0.0.1:5000') | ||||
|  | ||||
|     # start the MLflow run | ||||
|     with mlflow.start_run(): | ||||
|  | ||||
|         # train the model and log the loss | ||||
|         loss = train(model, dataloader, criterion, optimizer, epochs) | ||||
|  | ||||
|         # log some additional metrics | ||||
|         mlflow.log_metric('final_loss', loss.item()) | ||||
|         mlflow.log_param('learning_rate', learning_rate) | ||||
|  | ||||
|         # log trained model | ||||
|         mlflow.pytorch.log_model(model, 'model') | ||||
|  | ||||
|         # log training code | ||||
|         mlflow.log_artifact('./train.py') | ||||
|  | ||||
|     print('Completed.') | ||||
| @ -0,0 +1,83 @@ | ||||
| # train.py | ||||
| # | ||||
| # author: deng | ||||
| # date  : 20230221 | ||||
|  | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from torch.optim import SGD | ||||
| from tqdm import tqdm | ||||
| import mlflow | ||||
|  | ||||
|  | ||||
| class Net(nn.Module): | ||||
|     """ define a simple neural network model """ | ||||
|     def __init__(self): | ||||
|         super(Net, self).__init__() | ||||
|         self.fc1 = nn.Linear(10, 5) | ||||
|         self.fc2 = nn.Linear(5, 1) | ||||
|  | ||||
|     def forward(self, x): | ||||
|         x = self.fc1(x) | ||||
|         x = torch.relu(x) | ||||
|         x = self.fc2(x) | ||||
|         return x | ||||
|  | ||||
|  | ||||
| def train(model, dataloader, criterion, optimizer, epochs): | ||||
|     """ define the training function """ | ||||
|     for epoch in tqdm(range(epochs), 'Epochs'): | ||||
|  | ||||
|         for i, (inputs, labels) in enumerate(dataloader): | ||||
|  | ||||
|             # forwarding | ||||
|             outputs = model(inputs) | ||||
|             loss = criterion(outputs, labels) | ||||
|  | ||||
|             # update gradient | ||||
|             optimizer.zero_grad() | ||||
|             loss.backward() | ||||
|             optimizer.step() | ||||
|  | ||||
|             # log loss | ||||
|             mlflow.log_metric('train_loss', loss.item(), step=i) | ||||
|  | ||||
|     return loss | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     # set hyper parameters | ||||
|     learning_rate = 1e-2 | ||||
|     epochs = 20 | ||||
|  | ||||
|     # create a dataloader with fake data | ||||
|     dataloader = [(torch.randn(10), torch.randn(1)) for _ in range(100)] | ||||
|     dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10) | ||||
|  | ||||
|     # create the model, criterion, and optimizer | ||||
|     model = Net() | ||||
|     criterion = nn.MSELoss() | ||||
|     optimizer = SGD(model.parameters(), lr=learning_rate) | ||||
|  | ||||
|     # set the tracking URI to the model registry | ||||
|     mlflow.set_tracking_uri('http://127.0.0.1:5000') | ||||
|     mlflow.set_experiment('/mlflow_testing') | ||||
|  | ||||
|     # start the MLflow run | ||||
|     with mlflow.start_run(): | ||||
|  | ||||
|         # train the model | ||||
|         loss = train(model, dataloader, criterion, optimizer, epochs) | ||||
|  | ||||
|         # log some additional metrics | ||||
|         mlflow.log_metric('final_loss', loss.item()) | ||||
|         mlflow.log_param('learning_rate', learning_rate) | ||||
|  | ||||
|         # log trained model | ||||
|         mlflow.pytorch.log_model(model, 'model') | ||||
|  | ||||
|         # log training code | ||||
|         mlflow.log_artifact('./train.py', 'code') | ||||
|  | ||||
|     print('Completed.') | ||||
| @ -0,0 +1,16 @@ | ||||
| artifact_path: model | ||||
| flavors: | ||||
|   python_function: | ||||
|     data: data | ||||
|     env: conda.yaml | ||||
|     loader_module: mlflow.pytorch | ||||
|     pickle_module_name: mlflow.pytorch.pickle_module | ||||
|     python_version: 3.10.9 | ||||
|   pytorch: | ||||
|     code: null | ||||
|     model_data: data | ||||
|     pytorch_version: 1.13.1 | ||||
| mlflow_version: 1.30.0 | ||||
| model_uuid: ff8b845d6a174ffabfc49a18673c6c04 | ||||
| run_id: c248a4299f97423987a9496a2241ab1a | ||||
| utc_time_created: '2023-02-22 01:10:55.971443' | ||||
| @ -0,0 +1,11 @@ | ||||
| channels: | ||||
| - conda-forge | ||||
| dependencies: | ||||
| - python=3.10.9 | ||||
| - pip<=23.0.1 | ||||
| - pip: | ||||
|   - mlflow | ||||
|   - cloudpickle==2.2.1 | ||||
|   - torch==1.13.1 | ||||
|   - tqdm==4.64.1 | ||||
| name: mlflow-env | ||||
Binary file not shown.
							| @ -0,0 +1 @@ | ||||
| mlflow.pytorch.pickle_module | ||||
| @ -0,0 +1,7 @@ | ||||
| python: 3.10.9 | ||||
| build_dependencies: | ||||
| - pip==23.0.1 | ||||
| - setuptools==67.3.2 | ||||
| - wheel==0.38.4 | ||||
| dependencies: | ||||
| - -r requirements.txt | ||||
| @ -0,0 +1,4 @@ | ||||
| mlflow | ||||
| cloudpickle==2.2.1 | ||||
| torch==1.13.1 | ||||
| tqdm==4.64.1 | ||||
| @ -0,0 +1,83 @@ | ||||
| # train.py | ||||
| # | ||||
| # author: deng | ||||
| # date  : 20230221 | ||||
|  | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from torch.optim import SGD | ||||
| from tqdm import tqdm | ||||
| import mlflow | ||||
|  | ||||
|  | ||||
| class Net(nn.Module): | ||||
|     """ define a simple neural network model """ | ||||
|     def __init__(self): | ||||
|         super(Net, self).__init__() | ||||
|         self.fc1 = nn.Linear(10, 5) | ||||
|         self.fc2 = nn.Linear(5, 1) | ||||
|  | ||||
|     def forward(self, x): | ||||
|         x = self.fc1(x) | ||||
|         x = torch.relu(x) | ||||
|         x = self.fc2(x) | ||||
|         return x | ||||
|  | ||||
|  | ||||
| def train(model, dataloader, criterion, optimizer, epochs): | ||||
|     """ define the training function """ | ||||
|     for epoch in tqdm(range(epochs), 'Epochs'): | ||||
|  | ||||
|         for i, (inputs, labels) in enumerate(dataloader): | ||||
|  | ||||
|             # forwarding | ||||
|             outputs = model(inputs) | ||||
|             loss = criterion(outputs, labels) | ||||
|  | ||||
|             # update gradient | ||||
|             optimizer.zero_grad() | ||||
|             loss.backward() | ||||
|             optimizer.step() | ||||
|  | ||||
|             # log loss | ||||
|             mlflow.log_metric('train_loss', loss.item(), step=i) | ||||
|  | ||||
|     return loss | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     # set hyper parameters | ||||
|     learning_rate = 1e-2 | ||||
|     epochs = 20 | ||||
|  | ||||
|     # create a dataloader with fake data | ||||
|     dataloader = [(torch.randn(10), torch.randn(1)) for _ in range(100)] | ||||
|     dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10) | ||||
|  | ||||
|     # create the model, criterion, and optimizer | ||||
|     model = Net() | ||||
|     criterion = nn.MSELoss() | ||||
|     optimizer = SGD(model.parameters(), lr=learning_rate) | ||||
|  | ||||
|     # set the tracking URI to the model registry | ||||
|     mlflow.set_tracking_uri('http://127.0.0.1:5000') | ||||
|     mlflow.set_experiment('mlflow_testing') | ||||
|  | ||||
|     # start the MLflow run | ||||
|     with mlflow.start_run(): | ||||
|  | ||||
|         # train the model | ||||
|         loss = train(model, dataloader, criterion, optimizer, epochs) | ||||
|  | ||||
|         # log some additional metrics | ||||
|         mlflow.log_metric('final_loss', loss.item()) | ||||
|         mlflow.log_param('learning_rate', learning_rate) | ||||
|  | ||||
|         # log trained model | ||||
|         mlflow.pytorch.log_model(model, 'model') | ||||
|  | ||||
|         # log training code | ||||
|         mlflow.log_artifact('./train.py', 'code') | ||||
|  | ||||
|     print('Completed.') | ||||
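mlflow.set_experiment('mlflow_testing') creates the experiment on the tracking server if it does not exist yet and makes it the active one for subsequent runs. A small sketch (assuming the same local server) for checking what was created:

```python
import mlflow

mlflow.set_tracking_uri('http://127.0.0.1:5000')

# returns an Experiment entity, or None if the name is unknown
exp = mlflow.get_experiment_by_name('mlflow_testing')
if exp is not None:
    print(exp.experiment_id, exp.artifact_location)
```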
| @ -0,0 +1,16 @@ | ||||
| artifact_path: model | ||||
| flavors: | ||||
|   python_function: | ||||
|     data: data | ||||
|     env: conda.yaml | ||||
|     loader_module: mlflow.pytorch | ||||
|     pickle_module_name: mlflow.pytorch.pickle_module | ||||
|     python_version: 3.10.9 | ||||
|   pytorch: | ||||
|     code: null | ||||
|     model_data: data | ||||
|     pytorch_version: 1.13.1 | ||||
| mlflow_version: 1.30.0 | ||||
| model_uuid: 27a96ad04f5a4578a3e1302500ad9a11 | ||||
| run_id: b7d7395b6b53404497f7656b07b71bf8 | ||||
| utc_time_created: '2023-02-22 01:11:36.809812' | ||||
| @ -0,0 +1,11 @@ | ||||
| channels: | ||||
| - conda-forge | ||||
| dependencies: | ||||
| - python=3.10.9 | ||||
| - pip<=23.0.1 | ||||
| - pip: | ||||
|   - mlflow | ||||
|   - cloudpickle==2.2.1 | ||||
|   - torch==1.13.1 | ||||
|   - tqdm==4.64.1 | ||||
| name: mlflow-env | ||||
Binary file not shown.
							| @ -0,0 +1 @@ | ||||
| mlflow.pytorch.pickle_module | ||||
| @ -0,0 +1,7 @@ | ||||
| python: 3.10.9 | ||||
| build_dependencies: | ||||
| - pip==23.0.1 | ||||
| - setuptools==67.3.2 | ||||
| - wheel==0.38.4 | ||||
| dependencies: | ||||
| - -r requirements.txt | ||||
| @ -0,0 +1,4 @@ | ||||
| mlflow | ||||
| cloudpickle==2.2.1 | ||||
| torch==1.13.1 | ||||
| tqdm==4.64.1 | ||||
| @ -0,0 +1,83 @@ | ||||
| # train.py | ||||
| # | ||||
| # author: deng | ||||
| # date  : 20230221 | ||||
|  | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from torch.optim import SGD | ||||
| from tqdm import tqdm | ||||
| import mlflow | ||||
|  | ||||
|  | ||||
| class Net(nn.Module): | ||||
|     """ define a simple neural network model """ | ||||
|     def __init__(self): | ||||
|         super(Net, self).__init__() | ||||
|         self.fc1 = nn.Linear(10, 5) | ||||
|         self.fc2 = nn.Linear(5, 1) | ||||
|  | ||||
|     def forward(self, x): | ||||
|         x = self.fc1(x) | ||||
|         x = torch.relu(x) | ||||
|         x = self.fc2(x) | ||||
|         return x | ||||
|  | ||||
|  | ||||
| def train(model, dataloader, criterion, optimizer, epochs): | ||||
|     """ define the training function """ | ||||
|     for epoch in tqdm(range(epochs), 'Epochs'): | ||||
|  | ||||
|         for i, (inputs, labels) in enumerate(dataloader): | ||||
|  | ||||
|             # forwarding | ||||
|             outputs = model(inputs) | ||||
|             loss = criterion(outputs, labels) | ||||
|  | ||||
|             # update gradient | ||||
|             optimizer.zero_grad() | ||||
|             loss.backward() | ||||
|             optimizer.step() | ||||
|  | ||||
|             # log loss | ||||
|             mlflow.log_metric('train_loss', loss.item(), step=i) | ||||
|  | ||||
|     return loss | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     # set hyper parameters | ||||
|     learning_rate = 1e-2 | ||||
|     epochs = 20 | ||||
|  | ||||
|     # create a dataloader with fake data | ||||
|     dataloader = [(torch.randn(10), torch.randn(1)) for _ in range(100)] | ||||
|     dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10) | ||||
|  | ||||
|     # create the model, criterion, and optimizer | ||||
|     model = Net() | ||||
|     criterion = nn.MSELoss() | ||||
|     optimizer = SGD(model.parameters(), lr=learning_rate) | ||||
|  | ||||
|     # set the tracking URI to the model registry | ||||
|     mlflow.set_tracking_uri('http://127.0.0.1:5000') | ||||
|     mlflow.set_experiment('mlflow_testing') | ||||
|  | ||||
|     # start the MLflow run | ||||
|     with mlflow.start_run(): | ||||
|  | ||||
|         # train the model | ||||
|         loss = train(model, dataloader, criterion, optimizer, epochs) | ||||
|  | ||||
|         # log some additional metrics | ||||
|         mlflow.log_metric('final_loss', loss.item()) | ||||
|         mlflow.log_param('learning_rate', learning_rate) | ||||
|  | ||||
|         # log trained model | ||||
|         mlflow.pytorch.log_model(model, 'model') | ||||
|  | ||||
|         # log training code | ||||
|         mlflow.log_artifact('./train.py', 'code') | ||||
|  | ||||
|     print('Completed.') | ||||
| @ -0,0 +1,16 @@ | ||||
| artifact_path: model | ||||
| flavors: | ||||
|   python_function: | ||||
|     data: data | ||||
|     env: conda.yaml | ||||
|     loader_module: mlflow.pytorch | ||||
|     pickle_module_name: mlflow.pytorch.pickle_module | ||||
|     python_version: 3.10.9 | ||||
|   pytorch: | ||||
|     code: null | ||||
|     model_data: data | ||||
|     pytorch_version: 1.13.1 | ||||
| mlflow_version: 1.30.0 | ||||
| model_uuid: 2625ea164ff248c194686ed5afb9a510 | ||||
| run_id: c293e8294f4f46adacd21465be08c608 | ||||
| utc_time_created: '2023-02-22 01:11:28.646127' | ||||
| @ -0,0 +1,11 @@ | ||||
| channels: | ||||
| - conda-forge | ||||
| dependencies: | ||||
| - python=3.10.9 | ||||
| - pip<=23.0.1 | ||||
| - pip: | ||||
|   - mlflow | ||||
|   - cloudpickle==2.2.1 | ||||
|   - torch==1.13.1 | ||||
|   - tqdm==4.64.1 | ||||
| name: mlflow-env | ||||
Binary file not shown.
							| @ -0,0 +1 @@ | ||||
| mlflow.pytorch.pickle_module | ||||
| @ -0,0 +1,7 @@ | ||||
| python: 3.10.9 | ||||
| build_dependencies: | ||||
| - pip==23.0.1 | ||||
| - setuptools==67.3.2 | ||||
| - wheel==0.38.4 | ||||
| dependencies: | ||||
| - -r requirements.txt | ||||
| @ -0,0 +1,4 @@ | ||||
| mlflow | ||||
| cloudpickle==2.2.1 | ||||
| torch==1.13.1 | ||||
| tqdm==4.64.1 | ||||
| @ -0,0 +1,83 @@ | ||||
| # train.py | ||||
| # | ||||
| # author: deng | ||||
| # date  : 20230221 | ||||
|  | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from torch.optim import SGD | ||||
| from tqdm import tqdm | ||||
| import mlflow | ||||
|  | ||||
|  | ||||
| class Net(nn.Module): | ||||
|     """ define a simple neural network model """ | ||||
|     def __init__(self): | ||||
|         super(Net, self).__init__() | ||||
|         self.fc1 = nn.Linear(10, 5) | ||||
|         self.fc2 = nn.Linear(5, 1) | ||||
|  | ||||
|     def forward(self, x): | ||||
|         x = self.fc1(x) | ||||
|         x = torch.relu(x) | ||||
|         x = self.fc2(x) | ||||
|         return x | ||||
|  | ||||
|  | ||||
| def train(model, dataloader, criterion, optimizer, epochs): | ||||
|     """ define the training function """ | ||||
|     for epoch in tqdm(range(epochs), 'Epochs'): | ||||
|  | ||||
|         for i, (inputs, labels) in enumerate(dataloader): | ||||
|  | ||||
|             # forwarding | ||||
|             outputs = model(inputs) | ||||
|             loss = criterion(outputs, labels) | ||||
|  | ||||
|             # update gradient | ||||
|             optimizer.zero_grad() | ||||
|             loss.backward() | ||||
|             optimizer.step() | ||||
|  | ||||
|             # log loss | ||||
|             mlflow.log_metric('train_loss', loss.item(), step=i) | ||||
|  | ||||
|     return loss | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     # set hyper parameters | ||||
|     learning_rate = 1e-2 | ||||
|     epochs = 20 | ||||
|  | ||||
|     # create a dataloader with fake data | ||||
|     dataloader = [(torch.randn(10), torch.randn(1)) for _ in range(100)] | ||||
|     dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10) | ||||
|  | ||||
|     # create the model, criterion, and optimizer | ||||
|     model = Net() | ||||
|     criterion = nn.MSELoss() | ||||
|     optimizer = SGD(model.parameters(), lr=learning_rate) | ||||
|  | ||||
|     # set the tracking URI to the model registry | ||||
|     mlflow.set_tracking_uri('http://127.0.0.1:5000') | ||||
|     mlflow.set_experiment('mlflow_testing') | ||||
|  | ||||
|     # start the MLflow run | ||||
|     with mlflow.start_run(): | ||||
|  | ||||
|         # train the model | ||||
|         loss = train(model, dataloader, criterion, optimizer, epochs) | ||||
|  | ||||
|         # log some additional metrics | ||||
|         mlflow.log_metric('final_loss', loss.item()) | ||||
|         mlflow.log_param('learning_rate', learning_rate) | ||||
|  | ||||
|         # log trained model | ||||
|         mlflow.pytorch.log_model(model, 'model') | ||||
|  | ||||
|         # log training code | ||||
|         mlflow.log_artifact('./train.py', 'code') | ||||
|  | ||||
|     print('Completed.') | ||||
| @ -0,0 +1,16 @@ | ||||
| artifact_path: model | ||||
| flavors: | ||||
|   python_function: | ||||
|     data: data | ||||
|     env: conda.yaml | ||||
|     loader_module: mlflow.pytorch | ||||
|     pickle_module_name: mlflow.pytorch.pickle_module | ||||
|     python_version: 3.10.9 | ||||
|   pytorch: | ||||
|     code: null | ||||
|     model_data: data | ||||
|     pytorch_version: 1.13.1 | ||||
| mlflow_version: 1.30.0 | ||||
| model_uuid: 2ee49fdb3ec647a58b1235498b186722 | ||||
| run_id: d548729629634031a93a46d6dab8b7da | ||||
| utc_time_created: '2023-02-22 01:11:33.149151' | ||||
| @ -0,0 +1,11 @@ | ||||
| channels: | ||||
| - conda-forge | ||||
| dependencies: | ||||
| - python=3.10.9 | ||||
| - pip<=23.0.1 | ||||
| - pip: | ||||
|   - mlflow | ||||
|   - cloudpickle==2.2.1 | ||||
|   - torch==1.13.1 | ||||
|   - tqdm==4.64.1 | ||||
| name: mlflow-env | ||||
Binary file not shown.
							| @ -0,0 +1 @@ | ||||
| mlflow.pytorch.pickle_module | ||||
| @ -0,0 +1,7 @@ | ||||
| python: 3.10.9 | ||||
| build_dependencies: | ||||
| - pip==23.0.1 | ||||
| - setuptools==67.3.2 | ||||
| - wheel==0.38.4 | ||||
| dependencies: | ||||
| - -r requirements.txt | ||||
| @ -0,0 +1,4 @@ | ||||
| mlflow | ||||
| cloudpickle==2.2.1 | ||||
| torch==1.13.1 | ||||
| tqdm==4.64.1 | ||||
| @ -0,0 +1,83 @@ | ||||
| # train.py | ||||
| # | ||||
| # author: deng | ||||
| # date  : 20230221 | ||||
|  | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from torch.optim import SGD | ||||
| from tqdm import tqdm | ||||
| import mlflow | ||||
|  | ||||
|  | ||||
| class Net(nn.Module): | ||||
|     """ define a simple neural network model """ | ||||
|     def __init__(self): | ||||
|         super(Net, self).__init__() | ||||
|         self.fc1 = nn.Linear(10, 5) | ||||
|         self.fc2 = nn.Linear(5, 1) | ||||
|  | ||||
|     def forward(self, x): | ||||
|         x = self.fc1(x) | ||||
|         x = torch.relu(x) | ||||
|         x = self.fc2(x) | ||||
|         return x | ||||
|  | ||||
|  | ||||
| def train(model, dataloader, criterion, optimizer, epochs): | ||||
|     """ define the training function """ | ||||
|     for epoch in tqdm(range(epochs), 'Epochs'): | ||||
|  | ||||
|         for i, (inputs, labels) in enumerate(dataloader): | ||||
|  | ||||
|             # forwarding | ||||
|             outputs = model(inputs) | ||||
|             loss = criterion(outputs, labels) | ||||
|  | ||||
|             # update gradient | ||||
|             optimizer.zero_grad() | ||||
|             loss.backward() | ||||
|             optimizer.step() | ||||
|  | ||||
|             # log loss | ||||
|             mlflow.log_metric('train_loss', loss.item(), step=i) | ||||
|  | ||||
|     return loss | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     # set hyper parameters | ||||
|     learning_rate = 1e-2 | ||||
|     epochs = 20 | ||||
|  | ||||
|     # create a dataloader with fake data | ||||
|     dataloader = [(torch.randn(10), torch.randn(1)) for _ in range(100)] | ||||
|     dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10) | ||||
|  | ||||
|     # create the model, criterion, and optimizer | ||||
|     model = Net() | ||||
|     criterion = nn.MSELoss() | ||||
|     optimizer = SGD(model.parameters(), lr=learning_rate) | ||||
|  | ||||
|     # set the tracking URI to the model registry | ||||
|     mlflow.set_tracking_uri('http://127.0.0.1:5000') | ||||
|     mlflow.set_experiment('train_fortune_predict_model') | ||||
|  | ||||
|     # start the MLflow run | ||||
|     with mlflow.start_run(): | ||||
|  | ||||
|         # train the model | ||||
|         loss = train(model, dataloader, criterion, optimizer, epochs) | ||||
|  | ||||
|         # log some additional metrics | ||||
|         mlflow.log_metric('final_loss', loss.item()) | ||||
|         mlflow.log_param('learning_rate', learning_rate) | ||||
|  | ||||
|         # log trained model | ||||
|         mlflow.pytorch.log_model(model, 'model') | ||||
|  | ||||
|         # log training code | ||||
|         mlflow.log_artifact('./train.py', 'code') | ||||
|  | ||||
|     print('Completed.') | ||||
| @ -0,0 +1,16 @@ | ||||
| artifact_path: model | ||||
| flavors: | ||||
|   python_function: | ||||
|     data: data | ||||
|     env: conda.yaml | ||||
|     loader_module: mlflow.pytorch | ||||
|     pickle_module_name: mlflow.pytorch.pickle_module | ||||
|     python_version: 3.10.9 | ||||
|   pytorch: | ||||
|     code: null | ||||
|     model_data: data | ||||
|     pytorch_version: 1.13.1 | ||||
| mlflow_version: 1.30.0 | ||||
| model_uuid: 75aa23c5bb33452c978feeeffcdcb393 | ||||
| run_id: 0be79b1f3f7d480a9c7f497312887a37 | ||||
| utc_time_created: '2023-02-22 01:12:26.682417' | ||||
| @ -0,0 +1,11 @@ | ||||
| channels: | ||||
| - conda-forge | ||||
| dependencies: | ||||
| - python=3.10.9 | ||||
| - pip<=23.0.1 | ||||
| - pip: | ||||
|   - mlflow | ||||
|   - cloudpickle==2.2.1 | ||||
|   - torch==1.13.1 | ||||
|   - tqdm==4.64.1 | ||||
| name: mlflow-env | ||||
Binary file not shown.
							| @ -0,0 +1 @@ | ||||
| mlflow.pytorch.pickle_module | ||||
| @ -0,0 +1,7 @@ | ||||
| python: 3.10.9 | ||||
| build_dependencies: | ||||
| - pip==23.0.1 | ||||
| - setuptools==67.3.2 | ||||
| - wheel==0.38.4 | ||||
| dependencies: | ||||
| - -r requirements.txt | ||||
| @ -0,0 +1,4 @@ | ||||
| mlflow | ||||
| cloudpickle==2.2.1 | ||||
| torch==1.13.1 | ||||
| tqdm==4.64.1 | ||||
| @ -0,0 +1,83 @@ | ||||
| # train.py | ||||
| # | ||||
| # author: deng | ||||
| # date  : 20230221 | ||||
|  | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from torch.optim import SGD | ||||
| from tqdm import tqdm | ||||
| import mlflow | ||||
|  | ||||
|  | ||||
| class Net(nn.Module): | ||||
|     """ define a simple neural network model """ | ||||
|     def __init__(self): | ||||
|         super(Net, self).__init__() | ||||
|         self.fc1 = nn.Linear(10, 5) | ||||
|         self.fc2 = nn.Linear(5, 1) | ||||
|  | ||||
|     def forward(self, x): | ||||
|         x = self.fc1(x) | ||||
|         x = torch.relu(x) | ||||
|         x = self.fc2(x) | ||||
|         return x | ||||
|  | ||||
|  | ||||
| def train(model, dataloader, criterion, optimizer, epochs): | ||||
|     """ define the training function """ | ||||
|     for epoch in tqdm(range(epochs), 'Epochs'): | ||||
|  | ||||
|         for i, (inputs, labels) in enumerate(dataloader): | ||||
|  | ||||
|             # forwarding | ||||
|             outputs = model(inputs) | ||||
|             loss = criterion(outputs, labels) | ||||
|  | ||||
|             # update gradient | ||||
|             optimizer.zero_grad() | ||||
|             loss.backward() | ||||
|             optimizer.step() | ||||
|  | ||||
|             # log loss | ||||
|             mlflow.log_metric('train_loss', loss.item(), step=i) | ||||
|  | ||||
|     return loss | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     # set hyper parameters | ||||
|     learning_rate = 1e-2 | ||||
|     epochs = 20 | ||||
|  | ||||
|     # create a dataloader with fake data | ||||
|     dataloader = [(torch.randn(10), torch.randn(1)) for _ in range(100)] | ||||
|     dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10) | ||||
|  | ||||
|     # create the model, criterion, and optimizer | ||||
|     model = Net() | ||||
|     criterion = nn.MSELoss() | ||||
|     optimizer = SGD(model.parameters(), lr=learning_rate) | ||||
|  | ||||
|     # set the tracking URI to the model registry | ||||
|     mlflow.set_tracking_uri('http://127.0.0.1:5000') | ||||
|     mlflow.set_experiment('train_fortune_predict_model') | ||||
|  | ||||
|     # start the MLflow run | ||||
|     with mlflow.start_run(): | ||||
|  | ||||
|         # train the model | ||||
|         loss = train(model, dataloader, criterion, optimizer, epochs) | ||||
|  | ||||
|         # log some additional metrics | ||||
|         mlflow.log_metric('final_loss', loss.item()) | ||||
|         mlflow.log_param('learning_rate', learning_rate) | ||||
|  | ||||
|         # log trained model | ||||
|         mlflow.pytorch.log_model(model, 'model') | ||||
|  | ||||
|         # log training code | ||||
|         mlflow.log_artifact('./train.py', 'code') | ||||
|  | ||||
|     print('Completed.') | ||||
| @ -0,0 +1,16 @@ | ||||
| artifact_path: model | ||||
| flavors: | ||||
|   python_function: | ||||
|     data: data | ||||
|     env: conda.yaml | ||||
|     loader_module: mlflow.pytorch | ||||
|     pickle_module_name: mlflow.pytorch.pickle_module | ||||
|     python_version: 3.10.9 | ||||
|   pytorch: | ||||
|     code: null | ||||
|     model_data: data | ||||
|     pytorch_version: 1.13.1 | ||||
| mlflow_version: 1.30.0 | ||||
| model_uuid: 20196b21b65b400eac57135de7cb2bac | ||||
| run_id: 2af28d45b569431ab10056469d63cf44 | ||||
| utc_time_created: '2023-02-22 01:12:30.183766' | ||||
| @ -0,0 +1,11 @@ | ||||
| channels: | ||||
| - conda-forge | ||||
| dependencies: | ||||
| - python=3.10.9 | ||||
| - pip<=23.0.1 | ||||
| - pip: | ||||
|   - mlflow | ||||
|   - cloudpickle==2.2.1 | ||||
|   - torch==1.13.1 | ||||
|   - tqdm==4.64.1 | ||||
| name: mlflow-env | ||||
Binary file not shown.
							| @ -0,0 +1 @@ | ||||
| mlflow.pytorch.pickle_module | ||||
| @ -0,0 +1,7 @@ | ||||
| python: 3.10.9 | ||||
| build_dependencies: | ||||
| - pip==23.0.1 | ||||
| - setuptools==67.3.2 | ||||
| - wheel==0.38.4 | ||||
| dependencies: | ||||
| - -r requirements.txt | ||||
| @ -0,0 +1,4 @@ | ||||
| mlflow | ||||
| cloudpickle==2.2.1 | ||||
| torch==1.13.1 | ||||
| tqdm==4.64.1 | ||||
| @ -0,0 +1,98 @@ | ||||
| # train.py | ||||
| # | ||||
| # author: deng | ||||
| # date  : 20230221 | ||||
|  | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from torch.optim import SGD | ||||
| import mlflow | ||||
| from mlflow.models.signature import ModelSignature | ||||
| from mlflow.types.schema import Schema, ColSpec | ||||
| from tqdm import tqdm | ||||
|  | ||||
|  | ||||
| class Net(nn.Module): | ||||
|     """ define a simple neural network model """ | ||||
|     def __init__(self): | ||||
|         super(Net, self).__init__() | ||||
|         self.fc1 = nn.Linear(5, 3) | ||||
|         self.fc2 = nn.Linear(3, 1) | ||||
|  | ||||
|     def forward(self, x): | ||||
|         x = self.fc1(x) | ||||
|         x = torch.relu(x) | ||||
|         x = self.fc2(x) | ||||
|         return x | ||||
|  | ||||
|  | ||||
| def train(model, dataloader, criterion, optimizer, epochs): | ||||
|     """ define the training function """ | ||||
|     for epoch in tqdm(range(epochs), 'Epochs'): | ||||
|  | ||||
|         for i, (inputs, labels) in enumerate(dataloader): | ||||
|  | ||||
|             # forwarding | ||||
|             outputs = model(inputs) | ||||
|             loss = criterion(outputs, labels) | ||||
|  | ||||
|             # update gradient | ||||
|             optimizer.zero_grad() | ||||
|             loss.backward() | ||||
|             optimizer.step() | ||||
|  | ||||
|         # log loss | ||||
|         mlflow.log_metric('train_loss', loss.item(), step=epoch) | ||||
|  | ||||
|     return loss | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     # set hyper parameters | ||||
|     learning_rate = 1e-2 | ||||
|     batch_size = 10 | ||||
|     epochs = 20 | ||||
|  | ||||
|     # create a dataloader with fake data | ||||
|     dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)] | ||||
|     dataloader = torch.utils.data.DataLoader(dataloader, batch_size=batch_size) | ||||
|  | ||||
|     # create the model, criterion, and optimizer | ||||
|     model = Net() | ||||
|     criterion = nn.MSELoss() | ||||
|     optimizer = SGD(model.parameters(), lr=learning_rate) | ||||
|  | ||||
|     # set the tracking URI to the model registry | ||||
|     mlflow.set_tracking_uri('http://127.0.0.1:5000') | ||||
|     mlflow.set_experiment('train_fortune_predict_model') | ||||
|  | ||||
|     # start a new MLflow run | ||||
|     with mlflow.start_run(): | ||||
|  | ||||
|         # train the model | ||||
|         loss = train(model, dataloader, criterion, optimizer, epochs) | ||||
|  | ||||
|         # log some additional metrics | ||||
|         mlflow.log_metric('final_loss', loss.item()) | ||||
|         mlflow.log_param('learning_rate', learning_rate) | ||||
|         mlflow.log_param('batch_size', batch_size) | ||||
|  | ||||
|         # create a signature to record model input and output | ||||
|         input_schema = Schema([ | ||||
|             ColSpec('float', 'age'), | ||||
|             ColSpec('float', 'mood level'), | ||||
|             ColSpec('float', 'health level'), | ||||
|             ColSpec('float', 'hungry level'), | ||||
|             ColSpec('float', 'sexy level') | ||||
|         ]) | ||||
|         output_schema = Schema([ColSpec('float', 'fortune')]) | ||||
|         signature = ModelSignature(inputs=input_schema, outputs=output_schema) | ||||
|  | ||||
|         # log trained model | ||||
|         mlflow.pytorch.log_model(model, 'model', signature=signature) | ||||
|  | ||||
|         # log training code | ||||
|         mlflow.log_artifact('./train.py', 'code') | ||||
|  | ||||
|     print('Completed.') | ||||
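The input/output Schema in this version is written out by hand with ColSpec entries. MLflow can also derive a signature from example data via infer_signature; note that inferring from arrays produces a tensor-based signature rather than the named columns used here. A sketch, assuming the trained model from this script is in scope:

```python
import torch
from mlflow.models.signature import infer_signature

# one fake batch matching this version of Net (5 input features, 1 output)
example_inputs = torch.randn(4, 5)
with torch.no_grad():
    example_outputs = model(example_inputs)

# infer_signature expects numpy arrays or pandas DataFrames, not tensors
signature = infer_signature(example_inputs.numpy(), example_outputs.numpy())
```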
| @ -0,0 +1,21 @@ | ||||
| artifact_path: model | ||||
| flavors: | ||||
|   python_function: | ||||
|     data: data | ||||
|     env: conda.yaml | ||||
|     loader_module: mlflow.pytorch | ||||
|     pickle_module_name: mlflow.pytorch.pickle_module | ||||
|     python_version: 3.10.9 | ||||
|   pytorch: | ||||
|     code: null | ||||
|     model_data: data | ||||
|     pytorch_version: 1.13.1 | ||||
| mlflow_version: 1.30.0 | ||||
| model_uuid: 4dc69e8ee0404f70b0c17f8119986704 | ||||
| run_id: 581cf34366f74ee7b90b18f0a606ddf8 | ||||
| signature: | ||||
|   inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"}, | ||||
|     {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"}, | ||||
|     {"name": "sexy level", "type": "float"}]' | ||||
|   outputs: '[{"name": "fortune", "type": "float"}]' | ||||
| utc_time_created: '2023-02-22 02:46:27.876394' | ||||
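With the signature recorded above, pyfunc inputs are validated against the named columns, so a prediction request can be built as a DataFrame with exactly those column names. A sketch, reusing the run_id from this MLmodel file and assuming the same local tracking server:

```python
import pandas as pd
import mlflow.pyfunc

mlflow.set_tracking_uri('http://127.0.0.1:5000')
model = mlflow.pyfunc.load_model('runs:/581cf34366f74ee7b90b18f0a606ddf8/model')

# column names and float32 dtype match the recorded input signature
sample = pd.DataFrame([{
    'age': 30.0,
    'mood level': 0.5,
    'health level': 0.8,
    'hungry level': 0.2,
    'sexy level': 0.9,
}], dtype='float32')

print(model.predict(sample))  # one 'fortune' value per input row
```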
| @ -0,0 +1,11 @@ | ||||
| channels: | ||||
| - conda-forge | ||||
| dependencies: | ||||
| - python=3.10.9 | ||||
| - pip<=23.0.1 | ||||
| - pip: | ||||
|   - mlflow | ||||
|   - cloudpickle==2.2.1 | ||||
|   - torch==1.13.1 | ||||
|   - tqdm==4.64.1 | ||||
| name: mlflow-env | ||||
Binary file not shown.
							| @ -0,0 +1 @@ | ||||
| mlflow.pytorch.pickle_module | ||||
| @ -0,0 +1,7 @@ | ||||
| python: 3.10.9 | ||||
| build_dependencies: | ||||
| - pip==23.0.1 | ||||
| - setuptools==67.3.2 | ||||
| - wheel==0.38.4 | ||||
| dependencies: | ||||
| - -r requirements.txt | ||||
| @ -0,0 +1,4 @@ | ||||
| mlflow | ||||
| cloudpickle==2.2.1 | ||||
| torch==1.13.1 | ||||
| tqdm==4.64.1 | ||||
| @ -0,0 +1,98 @@ | ||||
| # train.py | ||||
| # | ||||
| # author: deng | ||||
| # date  : 20230221 | ||||
|  | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from torch.optim import SGD | ||||
| import mlflow | ||||
| from mlflow.models.signature import ModelSignature | ||||
| from mlflow.types.schema import Schema, ColSpec | ||||
| from tqdm import tqdm | ||||
|  | ||||
|  | ||||
| class Net(nn.Module): | ||||
|     """ define a simple neural network model """ | ||||
|     def __init__(self): | ||||
|         super(Net, self).__init__() | ||||
|         self.fc1 = nn.Linear(5, 3) | ||||
|         self.fc2 = nn.Linear(3, 1) | ||||
|  | ||||
|     def forward(self, x): | ||||
|         x = self.fc1(x) | ||||
|         x = torch.relu(x) | ||||
|         x = self.fc2(x) | ||||
|         return x | ||||
|  | ||||
|  | ||||
| def train(model, dataloader, criterion, optimizer, epochs): | ||||
|     """ define the training function """ | ||||
|     for epoch in tqdm(range(epochs), 'Epochs'): | ||||
|  | ||||
|         for i, (inputs, labels) in enumerate(dataloader): | ||||
|  | ||||
|             # forwarding | ||||
|             outputs = model(inputs) | ||||
|             loss = criterion(outputs, labels) | ||||
|  | ||||
|             # update gradient | ||||
|             optimizer.zero_grad() | ||||
|             loss.backward() | ||||
|             optimizer.step() | ||||
|  | ||||
|         # log loss | ||||
|         mlflow.log_metric('train_loss', loss.item(), step=epoch) | ||||
|  | ||||
|     return loss | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     # set hyper parameters | ||||
|     learning_rate = 1e-2 | ||||
|     batch_size = 10 | ||||
|     epochs = 20 | ||||
|  | ||||
|     # create a dataloader with fake data | ||||
|     dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)] | ||||
|     dataloader = torch.utils.data.DataLoader(dataloader, batch_size=batch_size) | ||||
|  | ||||
|     # create the model, criterion, and optimizer | ||||
|     model = Net() | ||||
|     criterion = nn.MSELoss() | ||||
|     optimizer = SGD(model.parameters(), lr=learning_rate) | ||||
|  | ||||
|     # set the tracking URI to the model registry | ||||
|     mlflow.set_tracking_uri('http://127.0.0.1:5000') | ||||
|     mlflow.set_experiment('train_fortune_predict_model') | ||||
|  | ||||
|     # start a new MLflow run | ||||
|     with mlflow.start_run(): | ||||
|  | ||||
|         # train the model | ||||
|         loss = train(model, dataloader, criterion, optimizer, epochs) | ||||
|  | ||||
|         # log some additional metrics | ||||
|         mlflow.log_metric('final_loss', loss.item()) | ||||
|         mlflow.log_param('learning_rate', learning_rate) | ||||
|         mlflow.log_param('batch_size', batch_size) | ||||
|  | ||||
|         # create a signature to record model input and output | ||||
|         input_schema = Schema([ | ||||
|             ColSpec('float', 'age'), | ||||
|             ColSpec('float', 'mood level'), | ||||
|             ColSpec('float', 'health level'), | ||||
|             ColSpec('float', 'hungry level'), | ||||
|             ColSpec('float', 'sexy level') | ||||
|         ]) | ||||
|         output_schema = Schema([ColSpec('float', 'fortune')]) | ||||
|         signature = ModelSignature(inputs=input_schema, outputs=output_schema) | ||||
|  | ||||
|         # log trained model | ||||
|         mlflow.pytorch.log_model(model, 'model', signature=signature) | ||||
|  | ||||
|         # log training code | ||||
|         mlflow.log_artifact('./train.py', 'code') | ||||
|  | ||||
|     print('Completed.') | ||||
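A minor optional variant: the separate log_param calls inside the run could be grouped with mlflow.log_params, which takes a dict (epochs is included here even though the script does not currently log it):

```python
# inside the `with mlflow.start_run():` block, equivalent to the per-param calls
mlflow.log_params({
    'learning_rate': learning_rate,
    'batch_size': batch_size,
    'epochs': epochs,
})
```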
| @ -0,0 +1,21 @@ | ||||
| artifact_path: model | ||||
| flavors: | ||||
|   python_function: | ||||
|     data: data | ||||
|     env: conda.yaml | ||||
|     loader_module: mlflow.pytorch | ||||
|     pickle_module_name: mlflow.pytorch.pickle_module | ||||
|     python_version: 3.10.9 | ||||
|   pytorch: | ||||
|     code: null | ||||
|     model_data: data | ||||
|     pytorch_version: 1.13.1 | ||||
| mlflow_version: 1.30.0 | ||||
| model_uuid: 1334c03425a6454fafce1db8fa3a2428 | ||||
| run_id: 5c8b249efc724568aca1b8a4d8750b43 | ||||
| signature: | ||||
|   inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"}, | ||||
|     {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"}, | ||||
|     {"name": "sexy level", "type": "float"}]' | ||||
|   outputs: '[{"name": "fortune", "type": "float"}]' | ||||
| utc_time_created: '2023-02-22 02:46:24.939018' | ||||
| @ -0,0 +1,11 @@ | ||||
| channels: | ||||
| - conda-forge | ||||
| dependencies: | ||||
| - python=3.10.9 | ||||
| - pip<=23.0.1 | ||||
| - pip: | ||||
|   - mlflow | ||||
|   - cloudpickle==2.2.1 | ||||
|   - torch==1.13.1 | ||||
|   - tqdm==4.64.1 | ||||
| name: mlflow-env | ||||
Binary file not shown.
							| @ -0,0 +1 @@ | ||||
| mlflow.pytorch.pickle_module | ||||
| @ -0,0 +1,7 @@ | ||||
| python: 3.10.9 | ||||
| build_dependencies: | ||||
| - pip==23.0.1 | ||||
| - setuptools==67.3.2 | ||||
| - wheel==0.38.4 | ||||
| dependencies: | ||||
| - -r requirements.txt | ||||
| @ -0,0 +1,4 @@ | ||||
| mlflow | ||||
| cloudpickle==2.2.1 | ||||
| torch==1.13.1 | ||||
| tqdm==4.64.1 | ||||
| @ -0,0 +1,96 @@ | ||||
| # train.py | ||||
| # | ||||
| # author: deng | ||||
| # date  : 20230221 | ||||
|  | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from torch.optim import SGD | ||||
| import mlflow | ||||
| from mlflow.models.signature import ModelSignature | ||||
| from mlflow.types.schema import Schema, ColSpec | ||||
| from tqdm import tqdm | ||||
|  | ||||
|  | ||||
| class Net(nn.Module): | ||||
|     """ define a simple neural network model """ | ||||
|     def __init__(self): | ||||
|         super(Net, self).__init__() | ||||
|         self.fc1 = nn.Linear(5, 3) | ||||
|         self.fc2 = nn.Linear(3, 1) | ||||
|  | ||||
|     def forward(self, x): | ||||
|         x = self.fc1(x) | ||||
|         x = torch.relu(x) | ||||
|         x = self.fc2(x) | ||||
|         return x | ||||
|  | ||||
|  | ||||
| def train(model, dataloader, criterion, optimizer, epochs): | ||||
|     """ define the training function """ | ||||
|     for epoch in tqdm(range(epochs), 'Epochs'): | ||||
|  | ||||
|         for i, (inputs, labels) in enumerate(dataloader): | ||||
|  | ||||
|             # forwarding | ||||
|             outputs = model(inputs) | ||||
|             loss = criterion(outputs, labels) | ||||
|  | ||||
|             # update gradient | ||||
|             optimizer.zero_grad() | ||||
|             loss.backward() | ||||
|             optimizer.step() | ||||
|  | ||||
|         # log loss | ||||
|         mlflow.log_metric('train_loss', loss.item(), step=epoch) | ||||
|  | ||||
|     return loss | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     # set hyper parameters | ||||
|     learning_rate = 1e-2 | ||||
|     epochs = 20 | ||||
|  | ||||
|     # create a dataloader with fake data | ||||
|     dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)] | ||||
|     dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10) | ||||
|  | ||||
|     # create the model, criterion, and optimizer | ||||
|     model = Net() | ||||
|     criterion = nn.MSELoss() | ||||
|     optimizer = SGD(model.parameters(), lr=learning_rate) | ||||
|  | ||||
|     # set the tracking URI to the model registry | ||||
|     mlflow.set_tracking_uri('http://127.0.0.1:5000') | ||||
|     mlflow.set_experiment('train_fortune_predict_model') | ||||
|  | ||||
|     # start a new MLflow run | ||||
|     with mlflow.start_run(): | ||||
|  | ||||
|         # train the model | ||||
|         loss = train(model, dataloader, criterion, optimizer, epochs) | ||||
|  | ||||
|         # log some additional metrics | ||||
|         mlflow.log_metric('final_loss', loss.item()) | ||||
|         mlflow.log_param('learning_rate', learning_rate) | ||||
|  | ||||
|         # create a signature to record model input and output | ||||
|         input_schema = Schema([ | ||||
|             ColSpec('float', 'age'), | ||||
|             ColSpec('float', 'mood level'), | ||||
|             ColSpec('float', 'health level'), | ||||
|             ColSpec('float', 'hungry level'), | ||||
|             ColSpec('float', 'sexy level') | ||||
|         ]) | ||||
|         output_schema = Schema([ColSpec('float', 'fortune')]) | ||||
|         signature = ModelSignature(inputs=input_schema, outputs=output_schema) | ||||
|  | ||||
|         # log trained model | ||||
|         mlflow.pytorch.log_model(model, 'model', signature=signature) | ||||
|  | ||||
|         # log training code | ||||
|         mlflow.log_artifact('./train.py', 'code') | ||||
|  | ||||
|     print('Completed.') | ||||
| @ -0,0 +1,21 @@ | ||||
| artifact_path: model | ||||
| flavors: | ||||
|   python_function: | ||||
|     data: data | ||||
|     env: conda.yaml | ||||
|     loader_module: mlflow.pytorch | ||||
|     pickle_module_name: mlflow.pytorch.pickle_module | ||||
|     python_version: 3.10.9 | ||||
|   pytorch: | ||||
|     code: null | ||||
|     model_data: data | ||||
|     pytorch_version: 1.13.1 | ||||
| mlflow_version: 1.30.0 | ||||
| model_uuid: 3f51fe9a1b40461d8c445f37695d72e2 | ||||
| run_id: 7f0b13a30b10483589f7ca922050e721 | ||||
| signature: | ||||
|   inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"}, | ||||
|     {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"}, | ||||
|     {"name": "sexy level", "type": "float"}]' | ||||
|   outputs: '[{"name": "fortune", "type": "float"}]' | ||||
| utc_time_created: '2023-02-22 02:43:39.269378' | ||||
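Once a run like this one is logged, the model can also be served over HTTP and scored using the signature's column names. A hedged sketch for MLflow 1.x, assuming the model has been served locally (port 5001 is arbitrary) and the tracking URI is exported so the runs:/ URI resolves:

```python
import requests

# assumed serving command, run separately in a shell:
#   mlflow models serve -m runs:/7f0b13a30b10483589f7ca922050e721/model -p 5001
payload = {
    'columns': ['age', 'mood level', 'health level', 'hungry level', 'sexy level'],
    'data': [[30.0, 0.5, 0.8, 0.2, 0.9]],
}
resp = requests.post(
    'http://127.0.0.1:5001/invocations',
    json=payload,
    headers={'Content-Type': 'application/json; format=pandas-split'},
)
print(resp.json())
```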
| @ -0,0 +1,11 @@ | ||||
| channels: | ||||
| - conda-forge | ||||
| dependencies: | ||||
| - python=3.10.9 | ||||
| - pip<=23.0.1 | ||||
| - pip: | ||||
|   - mlflow | ||||
|   - cloudpickle==2.2.1 | ||||
|   - torch==1.13.1 | ||||
|   - tqdm==4.64.1 | ||||
| name: mlflow-env | ||||
Binary file not shown.
							| @ -0,0 +1 @@ | ||||
| mlflow.pytorch.pickle_module | ||||
| @ -0,0 +1,7 @@ | ||||
| python: 3.10.9 | ||||
| build_dependencies: | ||||
| - pip==23.0.1 | ||||
| - setuptools==67.3.2 | ||||
| - wheel==0.38.4 | ||||
| dependencies: | ||||
| - -r requirements.txt | ||||
| @ -0,0 +1,4 @@ | ||||
| mlflow | ||||
| cloudpickle==2.2.1 | ||||
| torch==1.13.1 | ||||
| tqdm==4.64.1 | ||||
| @ -0,0 +1,83 @@ | ||||
| # train.py | ||||
| # | ||||
| # author: deng | ||||
| # date  : 20230221 | ||||
|  | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from torch.optim import SGD | ||||
| from tqdm import tqdm | ||||
| import mlflow | ||||
|  | ||||
|  | ||||
| class Net(nn.Module): | ||||
|     """ define a simple neural network model """ | ||||
|     def __init__(self): | ||||
|         super(Net, self).__init__() | ||||
|         self.fc1 = nn.Linear(10, 5) | ||||
|         self.fc2 = nn.Linear(5, 1) | ||||
|  | ||||
|     def forward(self, x): | ||||
|         x = self.fc1(x) | ||||
|         x = torch.relu(x) | ||||
|         x = self.fc2(x) | ||||
|         return x | ||||
|  | ||||
|  | ||||
| def train(model, dataloader, criterion, optimizer, epochs): | ||||
|     """ define the training function """ | ||||
|     for epoch in tqdm(range(epochs), 'Epochs'): | ||||
|  | ||||
|         for i, (inputs, labels) in enumerate(dataloader): | ||||
|  | ||||
|             # forwarding | ||||
|             outputs = model(inputs) | ||||
|             loss = criterion(outputs, labels) | ||||
|  | ||||
|             # update gradient | ||||
|             optimizer.zero_grad() | ||||
|             loss.backward() | ||||
|             optimizer.step() | ||||
|  | ||||
|             # log loss | ||||
|             mlflow.log_metric('train_loss', loss.item(), step=i) | ||||
|  | ||||
|     return loss | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     # set hyper parameters | ||||
|     learning_rate = 1e-2 | ||||
|     epochs = 20 | ||||
|  | ||||
|     # create a dataloader with fake data | ||||
|     dataloader = [(torch.randn(10), torch.randn(1)) for _ in range(100)] | ||||
|     dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10) | ||||
|  | ||||
|     # create the model, criterion, and optimizer | ||||
|     model = Net() | ||||
|     criterion = nn.MSELoss() | ||||
|     optimizer = SGD(model.parameters(), lr=learning_rate) | ||||
|  | ||||
|     # set the tracking URI to the model registry | ||||
|     mlflow.set_tracking_uri('http://127.0.0.1:5000') | ||||
|     mlflow.set_experiment('train_fortune_predict_model') | ||||
|  | ||||
|     # start the MLflow run | ||||
|     with mlflow.start_run(): | ||||
|  | ||||
|         # train the model | ||||
|         loss = train(model, dataloader, criterion, optimizer, epochs) | ||||
|  | ||||
|         # log some additional metrics | ||||
|         mlflow.log_metric('final_loss', loss.item()) | ||||
|         mlflow.log_param('learning_rate', learning_rate) | ||||
|  | ||||
|         # log trained model | ||||
|         mlflow.pytorch.log_model(model, 'model') | ||||
|  | ||||
|         # log training code | ||||
|         mlflow.log_artifact('./train.py', 'code') | ||||
|  | ||||
|     print('Completed.') | ||||
| @ -0,0 +1,16 @@ | ||||
| artifact_path: model | ||||
| flavors: | ||||
|   python_function: | ||||
|     data: data | ||||
|     env: conda.yaml | ||||
|     loader_module: mlflow.pytorch | ||||
|     pickle_module_name: mlflow.pytorch.pickle_module | ||||
|     python_version: 3.10.9 | ||||
|   pytorch: | ||||
|     code: null | ||||
|     model_data: data | ||||
|     pytorch_version: 1.13.1 | ||||
| mlflow_version: 1.30.0 | ||||
| model_uuid: a94b93c2d2674c97a5db43fda608ec76 | ||||
| run_id: 80447ac20c814d10a470208e303ea0ec | ||||
| utc_time_created: '2023-02-22 01:12:22.906728' | ||||
| @ -0,0 +1,11 @@ | ||||
| channels: | ||||
| - conda-forge | ||||
| dependencies: | ||||
| - python=3.10.9 | ||||
| - pip<=23.0.1 | ||||
| - pip: | ||||
|   - mlflow | ||||
|   - cloudpickle==2.2.1 | ||||
|   - torch==1.13.1 | ||||
|   - tqdm==4.64.1 | ||||
| name: mlflow-env | ||||
Binary file not shown.
							| @ -0,0 +1 @@ | ||||
| mlflow.pytorch.pickle_module | ||||
| @ -0,0 +1,7 @@ | ||||
| python: 3.10.9 | ||||
| build_dependencies: | ||||
| - pip==23.0.1 | ||||
| - setuptools==67.3.2 | ||||
| - wheel==0.38.4 | ||||
| dependencies: | ||||
| - -r requirements.txt | ||||
| @ -0,0 +1,4 @@ | ||||
| mlflow | ||||
| cloudpickle==2.2.1 | ||||
| torch==1.13.1 | ||||
| tqdm==4.64.1 | ||||
| @ -0,0 +1,96 @@ | ||||
| # train.py | ||||
| # | ||||
| # author: deng | ||||
| # date  : 20230221 | ||||
|  | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from torch.optim import SGD | ||||
| import mlflow | ||||
| from mlflow.models.signature import ModelSignature | ||||
| from mlflow.types.schema import Schema, ColSpec | ||||
| from tqdm import tqdm | ||||
|  | ||||
|  | ||||
| class Net(nn.Module): | ||||
|     """ define a simple neural network model """ | ||||
|     def __init__(self): | ||||
|         super(Net, self).__init__() | ||||
|         self.fc1 = nn.Linear(5, 3) | ||||
|         self.fc2 = nn.Linear(3, 1) | ||||
|  | ||||
|     def forward(self, x): | ||||
|         x = self.fc1(x) | ||||
|         x = torch.relu(x) | ||||
|         x = self.fc2(x) | ||||
|         return x | ||||
|  | ||||
|  | ||||
| def train(model, dataloader, criterion, optimizer, epochs): | ||||
|     """ define the training function """ | ||||
|     for epoch in tqdm(range(epochs), 'Epochs'): | ||||
|  | ||||
|         for i, (inputs, labels) in enumerate(dataloader): | ||||
|  | ||||
|             # forwarding | ||||
|             outputs = model(inputs) | ||||
|             loss = criterion(outputs, labels) | ||||
|  | ||||
|             # update gradient | ||||
|             optimizer.zero_grad() | ||||
|             loss.backward() | ||||
|             optimizer.step() | ||||
|  | ||||
|         # log loss | ||||
|         mlflow.log_metric('train_loss', loss.item(), step=epoch) | ||||
|  | ||||
|     return loss | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     # set hyper parameters | ||||
|     learning_rate = 1e-4 | ||||
|     epochs = 20 | ||||
|  | ||||
|     # create a dataloader with fake data | ||||
|     dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)] | ||||
|     dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10) | ||||
|  | ||||
|     # create the model, criterion, and optimizer | ||||
|     model = Net() | ||||
|     criterion = nn.MSELoss() | ||||
|     optimizer = SGD(model.parameters(), lr=learning_rate) | ||||
|  | ||||
|     # set the tracking URI to the model registry | ||||
|     mlflow.set_tracking_uri('http://127.0.0.1:5000') | ||||
|     mlflow.set_experiment('train_fortune_predict_model') | ||||
|  | ||||
|     # start a new MLflow run | ||||
|     with mlflow.start_run(): | ||||
|  | ||||
|         # train the model | ||||
|         loss = train(model, dataloader, criterion, optimizer, epochs) | ||||
|  | ||||
|         # log some additional metrics | ||||
|         mlflow.log_metric('final_loss', loss.item()) | ||||
|         mlflow.log_param('learning_rate', learning_rate) | ||||
|  | ||||
|         # create a signature to record model input and output | ||||
|         input_schema = Schema([ | ||||
|             ColSpec('float', 'age'), | ||||
|             ColSpec('float', 'mood level'), | ||||
|             ColSpec('float', 'health level'), | ||||
|             ColSpec('float', 'hungry level'), | ||||
|             ColSpec('float', 'sexy level') | ||||
|         ]) | ||||
|         output_schema = Schema([ColSpec('float', 'fortune')]) | ||||
|         signature = ModelSignature(inputs=input_schema, outputs=output_schema) | ||||
|  | ||||
|         # log trained model | ||||
|         mlflow.pytorch.log_model(model, 'model', signature=signature) | ||||
|  | ||||
|         # log training code | ||||
|         mlflow.log_artifact('./train.py', 'code') | ||||
|  | ||||
|     print('Completed.') | ||||
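The script above logs the trained model together with an explicit signature. As a minimal sketch (not part of this commit), the logged PyTorch model could later be loaded back through the same tracking server; '<RUN_ID>' is a placeholder for the run ID shown in the MLflow UI:

    import torch
    import mlflow

    # assumes the tracking server used in train.py is still reachable
    mlflow.set_tracking_uri('http://127.0.0.1:5000')

    # load the native PyTorch flavor of the logged model ('<RUN_ID>' is hypothetical)
    model = mlflow.pytorch.load_model('runs:/<RUN_ID>/model')
    model.eval()

    # the network expects 5 input features (Linear(5, 3) -> Linear(3, 1))
    with torch.no_grad():
        print(model(torch.randn(1, 5)))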
| @ -0,0 +1,21 @@ | ||||
| artifact_path: model | ||||
| flavors: | ||||
|   python_function: | ||||
|     data: data | ||||
|     env: conda.yaml | ||||
|     loader_module: mlflow.pytorch | ||||
|     pickle_module_name: mlflow.pytorch.pickle_module | ||||
|     python_version: 3.10.9 | ||||
|   pytorch: | ||||
|     code: null | ||||
|     model_data: data | ||||
|     pytorch_version: 1.13.1 | ||||
| mlflow_version: 1.30.0 | ||||
| model_uuid: 461190610cac47f5970e3386972070da | ||||
| run_id: 936ba15a1bf74591a10e1711133f4be4 | ||||
| signature: | ||||
|   inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"}, | ||||
|     {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"}, | ||||
|     {"name": "sexy level", "type": "float"}]' | ||||
|   outputs: '[{"name": "fortune", "type": "float"}]' | ||||
| utc_time_created: '2023-02-22 02:42:59.306161' | ||||
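The MLmodel file above records the model's input and output schema. A rough sketch (an assumption, not part of this commit) of scoring through the generic pyfunc flavor, where the DataFrame columns have to match the recorded input schema, could look like this:

    import pandas as pd
    import mlflow

    mlflow.set_tracking_uri('http://127.0.0.1:5000')

    # hypothetical sample row; column names follow the signature recorded in MLmodel
    sample = pd.DataFrame([{
        'age': 30.0,
        'mood level': 0.5,
        'health level': 0.7,
        'hungry level': 0.1,
        'sexy level': 0.9,
    }]).astype('float32')  # cast to float32 to match the declared 'float' type

    # '<RUN_ID>' is a placeholder for the run listed above
    model = mlflow.pyfunc.load_model('runs:/<RUN_ID>/model')
    print(model.predict(sample))

With a signature attached, MLflow checks the column names and types of the payload before it reaches the PyTorch wrapper, so a malformed request fails early.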
| @ -0,0 +1,11 @@ | ||||
| channels: | ||||
| - conda-forge | ||||
| dependencies: | ||||
| - python=3.10.9 | ||||
| - pip<=23.0.1 | ||||
| - pip: | ||||
|   - mlflow | ||||
|   - cloudpickle==2.2.1 | ||||
|   - torch==1.13.1 | ||||
|   - tqdm==4.64.1 | ||||
| name: mlflow-env | ||||
										
Binary file not shown.
							| @ -0,0 +1 @@ | ||||
| mlflow.pytorch.pickle_module | ||||
| @ -0,0 +1,7 @@ | ||||
| python: 3.10.9 | ||||
| build_dependencies: | ||||
| - pip==23.0.1 | ||||
| - setuptools==67.3.2 | ||||
| - wheel==0.38.4 | ||||
| dependencies: | ||||
| - -r requirements.txt | ||||
| @ -0,0 +1,4 @@ | ||||
| mlflow | ||||
| cloudpickle==2.2.1 | ||||
| torch==1.13.1 | ||||
| tqdm==4.64.1 | ||||
| @ -0,0 +1,96 @@ | ||||
| # train.py | ||||
| # | ||||
| # author: deng | ||||
| # date  : 20230221 | ||||
|  | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from torch.optim import SGD | ||||
| import mlflow | ||||
| from mlflow.models.signature import ModelSignature | ||||
| from mlflow.types.schema import Schema, ColSpec | ||||
| from tqdm import tqdm | ||||
|  | ||||
|  | ||||
| class Net(nn.Module): | ||||
|     """ define a simple neural network model """ | ||||
|     def __init__(self): | ||||
|         super(Net, self).__init__() | ||||
|         self.fc1 = nn.Linear(5, 3) | ||||
|         self.fc2 = nn.Linear(3, 1) | ||||
|  | ||||
|     def forward(self, x): | ||||
|         x = self.fc1(x) | ||||
|         x = torch.relu(x) | ||||
|         x = self.fc2(x) | ||||
|         return x | ||||
|  | ||||
|  | ||||
| def train(model, dataloader, criterion, optimizer, epochs): | ||||
|     """ define the training function """ | ||||
|     for epoch in tqdm(range(epochs), 'Epochs'): | ||||
|  | ||||
|         for i, (inputs, labels) in enumerate(dataloader): | ||||
|  | ||||
|             # forward pass | ||||
|             outputs = model(inputs) | ||||
|             loss = criterion(outputs, labels) | ||||
|  | ||||
|             # backpropagate and update the weights | ||||
|             optimizer.zero_grad() | ||||
|             loss.backward() | ||||
|             optimizer.step() | ||||
|  | ||||
|             # log loss | ||||
|             mlflow.log_metric('train_loss', loss.item(), step=i) | ||||
|  | ||||
|     return loss | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     # set hyperparameters | ||||
|     learning_rate = 1e-2 | ||||
|     epochs = 20 | ||||
|  | ||||
|     # create a dataloader with fake data | ||||
|     dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)] | ||||
|     dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10) | ||||
|  | ||||
|     # create the model, criterion, and optimizer | ||||
|     model = Net() | ||||
|     criterion = nn.MSELoss() | ||||
|     optimizer = SGD(model.parameters(), lr=learning_rate) | ||||
|  | ||||
|     # set the tracking URI to the MLflow tracking server | ||||
|     mlflow.set_tracking_uri('http://127.0.0.1:5000') | ||||
|     mlflow.set_experiment('train_fortune_predict_model') | ||||
|  | ||||
|     # start a new MLflow run | ||||
|     with mlflow.start_run(): | ||||
|  | ||||
|         # train the model | ||||
|         loss = train(model, dataloader, criterion, optimizer, epochs) | ||||
|  | ||||
|         # log some additional metrics | ||||
|         mlflow.log_metric('final_loss', loss.item()) | ||||
|         mlflow.log_param('learning_rate', learning_rate) | ||||
|  | ||||
|         # create a signature to record model input and output | ||||
|         input_schema = Schema([ | ||||
|             ColSpec('float', 'age'), | ||||
|             ColSpec('float', 'mood level'), | ||||
|             ColSpec('float', 'health level'), | ||||
|             ColSpec('float', 'hungry level'), | ||||
|             ColSpec('float', 'sexy level') | ||||
|         ]) | ||||
|         output_schema = Schema([ColSpec('float', 'fortune')]) | ||||
|         signature = ModelSignature(inputs=input_schema, outputs=output_schema) | ||||
|  | ||||
|         # log trained model | ||||
|         mlflow.pytorch.log_model(model, 'model', signature=signature) | ||||
|  | ||||
|         # log training code | ||||
|         mlflow.log_artifact('./train.py', 'code') | ||||
|  | ||||
|     print('Completed.') | ||||
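The signature here is assembled by hand from Schema and ColSpec objects. As an alternative sketch (an assumption, not what this commit does), mlflow.models.signature.infer_signature can derive the same structure from a sample batch:

    import pandas as pd
    from mlflow.models.signature import infer_signature

    # hypothetical sample input/output used only to derive the schema
    sample_input = pd.DataFrame(
        [[30.0, 0.5, 0.7, 0.1, 0.9]],
        columns=['age', 'mood level', 'health level', 'hungry level', 'sexy level'])
    sample_output = pd.DataFrame([[0.42]], columns=['fortune'])

    signature = infer_signature(sample_input, sample_output)
    # the result could then be passed to mlflow.pytorch.log_model(model, 'model', signature=signature)

Note that float64 sample data is inferred as 'double' rather than 'float', which is why the hand-built schema keeps the recorded type as 'float'.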
| @ -0,0 +1,21 @@ | ||||
| artifact_path: model | ||||
| flavors: | ||||
|   python_function: | ||||
|     data: data | ||||
|     env: conda.yaml | ||||
|     loader_module: mlflow.pytorch | ||||
|     pickle_module_name: mlflow.pytorch.pickle_module | ||||
|     python_version: 3.10.9 | ||||
|   pytorch: | ||||
|     code: null | ||||
|     model_data: data | ||||
|     pytorch_version: 1.13.1 | ||||
| mlflow_version: 1.30.0 | ||||
| model_uuid: eb6ac492a8a5426d815dce50de4d2545 | ||||
| run_id: 9ea595f0043a477bb635754b7f77fa48 | ||||
| signature: | ||||
|   inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"}, | ||||
|     {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"}, | ||||
|     {"name": "sexy level", "type": "float"}]' | ||||
|   outputs: '[{"name": "fortune", "type": "float"}]' | ||||
| utc_time_created: '2023-02-22 02:26:27.043234' | ||||
| @ -0,0 +1,11 @@ | ||||
| channels: | ||||
| - conda-forge | ||||
| dependencies: | ||||
| - python=3.10.9 | ||||
| - pip<=23.0.1 | ||||
| - pip: | ||||
|   - mlflow | ||||
|   - cloudpickle==2.2.1 | ||||
|   - torch==1.13.1 | ||||
|   - tqdm==4.64.1 | ||||
| name: mlflow-env | ||||
										
Binary file not shown.
							| @ -0,0 +1 @@ | ||||
| mlflow.pytorch.pickle_module | ||||
| @ -0,0 +1,7 @@ | ||||
| python: 3.10.9 | ||||
| build_dependencies: | ||||
| - pip==23.0.1 | ||||
| - setuptools==67.3.2 | ||||
| - wheel==0.38.4 | ||||
| dependencies: | ||||
| - -r requirements.txt | ||||
| @ -0,0 +1,4 @@ | ||||
| mlflow | ||||
| cloudpickle==2.2.1 | ||||
| torch==1.13.1 | ||||
| tqdm==4.64.1 | ||||
| @ -0,0 +1,96 @@ | ||||
| # train.py | ||||
| # | ||||
| # author: deng | ||||
| # date  : 20230221 | ||||
|  | ||||
| import torch | ||||
| import torch.nn as nn | ||||
| from torch.optim import SGD | ||||
| import mlflow | ||||
| from mlflow.models.signature import ModelSignature | ||||
| from mlflow.types.schema import Schema, ColSpec | ||||
| from tqdm import tqdm | ||||
|  | ||||
|  | ||||
| class Net(nn.Module): | ||||
|     """ define a simple neural network model """ | ||||
|     def __init__(self): | ||||
|         super(Net, self).__init__() | ||||
|         self.fc1 = nn.Linear(5, 3) | ||||
|         self.fc2 = nn.Linear(3, 1) | ||||
|  | ||||
|     def forward(self, x): | ||||
|         x = self.fc1(x) | ||||
|         x = torch.relu(x) | ||||
|         x = self.fc2(x) | ||||
|         return x | ||||
|  | ||||
|  | ||||
| def train(model, dataloader, criterion, optimizer, epochs): | ||||
|     """ define the training function """ | ||||
|     for epoch in tqdm(range(epochs), 'Epochs'): | ||||
|  | ||||
|         for i, (inputs, labels) in enumerate(dataloader): | ||||
|  | ||||
|             # forward pass | ||||
|             outputs = model(inputs) | ||||
|             loss = criterion(outputs, labels) | ||||
|  | ||||
|             # backpropagate and update the weights | ||||
|             optimizer.zero_grad() | ||||
|             loss.backward() | ||||
|             optimizer.step() | ||||
|  | ||||
|             # log loss | ||||
|             mlflow.log_metric('train_loss', loss.item(), step=i) | ||||
|  | ||||
|     return loss | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     # set hyperparameters | ||||
|     learning_rate = 1e-2 | ||||
|     epochs = 20 | ||||
|  | ||||
|     # create a dataloader with fake data | ||||
|     dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)] | ||||
|     dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10) | ||||
|  | ||||
|     # create the model, criterion, and optimizer | ||||
|     model = Net() | ||||
|     criterion = nn.MSELoss() | ||||
|     optimizer = SGD(model.parameters(), lr=learning_rate) | ||||
|  | ||||
|     # set the tracking URI to the MLflow tracking server | ||||
|     mlflow.set_tracking_uri('http://127.0.0.1:5000') | ||||
|     mlflow.set_experiment('train_fortune_predict_model') | ||||
|  | ||||
|     # start a new MLflow run | ||||
|     with mlflow.start_run(): | ||||
|  | ||||
|         # train the model | ||||
|         loss = train(model, dataloader, criterion, optimizer, epochs) | ||||
|  | ||||
|         # log some additional metrics | ||||
|         mlflow.log_metric('final_loss', loss.item()) | ||||
|         mlflow.log_param('learning_rate', learning_rate) | ||||
|  | ||||
|         # create a signature to record model input and output | ||||
|         input_schema = Schema([ | ||||
|             ColSpec('float', 'age'), | ||||
|             ColSpec('float', 'mood level'), | ||||
|             ColSpec('float', 'health level'), | ||||
|             ColSpec('float', 'hungry level'), | ||||
|             ColSpec('float', 'sexy level') | ||||
|         ]) | ||||
|         output_schema = Schema([ColSpec('float', 'fortune')]) | ||||
|         signature = ModelSignature(inputs=input_schema, outputs=output_schema) | ||||
|  | ||||
|         # log trained model | ||||
|         mlflow.pytorch.log_model(model, 'model', signature=signature) | ||||
|  | ||||
|         # log training code | ||||
|         mlflow.log_artifact('./train.py', 'code') | ||||
|  | ||||
|     print('Completed.') | ||||
| @ -0,0 +1,21 @@ | ||||
| artifact_path: model | ||||
| flavors: | ||||
|   python_function: | ||||
|     data: data | ||||
|     env: conda.yaml | ||||
|     loader_module: mlflow.pytorch | ||||
|     pickle_module_name: mlflow.pytorch.pickle_module | ||||
|     python_version: 3.10.9 | ||||
|   pytorch: | ||||
|     code: null | ||||
|     model_data: data | ||||
|     pytorch_version: 1.13.1 | ||||
| mlflow_version: 1.30.0 | ||||
| model_uuid: d240fe9348c24d1f871506e8d1155f56 | ||||
| run_id: ab28ca803756454881d83e7e85945df0 | ||||
| signature: | ||||
|   inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"}, | ||||
|     {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"}, | ||||
|     {"name": "sexy level", "type": "float"}]' | ||||
|   outputs: '[{"name": "fortune", "type": "float"}]' | ||||
| utc_time_created: '2023-02-22 02:26:30.657849' | ||||
Some files were not shown because too many files have changed in this diff.