update server data

2023-02-22 16:26:02 +08:00
parent 7015b5c1a5
commit 2dd734b87b
134 changed files with 2502 additions and 0 deletions

View File

@ -0,0 +1,83 @@
# train.py
#
# author: deng
# date : 20230221

import torch
import torch.nn as nn
from torch.optim import SGD
from tqdm import tqdm
import mlflow


class Net(nn.Module):
    """ define a simple neural network model """
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(10, 5)
        self.fc2 = nn.Linear(5, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x


def train(model, dataloader, criterion, optimizer, epochs):
    """ define the training function """
    for epoch in tqdm(range(epochs), 'Epochs'):
        for i, (inputs, labels) in enumerate(dataloader):
            # forwarding
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # update gradient
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # log loss
            mlflow.log_metric('train_loss', loss.item(), step=i)

    return loss


if __name__ == '__main__':

    # set hyper parameters
    learning_rate = 1e-2
    epochs = 20

    # create a dataloader with fake data
    dataloader = [(torch.randn(10), torch.randn(1)) for _ in range(100)]
    dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10)

    # create the model, criterion, and optimizer
    model = Net()
    criterion = nn.MSELoss()
    optimizer = SGD(model.parameters(), lr=learning_rate)

    # set the tracking URI to the model registry
    mlflow.set_tracking_uri('http://127.0.0.1:5000')
    mlflow.set_experiment('train_fortune_predict_model')

    # start the MLflow run
    with mlflow.start_run():

        # train the model
        loss = train(model, dataloader, criterion, optimizer, epochs)

        # log some additional metrics
        mlflow.log_metric('final_loss', loss.item())
        mlflow.log_param('learning_rate', learning_rate)

        # log trained model
        mlflow.pytorch.log_model(model, 'model')

        # log training code
        mlflow.log_artifact('./train.py', 'code')

        print('Completed.')
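
For context, a model logged with mlflow.pytorch.log_model as above can be pulled back from the same tracking server for inference. The following is only an illustrative sketch, not part of this commit: it assumes the tracking server at http://127.0.0.1:5000 is running and that <run_id> is replaced with a real run ID from the train_fortune_predict_model experiment.

# sketch: load the logged model back from the tracking server and run one prediction
import torch
import mlflow

mlflow.set_tracking_uri('http://127.0.0.1:5000')

# '<run_id>' is a placeholder; copy an actual run ID from the MLflow UI or search API
model = mlflow.pytorch.load_model('runs:/<run_id>/model')
model.eval()

# this version of Net expects a 10-dimensional input
with torch.no_grad():
    print(model(torch.randn(1, 10)))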

View File

@ -0,0 +1,16 @@
artifact_path: model
flavors:
  python_function:
    data: data
    env: conda.yaml
    loader_module: mlflow.pytorch
    pickle_module_name: mlflow.pytorch.pickle_module
    python_version: 3.10.9
  pytorch:
    code: null
    model_data: data
    pytorch_version: 1.13.1
mlflow_version: 1.30.0
model_uuid: 75aa23c5bb33452c978feeeffcdcb393
run_id: 0be79b1f3f7d480a9c7f497312887a37
utc_time_created: '2023-02-22 01:12:26.682417'

View File

@ -0,0 +1,11 @@
channels:
- conda-forge
dependencies:
- python=3.10.9
- pip<=23.0.1
- pip:
  - mlflow
  - cloudpickle==2.2.1
  - torch==1.13.1
  - tqdm==4.64.1
name: mlflow-env

View File

@ -0,0 +1 @@
mlflow.pytorch.pickle_module

View File

@ -0,0 +1,7 @@
python: 3.10.9
build_dependencies:
- pip==23.0.1
- setuptools==67.3.2
- wheel==0.38.4
dependencies:
- -r requirements.txt

View File

@ -0,0 +1,4 @@
mlflow
cloudpickle==2.2.1
torch==1.13.1
tqdm==4.64.1

View File

@ -0,0 +1,83 @@
# train.py
#
# author: deng
# date : 20230221

import torch
import torch.nn as nn
from torch.optim import SGD
from tqdm import tqdm
import mlflow


class Net(nn.Module):
    """ define a simple neural network model """
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(10, 5)
        self.fc2 = nn.Linear(5, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x


def train(model, dataloader, criterion, optimizer, epochs):
    """ define the training function """
    for epoch in tqdm(range(epochs), 'Epochs'):
        for i, (inputs, labels) in enumerate(dataloader):
            # forwarding
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # update gradient
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # log loss
            mlflow.log_metric('train_loss', loss.item(), step=i)

    return loss


if __name__ == '__main__':

    # set hyper parameters
    learning_rate = 1e-2
    epochs = 20

    # create a dataloader with fake data
    dataloader = [(torch.randn(10), torch.randn(1)) for _ in range(100)]
    dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10)

    # create the model, criterion, and optimizer
    model = Net()
    criterion = nn.MSELoss()
    optimizer = SGD(model.parameters(), lr=learning_rate)

    # set the tracking URI to the model registry
    mlflow.set_tracking_uri('http://127.0.0.1:5000')
    mlflow.set_experiment('train_fortune_predict_model')

    # start the MLflow run
    with mlflow.start_run():

        # train the model
        loss = train(model, dataloader, criterion, optimizer, epochs)

        # log some additional metrics
        mlflow.log_metric('final_loss', loss.item())
        mlflow.log_param('learning_rate', learning_rate)

        # log trained model
        mlflow.pytorch.log_model(model, 'model')

        # log training code
        mlflow.log_artifact('./train.py', 'code')

        print('Completed.')

View File

@ -0,0 +1,16 @@
artifact_path: model
flavors:
  python_function:
    data: data
    env: conda.yaml
    loader_module: mlflow.pytorch
    pickle_module_name: mlflow.pytorch.pickle_module
    python_version: 3.10.9
  pytorch:
    code: null
    model_data: data
    pytorch_version: 1.13.1
mlflow_version: 1.30.0
model_uuid: 20196b21b65b400eac57135de7cb2bac
run_id: 2af28d45b569431ab10056469d63cf44
utc_time_created: '2023-02-22 01:12:30.183766'

View File

@ -0,0 +1,11 @@
channels:
- conda-forge
dependencies:
- python=3.10.9
- pip<=23.0.1
- pip:
  - mlflow
  - cloudpickle==2.2.1
  - torch==1.13.1
  - tqdm==4.64.1
name: mlflow-env

View File

@ -0,0 +1 @@
mlflow.pytorch.pickle_module

View File

@ -0,0 +1,7 @@
python: 3.10.9
build_dependencies:
- pip==23.0.1
- setuptools==67.3.2
- wheel==0.38.4
dependencies:
- -r requirements.txt

View File

@ -0,0 +1,4 @@
mlflow
cloudpickle==2.2.1
torch==1.13.1
tqdm==4.64.1

View File

@ -0,0 +1,98 @@
# train.py
#
# author: deng
# date : 20230221

import torch
import torch.nn as nn
from torch.optim import SGD
import mlflow
from mlflow.models.signature import ModelSignature
from mlflow.types.schema import Schema, ColSpec
from tqdm import tqdm


class Net(nn.Module):
    """ define a simple neural network model """
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(5, 3)
        self.fc2 = nn.Linear(3, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x


def train(model, dataloader, criterion, optimizer, epochs):
    """ define the training function """
    for epoch in tqdm(range(epochs), 'Epochs'):
        for i, (inputs, labels) in enumerate(dataloader):
            # forwarding
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # update gradient
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # log loss
            mlflow.log_metric('train_loss', loss.item(), step=epoch)

    return loss


if __name__ == '__main__':

    # set hyper parameters
    learning_rate = 1e-2
    batch_size = 10
    epochs = 20

    # create a dataloader with fake data
    dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)]
    dataloader = torch.utils.data.DataLoader(dataloader, batch_size=batch_size)

    # create the model, criterion, and optimizer
    model = Net()
    criterion = nn.MSELoss()
    optimizer = SGD(model.parameters(), lr=learning_rate)

    # set the tracking URI to the model registry
    mlflow.set_tracking_uri('http://127.0.0.1:5000')
    mlflow.set_experiment('train_fortune_predict_model')

    # start a new MLflow run
    with mlflow.start_run():

        # train the model
        loss = train(model, dataloader, criterion, optimizer, epochs)

        # log some additional metrics
        mlflow.log_metric('final_loss', loss.item())
        mlflow.log_param('learning_rate', learning_rate)
        mlflow.log_param('batch_size', batch_size)

        # create a signature to record model input and output
        input_schema = Schema([
            ColSpec('float', 'age'),
            ColSpec('float', 'mood level'),
            ColSpec('float', 'health level'),
            ColSpec('float', 'hungry level'),
            ColSpec('float', 'sexy level')
        ])
        output_schema = Schema([ColSpec('float', 'fortune')])
        signature = ModelSignature(inputs=input_schema, outputs=output_schema)

        # log trained model
        mlflow.pytorch.log_model(model, 'model', signature=signature)

        # log training code
        mlflow.log_artifact('./train.py', 'code')

        print('Completed.')
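
Since this version logs a signature, the model can also be consumed through MLflow's generic pyfunc interface, which checks incoming column names and types against the schema declared above. Again a hedged sketch rather than part of the commit; it assumes the tracking server is reachable, pandas is installed, and <run_id> is filled in with a real run ID.

# sketch: query the signed model via the pyfunc flavor
import pandas as pd
import mlflow

mlflow.set_tracking_uri('http://127.0.0.1:5000')

# '<run_id>' is a placeholder for an actual run ID
model = mlflow.pyfunc.load_model('runs:/<run_id>/model')

# column names must match the logged input schema; 'float' in the schema means float32
sample = pd.DataFrame([{
    'age': 30.0,
    'mood level': 0.7,
    'health level': 0.9,
    'hungry level': 0.2,
    'sexy level': 0.5,
}]).astype('float32')

print(model.predict(sample))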

View File

@ -0,0 +1,21 @@
artifact_path: model
flavors:
  python_function:
    data: data
    env: conda.yaml
    loader_module: mlflow.pytorch
    pickle_module_name: mlflow.pytorch.pickle_module
    python_version: 3.10.9
  pytorch:
    code: null
    model_data: data
    pytorch_version: 1.13.1
mlflow_version: 1.30.0
model_uuid: 4dc69e8ee0404f70b0c17f8119986704
run_id: 581cf34366f74ee7b90b18f0a606ddf8
signature:
  inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"},
    {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"},
    {"name": "sexy level", "type": "float"}]'
  outputs: '[{"name": "fortune", "type": "float"}]'
utc_time_created: '2023-02-22 02:46:27.876394'
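
The signature recorded above was built by hand from ColSpec entries in train.py; MLflow can also derive an equivalent signature from example data. A small sketch of that alternative (infer_signature is an MLflow helper; the sample arrays below are made up for illustration):

# sketch: derive the same signature from example data instead of hand-written ColSpecs
import numpy as np
import pandas as pd
from mlflow.models.signature import infer_signature

sample_input = pd.DataFrame(
    np.random.rand(3, 5).astype('float32'),
    columns=['age', 'mood level', 'health level', 'hungry level', 'sexy level'])
sample_output = np.random.rand(3, 1).astype('float32')

signature = infer_signature(sample_input, sample_output)
print(signature)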

View File

@ -0,0 +1,11 @@
channels:
- conda-forge
dependencies:
- python=3.10.9
- pip<=23.0.1
- pip:
  - mlflow
  - cloudpickle==2.2.1
  - torch==1.13.1
  - tqdm==4.64.1
name: mlflow-env

View File

@ -0,0 +1 @@
mlflow.pytorch.pickle_module

View File

@ -0,0 +1,7 @@
python: 3.10.9
build_dependencies:
- pip==23.0.1
- setuptools==67.3.2
- wheel==0.38.4
dependencies:
- -r requirements.txt

View File

@ -0,0 +1,4 @@
mlflow
cloudpickle==2.2.1
torch==1.13.1
tqdm==4.64.1

View File

@ -0,0 +1,98 @@
# train.py
#
# author: deng
# date : 20230221

import torch
import torch.nn as nn
from torch.optim import SGD
import mlflow
from mlflow.models.signature import ModelSignature
from mlflow.types.schema import Schema, ColSpec
from tqdm import tqdm


class Net(nn.Module):
    """ define a simple neural network model """
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(5, 3)
        self.fc2 = nn.Linear(3, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x


def train(model, dataloader, criterion, optimizer, epochs):
    """ define the training function """
    for epoch in tqdm(range(epochs), 'Epochs'):
        for i, (inputs, labels) in enumerate(dataloader):
            # forwarding
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # update gradient
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # log loss
            mlflow.log_metric('train_loss', loss.item(), step=epoch)

    return loss


if __name__ == '__main__':

    # set hyper parameters
    learning_rate = 1e-2
    batch_size = 10
    epochs = 20

    # create a dataloader with fake data
    dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)]
    dataloader = torch.utils.data.DataLoader(dataloader, batch_size=batch_size)

    # create the model, criterion, and optimizer
    model = Net()
    criterion = nn.MSELoss()
    optimizer = SGD(model.parameters(), lr=learning_rate)

    # set the tracking URI to the model registry
    mlflow.set_tracking_uri('http://127.0.0.1:5000')
    mlflow.set_experiment('train_fortune_predict_model')

    # start a new MLflow run
    with mlflow.start_run():

        # train the model
        loss = train(model, dataloader, criterion, optimizer, epochs)

        # log some additional metrics
        mlflow.log_metric('final_loss', loss.item())
        mlflow.log_param('learning_rate', learning_rate)
        mlflow.log_param('batch_size', batch_size)

        # create a signature to record model input and output
        input_schema = Schema([
            ColSpec('float', 'age'),
            ColSpec('float', 'mood level'),
            ColSpec('float', 'health level'),
            ColSpec('float', 'hungry level'),
            ColSpec('float', 'sexy level')
        ])
        output_schema = Schema([ColSpec('float', 'fortune')])
        signature = ModelSignature(inputs=input_schema, outputs=output_schema)

        # log trained model
        mlflow.pytorch.log_model(model, 'model', signature=signature)

        # log training code
        mlflow.log_artifact('./train.py', 'code')

        print('Completed.')

View File

@ -0,0 +1,21 @@
artifact_path: model
flavors:
  python_function:
    data: data
    env: conda.yaml
    loader_module: mlflow.pytorch
    pickle_module_name: mlflow.pytorch.pickle_module
    python_version: 3.10.9
  pytorch:
    code: null
    model_data: data
    pytorch_version: 1.13.1
mlflow_version: 1.30.0
model_uuid: 1334c03425a6454fafce1db8fa3a2428
run_id: 5c8b249efc724568aca1b8a4d8750b43
signature:
  inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"},
    {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"},
    {"name": "sexy level", "type": "float"}]'
  outputs: '[{"name": "fortune", "type": "float"}]'
utc_time_created: '2023-02-22 02:46:24.939018'

View File

@ -0,0 +1,11 @@
channels:
- conda-forge
dependencies:
- python=3.10.9
- pip<=23.0.1
- pip:
  - mlflow
  - cloudpickle==2.2.1
  - torch==1.13.1
  - tqdm==4.64.1
name: mlflow-env

View File

@ -0,0 +1 @@
mlflow.pytorch.pickle_module

View File

@ -0,0 +1,7 @@
python: 3.10.9
build_dependencies:
- pip==23.0.1
- setuptools==67.3.2
- wheel==0.38.4
dependencies:
- -r requirements.txt

View File

@ -0,0 +1,4 @@
mlflow
cloudpickle==2.2.1
torch==1.13.1
tqdm==4.64.1

View File

@ -0,0 +1,96 @@
# train.py
#
# author: deng
# date : 20230221

import torch
import torch.nn as nn
from torch.optim import SGD
import mlflow
from mlflow.models.signature import ModelSignature
from mlflow.types.schema import Schema, ColSpec
from tqdm import tqdm


class Net(nn.Module):
    """ define a simple neural network model """
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(5, 3)
        self.fc2 = nn.Linear(3, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x


def train(model, dataloader, criterion, optimizer, epochs):
    """ define the training function """
    for epoch in tqdm(range(epochs), 'Epochs'):
        for i, (inputs, labels) in enumerate(dataloader):
            # forwarding
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # update gradient
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # log loss
            mlflow.log_metric('train_loss', loss.item(), step=epoch)

    return loss


if __name__ == '__main__':

    # set hyper parameters
    learning_rate = 1e-2
    epochs = 20

    # create a dataloader with fake data
    dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)]
    dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10)

    # create the model, criterion, and optimizer
    model = Net()
    criterion = nn.MSELoss()
    optimizer = SGD(model.parameters(), lr=learning_rate)

    # set the tracking URI to the model registry
    mlflow.set_tracking_uri('http://127.0.0.1:5000')
    mlflow.set_experiment('train_fortune_predict_model')

    # start a new MLflow run
    with mlflow.start_run():

        # train the model
        loss = train(model, dataloader, criterion, optimizer, epochs)

        # log some additional metrics
        mlflow.log_metric('final_loss', loss.item())
        mlflow.log_param('learning_rate', learning_rate)

        # create a signature to record model input and output
        input_schema = Schema([
            ColSpec('float', 'age'),
            ColSpec('float', 'mood level'),
            ColSpec('float', 'health level'),
            ColSpec('float', 'hungry level'),
            ColSpec('float', 'sexy level')
        ])
        output_schema = Schema([ColSpec('float', 'fortune')])
        signature = ModelSignature(inputs=input_schema, outputs=output_schema)

        # log trained model
        mlflow.pytorch.log_model(model, 'model', signature=signature)

        # log training code
        mlflow.log_artifact('./train.py', 'code')

        print('Completed.')

View File

@ -0,0 +1,21 @@
artifact_path: model
flavors:
  python_function:
    data: data
    env: conda.yaml
    loader_module: mlflow.pytorch
    pickle_module_name: mlflow.pytorch.pickle_module
    python_version: 3.10.9
  pytorch:
    code: null
    model_data: data
    pytorch_version: 1.13.1
mlflow_version: 1.30.0
model_uuid: 3f51fe9a1b40461d8c445f37695d72e2
run_id: 7f0b13a30b10483589f7ca922050e721
signature:
  inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"},
    {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"},
    {"name": "sexy level", "type": "float"}]'
  outputs: '[{"name": "fortune", "type": "float"}]'
utc_time_created: '2023-02-22 02:43:39.269378'

View File

@ -0,0 +1,11 @@
channels:
- conda-forge
dependencies:
- python=3.10.9
- pip<=23.0.1
- pip:
  - mlflow
  - cloudpickle==2.2.1
  - torch==1.13.1
  - tqdm==4.64.1
name: mlflow-env

View File

@ -0,0 +1 @@
mlflow.pytorch.pickle_module

View File

@ -0,0 +1,7 @@
python: 3.10.9
build_dependencies:
- pip==23.0.1
- setuptools==67.3.2
- wheel==0.38.4
dependencies:
- -r requirements.txt

View File

@ -0,0 +1,4 @@
mlflow
cloudpickle==2.2.1
torch==1.13.1
tqdm==4.64.1

View File

@ -0,0 +1,83 @@
# train.py
#
# author: deng
# date : 20230221

import torch
import torch.nn as nn
from torch.optim import SGD
from tqdm import tqdm
import mlflow


class Net(nn.Module):
    """ define a simple neural network model """
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(10, 5)
        self.fc2 = nn.Linear(5, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x


def train(model, dataloader, criterion, optimizer, epochs):
    """ define the training function """
    for epoch in tqdm(range(epochs), 'Epochs'):
        for i, (inputs, labels) in enumerate(dataloader):
            # forwarding
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # update gradient
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # log loss
            mlflow.log_metric('train_loss', loss.item(), step=i)

    return loss


if __name__ == '__main__':

    # set hyper parameters
    learning_rate = 1e-2
    epochs = 20

    # create a dataloader with fake data
    dataloader = [(torch.randn(10), torch.randn(1)) for _ in range(100)]
    dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10)

    # create the model, criterion, and optimizer
    model = Net()
    criterion = nn.MSELoss()
    optimizer = SGD(model.parameters(), lr=learning_rate)

    # set the tracking URI to the model registry
    mlflow.set_tracking_uri('http://127.0.0.1:5000')
    mlflow.set_experiment('train_fortune_predict_model')

    # start the MLflow run
    with mlflow.start_run():

        # train the model
        loss = train(model, dataloader, criterion, optimizer, epochs)

        # log some additional metrics
        mlflow.log_metric('final_loss', loss.item())
        mlflow.log_param('learning_rate', learning_rate)

        # log trained model
        mlflow.pytorch.log_model(model, 'model')

        # log training code
        mlflow.log_artifact('./train.py', 'code')

        print('Completed.')

View File

@ -0,0 +1,16 @@
artifact_path: model
flavors:
  python_function:
    data: data
    env: conda.yaml
    loader_module: mlflow.pytorch
    pickle_module_name: mlflow.pytorch.pickle_module
    python_version: 3.10.9
  pytorch:
    code: null
    model_data: data
    pytorch_version: 1.13.1
mlflow_version: 1.30.0
model_uuid: a94b93c2d2674c97a5db43fda608ec76
run_id: 80447ac20c814d10a470208e303ea0ec
utc_time_created: '2023-02-22 01:12:22.906728'

View File

@ -0,0 +1,11 @@
channels:
- conda-forge
dependencies:
- python=3.10.9
- pip<=23.0.1
- pip:
  - mlflow
  - cloudpickle==2.2.1
  - torch==1.13.1
  - tqdm==4.64.1
name: mlflow-env

View File

@ -0,0 +1 @@
mlflow.pytorch.pickle_module

View File

@ -0,0 +1,7 @@
python: 3.10.9
build_dependencies:
- pip==23.0.1
- setuptools==67.3.2
- wheel==0.38.4
dependencies:
- -r requirements.txt

View File

@ -0,0 +1,4 @@
mlflow
cloudpickle==2.2.1
torch==1.13.1
tqdm==4.64.1

View File

@ -0,0 +1,96 @@
# train.py
#
# author: deng
# date : 20230221

import torch
import torch.nn as nn
from torch.optim import SGD
import mlflow
from mlflow.models.signature import ModelSignature
from mlflow.types.schema import Schema, ColSpec
from tqdm import tqdm


class Net(nn.Module):
    """ define a simple neural network model """
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(5, 3)
        self.fc2 = nn.Linear(3, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x


def train(model, dataloader, criterion, optimizer, epochs):
    """ define the training function """
    for epoch in tqdm(range(epochs), 'Epochs'):
        for i, (inputs, labels) in enumerate(dataloader):
            # forwarding
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # update gradient
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # log loss
            mlflow.log_metric('train_loss', loss.item(), step=epoch)

    return loss


if __name__ == '__main__':

    # set hyper parameters
    learning_rate = 1e-4
    epochs = 20

    # create a dataloader with fake data
    dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)]
    dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10)

    # create the model, criterion, and optimizer
    model = Net()
    criterion = nn.MSELoss()
    optimizer = SGD(model.parameters(), lr=learning_rate)

    # set the tracking URI to the model registry
    mlflow.set_tracking_uri('http://127.0.0.1:5000')
    mlflow.set_experiment('train_fortune_predict_model')

    # start a new MLflow run
    with mlflow.start_run():

        # train the model
        loss = train(model, dataloader, criterion, optimizer, epochs)

        # log some additional metrics
        mlflow.log_metric('final_loss', loss.item())
        mlflow.log_param('learning_rate', learning_rate)

        # create a signature to record model input and output
        input_schema = Schema([
            ColSpec('float', 'age'),
            ColSpec('float', 'mood level'),
            ColSpec('float', 'health level'),
            ColSpec('float', 'hungry level'),
            ColSpec('float', 'sexy level')
        ])
        output_schema = Schema([ColSpec('float', 'fortune')])
        signature = ModelSignature(inputs=input_schema, outputs=output_schema)

        # log trained model
        mlflow.pytorch.log_model(model, 'model', signature=signature)

        # log training code
        mlflow.log_artifact('./train.py', 'code')

        print('Completed.')

View File

@ -0,0 +1,21 @@
artifact_path: model
flavors:
  python_function:
    data: data
    env: conda.yaml
    loader_module: mlflow.pytorch
    pickle_module_name: mlflow.pytorch.pickle_module
    python_version: 3.10.9
  pytorch:
    code: null
    model_data: data
    pytorch_version: 1.13.1
mlflow_version: 1.30.0
model_uuid: 461190610cac47f5970e3386972070da
run_id: 936ba15a1bf74591a10e1711133f4be4
signature:
  inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"},
    {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"},
    {"name": "sexy level", "type": "float"}]'
  outputs: '[{"name": "fortune", "type": "float"}]'
utc_time_created: '2023-02-22 02:42:59.306161'

View File

@ -0,0 +1,11 @@
channels:
- conda-forge
dependencies:
- python=3.10.9
- pip<=23.0.1
- pip:
  - mlflow
  - cloudpickle==2.2.1
  - torch==1.13.1
  - tqdm==4.64.1
name: mlflow-env

View File

@ -0,0 +1 @@
mlflow.pytorch.pickle_module

View File

@ -0,0 +1,7 @@
python: 3.10.9
build_dependencies:
- pip==23.0.1
- setuptools==67.3.2
- wheel==0.38.4
dependencies:
- -r requirements.txt

View File

@ -0,0 +1,4 @@
mlflow
cloudpickle==2.2.1
torch==1.13.1
tqdm==4.64.1

View File

@ -0,0 +1,96 @@
# train.py
#
# author: deng
# date : 20230221

import torch
import torch.nn as nn
from torch.optim import SGD
import mlflow
from mlflow.models.signature import ModelSignature
from mlflow.types.schema import Schema, ColSpec
from tqdm import tqdm


class Net(nn.Module):
    """ define a simple neural network model """
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(5, 3)
        self.fc2 = nn.Linear(3, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x


def train(model, dataloader, criterion, optimizer, epochs):
    """ define the training function """
    for epoch in tqdm(range(epochs), 'Epochs'):
        for i, (inputs, labels) in enumerate(dataloader):
            # forwarding
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # update gradient
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # log loss
            mlflow.log_metric('train_loss', loss.item(), step=i)

    return loss


if __name__ == '__main__':

    # set hyper parameters
    learning_rate = 1e-2
    epochs = 20

    # create a dataloader with fake data
    dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)]
    dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10)

    # create the model, criterion, and optimizer
    model = Net()
    criterion = nn.MSELoss()
    optimizer = SGD(model.parameters(), lr=learning_rate)

    # set the tracking URI to the model registry
    mlflow.set_tracking_uri('http://127.0.0.1:5000')
    mlflow.set_experiment('train_fortune_predict_model')

    # start a new MLflow run
    with mlflow.start_run():

        # train the model
        loss = train(model, dataloader, criterion, optimizer, epochs)

        # log some additional metrics
        mlflow.log_metric('final_loss', loss.item())
        mlflow.log_param('learning_rate', learning_rate)

        # create a signature to record model input and output
        input_schema = Schema([
            ColSpec('float', 'age'),
            ColSpec('float', 'mood level'),
            ColSpec('float', 'health level'),
            ColSpec('float', 'hungry level'),
            ColSpec('float', 'sexy level')
        ])
        output_schema = Schema([ColSpec('float', 'fortune')])
        signature = ModelSignature(inputs=input_schema, outputs=output_schema)

        # log trained model
        mlflow.pytorch.log_model(model, 'model', signature=signature)

        # log training code
        mlflow.log_artifact('./train.py', 'code')

        print('Completed.')

View File

@ -0,0 +1,21 @@
artifact_path: model
flavors:
  python_function:
    data: data
    env: conda.yaml
    loader_module: mlflow.pytorch
    pickle_module_name: mlflow.pytorch.pickle_module
    python_version: 3.10.9
  pytorch:
    code: null
    model_data: data
    pytorch_version: 1.13.1
mlflow_version: 1.30.0
model_uuid: eb6ac492a8a5426d815dce50de4d2545
run_id: 9ea595f0043a477bb635754b7f77fa48
signature:
  inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"},
    {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"},
    {"name": "sexy level", "type": "float"}]'
  outputs: '[{"name": "fortune", "type": "float"}]'
utc_time_created: '2023-02-22 02:26:27.043234'

View File

@ -0,0 +1,11 @@
channels:
- conda-forge
dependencies:
- python=3.10.9
- pip<=23.0.1
- pip:
  - mlflow
  - cloudpickle==2.2.1
  - torch==1.13.1
  - tqdm==4.64.1
name: mlflow-env

View File

@ -0,0 +1 @@
mlflow.pytorch.pickle_module

View File

@ -0,0 +1,7 @@
python: 3.10.9
build_dependencies:
- pip==23.0.1
- setuptools==67.3.2
- wheel==0.38.4
dependencies:
- -r requirements.txt

View File

@ -0,0 +1,4 @@
mlflow
cloudpickle==2.2.1
torch==1.13.1
tqdm==4.64.1

View File

@ -0,0 +1,96 @@
# train.py
#
# author: deng
# date : 20230221

import torch
import torch.nn as nn
from torch.optim import SGD
import mlflow
from mlflow.models.signature import ModelSignature
from mlflow.types.schema import Schema, ColSpec
from tqdm import tqdm


class Net(nn.Module):
    """ define a simple neural network model """
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(5, 3)
        self.fc2 = nn.Linear(3, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x


def train(model, dataloader, criterion, optimizer, epochs):
    """ define the training function """
    for epoch in tqdm(range(epochs), 'Epochs'):
        for i, (inputs, labels) in enumerate(dataloader):
            # forwarding
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # update gradient
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # log loss
            mlflow.log_metric('train_loss', loss.item(), step=i)

    return loss


if __name__ == '__main__':

    # set hyper parameters
    learning_rate = 1e-2
    epochs = 20

    # create a dataloader with fake data
    dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)]
    dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10)

    # create the model, criterion, and optimizer
    model = Net()
    criterion = nn.MSELoss()
    optimizer = SGD(model.parameters(), lr=learning_rate)

    # set the tracking URI to the model registry
    mlflow.set_tracking_uri('http://127.0.0.1:5000')
    mlflow.set_experiment('train_fortune_predict_model')

    # start a new MLflow run
    with mlflow.start_run():

        # train the model
        loss = train(model, dataloader, criterion, optimizer, epochs)

        # log some additional metrics
        mlflow.log_metric('final_loss', loss.item())
        mlflow.log_param('learning_rate', learning_rate)

        # create a signature to record model input and output
        input_schema = Schema([
            ColSpec('float', 'age'),
            ColSpec('float', 'mood level'),
            ColSpec('float', 'health level'),
            ColSpec('float', 'hungry level'),
            ColSpec('float', 'sexy level')
        ])
        output_schema = Schema([ColSpec('float', 'fortune')])
        signature = ModelSignature(inputs=input_schema, outputs=output_schema)

        # log trained model
        mlflow.pytorch.log_model(model, 'model', signature=signature)

        # log training code
        mlflow.log_artifact('./train.py', 'code')

        print('Completed.')

View File

@ -0,0 +1,21 @@
artifact_path: model
flavors:
  python_function:
    data: data
    env: conda.yaml
    loader_module: mlflow.pytorch
    pickle_module_name: mlflow.pytorch.pickle_module
    python_version: 3.10.9
  pytorch:
    code: null
    model_data: data
    pytorch_version: 1.13.1
mlflow_version: 1.30.0
model_uuid: d240fe9348c24d1f871506e8d1155f56
run_id: ab28ca803756454881d83e7e85945df0
signature:
  inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"},
    {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"},
    {"name": "sexy level", "type": "float"}]'
  outputs: '[{"name": "fortune", "type": "float"}]'
utc_time_created: '2023-02-22 02:26:30.657849'

View File

@ -0,0 +1,11 @@
channels:
- conda-forge
dependencies:
- python=3.10.9
- pip<=23.0.1
- pip:
  - mlflow
  - cloudpickle==2.2.1
  - torch==1.13.1
  - tqdm==4.64.1
name: mlflow-env

View File

@ -0,0 +1 @@
mlflow.pytorch.pickle_module

View File

@ -0,0 +1,7 @@
python: 3.10.9
build_dependencies:
- pip==23.0.1
- setuptools==67.3.2
- wheel==0.38.4
dependencies:
- -r requirements.txt

View File

@ -0,0 +1,4 @@
mlflow
cloudpickle==2.2.1
torch==1.13.1
tqdm==4.64.1

View File

@ -0,0 +1,96 @@
# train.py
#
# author: deng
# date : 20230221

import torch
import torch.nn as nn
from torch.optim import SGD
import mlflow
from mlflow.models.signature import ModelSignature
from mlflow.types.schema import Schema, ColSpec
from tqdm import tqdm


class Net(nn.Module):
    """ define a simple neural network model """
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(5, 3)
        self.fc2 = nn.Linear(3, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x


def train(model, dataloader, criterion, optimizer, epochs):
    """ define the training function """
    for epoch in tqdm(range(epochs), 'Epochs'):
        for i, (inputs, labels) in enumerate(dataloader):
            # forwarding
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # update gradient
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # log loss
            mlflow.log_metric('train_loss', loss.item(), step=epoch)

    return loss


if __name__ == '__main__':

    # set hyper parameters
    learning_rate = 1e-3
    epochs = 20

    # create a dataloader with fake data
    dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)]
    dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10)

    # create the model, criterion, and optimizer
    model = Net()
    criterion = nn.MSELoss()
    optimizer = SGD(model.parameters(), lr=learning_rate)

    # set the tracking URI to the model registry
    mlflow.set_tracking_uri('http://127.0.0.1:5000')
    mlflow.set_experiment('train_fortune_predict_model')

    # start a new MLflow run
    with mlflow.start_run():

        # train the model
        loss = train(model, dataloader, criterion, optimizer, epochs)

        # log some additional metrics
        mlflow.log_metric('final_loss', loss.item())
        mlflow.log_param('learning_rate', learning_rate)

        # create a signature to record model input and output
        input_schema = Schema([
            ColSpec('float', 'age'),
            ColSpec('float', 'mood level'),
            ColSpec('float', 'health level'),
            ColSpec('float', 'hungry level'),
            ColSpec('float', 'sexy level')
        ])
        output_schema = Schema([ColSpec('float', 'fortune')])
        signature = ModelSignature(inputs=input_schema, outputs=output_schema)

        # log trained model
        mlflow.pytorch.log_model(model, 'model', signature=signature)

        # log training code
        mlflow.log_artifact('./train.py', 'code')

        print('Completed.')

View File

@ -0,0 +1,21 @@
artifact_path: model
flavors:
  python_function:
    data: data
    env: conda.yaml
    loader_module: mlflow.pytorch
    pickle_module_name: mlflow.pytorch.pickle_module
    python_version: 3.10.9
  pytorch:
    code: null
    model_data: data
    pytorch_version: 1.13.1
mlflow_version: 1.30.0
model_uuid: 2d856775bf3840668e99c8a230c6c663
run_id: ac13f624d0114ca494441db540b6629c
signature:
  inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"},
    {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"},
    {"name": "sexy level", "type": "float"}]'
  outputs: '[{"name": "fortune", "type": "float"}]'
utc_time_created: '2023-02-22 02:43:17.551796'

View File

@ -0,0 +1,11 @@
channels:
- conda-forge
dependencies:
- python=3.10.9
- pip<=23.0.1
- pip:
  - mlflow
  - cloudpickle==2.2.1
  - torch==1.13.1
  - tqdm==4.64.1
name: mlflow-env

View File

@ -0,0 +1 @@
mlflow.pytorch.pickle_module

View File

@ -0,0 +1,7 @@
python: 3.10.9
build_dependencies:
- pip==23.0.1
- setuptools==67.3.2
- wheel==0.38.4
dependencies:
- -r requirements.txt

View File

@ -0,0 +1,4 @@
mlflow
cloudpickle==2.2.1
torch==1.13.1
tqdm==4.64.1

View File

@ -0,0 +1,98 @@
# train.py
#
# author: deng
# date : 20230221

import torch
import torch.nn as nn
from torch.optim import SGD
import mlflow
from mlflow.models.signature import ModelSignature
from mlflow.types.schema import Schema, ColSpec
from tqdm import tqdm


class Net(nn.Module):
    """ define a simple neural network model """
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(5, 3)
        self.fc2 = nn.Linear(3, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x


def train(model, dataloader, criterion, optimizer, epochs):
    """ define the training function """
    for epoch in tqdm(range(epochs), 'Epochs'):
        for i, (inputs, labels) in enumerate(dataloader):
            # forwarding
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # update gradient
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # log loss
            mlflow.log_metric('train_loss', loss.item(), step=epoch)

    return loss


if __name__ == '__main__':

    # set hyper parameters
    learning_rate = 1e-2
    batch_size = 10
    epochs = 20

    # create a dataloader with fake data
    dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)]
    dataloader = torch.utils.data.DataLoader(dataloader, batch_size=batch_size)

    # create the model, criterion, and optimizer
    model = Net()
    criterion = nn.MSELoss()
    optimizer = SGD(model.parameters(), lr=learning_rate)

    # set the tracking URI to the model registry
    mlflow.set_tracking_uri('http://127.0.0.1:5000')
    mlflow.set_experiment('train_fortune_predict_model')

    # start a new MLflow run
    with mlflow.start_run():

        # train the model
        loss = train(model, dataloader, criterion, optimizer, epochs)

        # log some additional metrics
        mlflow.log_metric('final_loss', loss.item())
        mlflow.log_param('learning_rate', learning_rate)
        mlflow.log_param('batch_size', batch_size)

        # create a signature to record model input and output
        input_schema = Schema([
            ColSpec('float', 'age'),
            ColSpec('float', 'mood level'),
            ColSpec('float', 'health level'),
            ColSpec('float', 'hungry level'),
            ColSpec('float', 'sexy level')
        ])
        output_schema = Schema([ColSpec('float', 'fortune')])
        signature = ModelSignature(inputs=input_schema, outputs=output_schema)

        # log trained model
        mlflow.pytorch.log_model(model, 'model', signature=signature)

        # log training code
        mlflow.log_artifact('./train.py', 'code')

        print('Completed.')

View File

@ -0,0 +1,21 @@
artifact_path: model
flavors:
  python_function:
    data: data
    env: conda.yaml
    loader_module: mlflow.pytorch
    pickle_module_name: mlflow.pytorch.pickle_module
    python_version: 3.10.9
  pytorch:
    code: null
    model_data: data
    pytorch_version: 1.13.1
mlflow_version: 1.30.0
model_uuid: 1b7647843b35448d915254eda4751ed8
run_id: b2961f1b13a84398b7d7ee5be03ab0da
signature:
  inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"},
    {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"},
    {"name": "sexy level", "type": "float"}]'
  outputs: '[{"name": "fortune", "type": "float"}]'
utc_time_created: '2023-02-22 02:46:21.578911'

View File

@ -0,0 +1,11 @@
channels:
- conda-forge
dependencies:
- python=3.10.9
- pip<=23.0.1
- pip:
  - mlflow
  - cloudpickle==2.2.1
  - torch==1.13.1
  - tqdm==4.64.1
name: mlflow-env

View File

@ -0,0 +1 @@
mlflow.pytorch.pickle_module

View File

@ -0,0 +1,7 @@
python: 3.10.9
build_dependencies:
- pip==23.0.1
- setuptools==67.3.2
- wheel==0.38.4
dependencies:
- -r requirements.txt

View File

@ -0,0 +1,4 @@
mlflow
cloudpickle==2.2.1
torch==1.13.1
tqdm==4.64.1

View File

@ -0,0 +1,96 @@
# train.py
#
# author: deng
# date : 20230221

import torch
import torch.nn as nn
from torch.optim import SGD
import mlflow
from mlflow.models.signature import ModelSignature
from mlflow.types.schema import Schema, ColSpec
from tqdm import tqdm


class Net(nn.Module):
    """ define a simple neural network model """
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(5, 3)
        self.fc2 = nn.Linear(3, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x


def train(model, dataloader, criterion, optimizer, epochs):
    """ define the training function """
    for epoch in tqdm(range(epochs), 'Epochs'):
        for i, (inputs, labels) in enumerate(dataloader):
            # forwarding
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # update gradient
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # log loss
            mlflow.log_metric('train_loss', loss.item(), step=epoch)

    return loss


if __name__ == '__main__':

    # set hyper parameters
    learning_rate = 1e-2
    epochs = 20

    # create a dataloader with fake data
    dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)]
    dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10)

    # create the model, criterion, and optimizer
    model = Net()
    criterion = nn.MSELoss()
    optimizer = SGD(model.parameters(), lr=learning_rate)

    # set the tracking URI to the model registry
    mlflow.set_tracking_uri('http://127.0.0.1:5000')
    mlflow.set_experiment('train_fortune_predict_model')

    # start a new MLflow run
    with mlflow.start_run():

        # train the model
        loss = train(model, dataloader, criterion, optimizer, epochs)

        # log some additional metrics
        mlflow.log_metric('final_loss', loss.item())
        mlflow.log_param('learning_rate', learning_rate)

        # create a signature to record model input and output
        input_schema = Schema([
            ColSpec('float', 'age'),
            ColSpec('float', 'mood level'),
            ColSpec('float', 'health level'),
            ColSpec('float', 'hungry level'),
            ColSpec('float', 'sexy level')
        ])
        output_schema = Schema([ColSpec('float', 'fortune')])
        signature = ModelSignature(inputs=input_schema, outputs=output_schema)

        # log trained model
        mlflow.pytorch.log_model(model, 'model', signature=signature)

        # log training code
        mlflow.log_artifact('./train.py', 'code')

        print('Completed.')

View File

@ -0,0 +1,21 @@
artifact_path: model
flavors:
  python_function:
    data: data
    env: conda.yaml
    loader_module: mlflow.pytorch
    pickle_module_name: mlflow.pytorch.pickle_module
    python_version: 3.10.9
  pytorch:
    code: null
    model_data: data
    pytorch_version: 1.13.1
mlflow_version: 1.30.0
model_uuid: 74c6ba14fd8043cda903f9b6dfb93bae
run_id: c362618cffca44f69a4afabcd45defd7
signature:
  inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"},
    {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"},
    {"name": "sexy level", "type": "float"}]'
  outputs: '[{"name": "fortune", "type": "float"}]'
utc_time_created: '2023-02-22 02:42:16.398211'

View File

@ -0,0 +1,11 @@
channels:
- conda-forge
dependencies:
- python=3.10.9
- pip<=23.0.1
- pip:
  - mlflow
  - cloudpickle==2.2.1
  - torch==1.13.1
  - tqdm==4.64.1
name: mlflow-env

View File

@ -0,0 +1 @@
mlflow.pytorch.pickle_module

View File

@ -0,0 +1,7 @@
python: 3.10.9
build_dependencies:
- pip==23.0.1
- setuptools==67.3.2
- wheel==0.38.4
dependencies:
- -r requirements.txt

View File

@ -0,0 +1,4 @@
mlflow
cloudpickle==2.2.1
torch==1.13.1
tqdm==4.64.1

View File

@ -0,0 +1,96 @@
# train.py
#
# author: deng
# date : 20230221

import torch
import torch.nn as nn
from torch.optim import SGD
import mlflow
from mlflow.models.signature import ModelSignature
from mlflow.types.schema import Schema, ColSpec
from tqdm import tqdm


class Net(nn.Module):
    """ define a simple neural network model """
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(5, 3)
        self.fc2 = nn.Linear(3, 1)

    def forward(self, x):
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        return x


def train(model, dataloader, criterion, optimizer, epochs):
    """ define the training function """
    for epoch in tqdm(range(epochs), 'Epochs'):
        for i, (inputs, labels) in enumerate(dataloader):
            # forwarding
            outputs = model(inputs)
            loss = criterion(outputs, labels)

            # update gradient
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # log loss
            mlflow.log_metric('train_loss', loss.item(), step=i)

    return loss


if __name__ == '__main__':

    # set hyper parameters
    learning_rate = 1e-2
    epochs = 20

    # create a dataloader with fake data
    dataloader = [(torch.randn(5), torch.randn(1)) for _ in range(100)]
    dataloader = torch.utils.data.DataLoader(dataloader, batch_size=10)

    # create the model, criterion, and optimizer
    model = Net()
    criterion = nn.MSELoss()
    optimizer = SGD(model.parameters(), lr=learning_rate)

    # set the tracking URI to the model registry
    mlflow.set_tracking_uri('http://127.0.0.1:5000')
    mlflow.set_experiment('train_fortune_predict_model')

    # start a new MLflow run
    with mlflow.start_run():

        # train the model
        loss = train(model, dataloader, criterion, optimizer, epochs)

        # log some additional metrics
        mlflow.log_metric('final_loss', loss.item())
        mlflow.log_param('learning_rate', learning_rate)

        # create a signature to record model input and output
        input_schema = Schema([
            ColSpec('float', 'age'),
            ColSpec('float', 'mood level'),
            ColSpec('float', 'health level'),
            ColSpec('float', 'hungry level'),
            ColSpec('float', 'sexy level')
        ])
        output_schema = Schema([ColSpec('float', 'fortune')])
        signature = ModelSignature(inputs=input_schema, outputs=output_schema)

        # log trained model
        mlflow.pytorch.log_model(model, 'model', signature=signature)

        # log training code
        mlflow.log_artifact('./train.py', 'code')

        print('Completed.')

View File

@ -0,0 +1,21 @@
artifact_path: model
flavors:
  python_function:
    data: data
    env: conda.yaml
    loader_module: mlflow.pytorch
    pickle_module_name: mlflow.pytorch.pickle_module
    python_version: 3.10.9
  pytorch:
    code: null
    model_data: data
    pytorch_version: 1.13.1
mlflow_version: 1.30.0
model_uuid: 25e2ad2c703b4e79b56de97a6a6902d8
run_id: e8b7a070f5ff48c69c5b945d1fe2f392
signature:
  inputs: '[{"name": "age", "type": "float"}, {"name": "mood level", "type": "float"},
    {"name": "health level", "type": "float"}, {"name": "hungry level", "type": "float"},
    {"name": "sexy level", "type": "float"}]'
  outputs: '[{"name": "fortune", "type": "float"}]'
utc_time_created: '2023-02-22 02:24:12.697856'

View File

@ -0,0 +1,11 @@
channels:
- conda-forge
dependencies:
- python=3.10.9
- pip<=23.0.1
- pip:
  - mlflow
  - cloudpickle==2.2.1
  - torch==1.13.1
  - tqdm==4.64.1
name: mlflow-env

View File

@ -0,0 +1 @@
mlflow.pytorch.pickle_module

View File

@ -0,0 +1,7 @@
python: 3.10.9
build_dependencies:
- pip==23.0.1
- setuptools==67.3.2
- wheel==0.38.4
dependencies:
- -r requirements.txt

View File

@ -0,0 +1,4 @@
mlflow
cloudpickle==2.2.1
torch==1.13.1
tqdm==4.64.1