Commit a33ba34

[tmva] Implement linter suggestions in Python files
1 parent 64e70e5

File tree: 6 files changed (+36, -53 lines)

tmva/pymva/test/generatePyTorchModelClassification.py

Lines changed: 11 additions & 16 deletions
@@ -2,11 +2,7 @@
 from torch import nn
 
 # Define model
-model = nn.Sequential(
-    nn.Linear(4, 64),
-    nn.ReLU(),
-    nn.Linear(64, 2),
-    nn.Softmax(dim=1))
+model = nn.Sequential(nn.Linear(4, 64), nn.ReLU(), nn.Linear(64, 2), nn.Softmax(dim=1))
 
 # Construct loss function and Optimizer.
 criterion = torch.nn.MSELoss()
@@ -33,8 +29,8 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit
 
             # print train statistics
             running_train_loss += train_loss.item()
-            if i % 32 == 31: # print every 32 mini-batches
-                print(f"[{epoch+1}, {i+1}] train loss: {running_train_loss / 32 :.3f}")
+            if i % 32 == 31:  # print every 32 mini-batches
+                print(f"[{epoch + 1}, {i + 1}] train loss: {running_train_loss / 32:.3f}")
                 running_train_loss = 0.0
 
         if schedule:
@@ -51,23 +47,23 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit
 
         curr_val = running_val_loss / len(val_loader)
         if save_best:
-           if best_val==None:
-              best_val = curr_val
-           best_val = save_best(model, curr_val, best_val)
+            if best_val is None:
+                best_val = curr_val
+            best_val = save_best(model, curr_val, best_val)
 
         # print val statistics per epoch
-        print(f"[{epoch+1}] val loss: {curr_val :.3f}")
+        print(f"[{epoch + 1}] val loss: {curr_val:.3f}")
         running_val_loss = 0.0
 
-    print(f"Finished Training on {epoch+1} Epochs!")
+    print(f"Finished Training on {epoch + 1} Epochs!")
 
     return model
 
 
 def predict(model, test_X, batch_size=32):
     # Set to eval mode
     model.eval()
-
+
     test_dataset = torch.utils.data.TensorDataset(torch.Tensor(test_X))
     test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
 
@@ -78,13 +74,12 @@ def predict(model, test_X, batch_size=32):
             outputs = model(X)
             predictions.append(outputs)
         preds = torch.cat(predictions)
-
+
     return preds.numpy()
 
 
 load_model_custom_objects = {"optimizer": optimizer, "criterion": criterion, "train_func": fit, "predict_func": predict}
 
 # Store model to file
 m = torch.jit.script(model)
-torch.jit.save(m,"PyTorchModelClassification.pt")
-
+torch.jit.save(m, "PyTorchModelClassification.pt")
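The best_val==None to best_val is None change in this file (and in the two scripts below) follows PEP 8's rule that comparisons to singletons use identity, not equality. A minimal illustration of why linters flag == None, using a hypothetical class that is not part of this commit:

class AlwaysEqual:
    # hypothetical class whose __eq__ answers True for any comparison
    def __eq__(self, other):
        return True

x = AlwaysEqual()
print(x == None)  # True  -> an overridden __eq__ can make equality to None misleading
print(x is None)  # False -> the identity check cannot be overridden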

tmva/pymva/test/generatePyTorchModelMulticlass.py

Lines changed: 8 additions & 13 deletions
@@ -2,11 +2,7 @@
 from torch import nn
 
 # Define model
-model = nn.Sequential(
-    nn.Linear(4, 64),
-    nn.ReLU(),
-    nn.Linear(64, 4),
-    nn.Softmax(dim=1))
+model = nn.Sequential(nn.Linear(4, 64), nn.ReLU(), nn.Linear(64, 4), nn.Softmax(dim=1))
 
 # Construct loss function and Optimizer.
 criterion = nn.CrossEntropyLoss()
@@ -34,7 +30,7 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit
 
             # print train statistics
             running_train_loss += train_loss.item()
-            if i % 4 == 3: # print every 4 mini-batches
+            if i % 4 == 3:  # print every 4 mini-batches
                 print(f"[{epoch+1}, {i+1}] train loss: {running_train_loss / 4 :.3f}")
                 running_train_loss = 0.0
 
@@ -53,9 +49,9 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit
 
         curr_val = running_val_loss / len(val_loader)
         if save_best:
-           if best_val==None:
-              best_val = curr_val
-           best_val = save_best(model, curr_val, best_val)
+            if best_val is None:
+                best_val = curr_val
+            best_val = save_best(model, curr_val, best_val)
 
         # print val statistics per epoch
         print(f"[{epoch+1}] val loss: {curr_val :.3f}")
@@ -69,7 +65,7 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit
 def predict(model, test_X, batch_size=32):
     # Set to eval mode
     model.eval()
-
+
     test_dataset = torch.utils.data.TensorDataset(torch.Tensor(test_X))
     test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
 
@@ -80,13 +76,12 @@ def predict(model, test_X, batch_size=32):
             outputs = model(X)
             predictions.append(outputs)
         preds = torch.cat(predictions)
-
+
    return preds.numpy()
 
 
 load_model_custom_objects = {"optimizer": optimizer, "criterion": criterion, "train_func": fit, "predict_func": predict}
 
 # Store model to file
 m = torch.jit.script(model)
-torch.jit.save(m,"PyTorchModelMulticlass.pt")
-
+torch.jit.save(m, "PyTorchModelMulticlass.pt")

tmva/pymva/test/generatePyTorchModelRegression.py

Lines changed: 8 additions & 11 deletions
@@ -2,10 +2,7 @@
 from torch import nn
 
 # Define model
-model = nn.Sequential(
-    nn.Linear(2, 64),
-    nn.Tanh(),
-    nn.Linear(64, 1))
+model = nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 1))
 
 # Construct loss function and Optimizer.
 criterion = torch.nn.MSELoss()
@@ -32,7 +29,7 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit
 
             # print train statistics
             running_train_loss += train_loss.item()
-            if i % 32 == 31: # print every 32 mini-batches
+            if i % 32 == 31:  # print every 32 mini-batches
                 print(f"[{epoch+1}, {i+1}] train loss: {running_train_loss / 32 :.3f}")
                 running_train_loss = 0.0
 
@@ -50,9 +47,9 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit
 
         curr_val = running_val_loss / len(val_loader)
         if save_best:
-           if best_val==None:
-              best_val = curr_val
-           best_val = save_best(model, curr_val, best_val)
+            if best_val is None:
+                best_val = curr_val
+            best_val = save_best(model, curr_val, best_val)
 
         # print val statistics per epoch
         print(f"[{epoch+1}] val loss: {curr_val :.3f}")
@@ -66,7 +63,7 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit
 def predict(model, test_X, batch_size=32):
     # Set to eval mode
     model.eval()
-
+
     test_dataset = torch.utils.data.TensorDataset(torch.Tensor(test_X))
     test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
 
@@ -77,12 +74,12 @@ def predict(model, test_X, batch_size=32):
             outputs = model(X)
             predictions.append(outputs)
         preds = torch.cat(predictions)
-
+
     return preds.numpy()
 
 
 load_model_custom_objects = {"optimizer": optimizer, "criterion": criterion, "train_func": fit, "predict_func": predict}
 
 # Store model to file
 m = torch.jit.script(model)
-torch.jit.save(m,"PyTorchModelRegression.pt")
+torch.jit.save(m, "PyTorchModelRegression.pt")
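Each of the three generator scripts above ends by exporting the network with torch.jit.script and torch.jit.save. As a minimal sketch (not part of this commit), reading one of those TorchScript files back for inference could look like:

import torch

model = torch.jit.load("PyTorchModelClassification.pt")  # file name taken from the first script
model.eval()
batch = torch.rand(5, 4)       # the classification model expects 4 input features
with torch.no_grad():
    print(model(batch))        # softmax scores over the 2 output classes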

tutorials/machine_learning/TMVA_SOFIE_Inference.py

Lines changed: 1 addition & 2 deletions
@@ -14,9 +14,8 @@
 ### \macro_output
 ### \author Lorenzo Moneta
 
-import ROOT
 import numpy as np
-
+import ROOT
 
 # check if the input file exists
 modelFile = "Higgs_trained_model.h5"

tutorials/machine_learning/TMVA_SOFIE_Models.py

Lines changed: 5 additions & 8 deletions
@@ -13,18 +13,16 @@
 ### \macro_output
 ### \author Lorenzo Moneta
 
-import ROOT
-from os.path import exists
-
-
-## generate and train Keras models with different architectures
+import os
 
 import numpy as np
-from tensorflow.keras.models import Sequential
+import ROOT
+from sklearn.model_selection import train_test_split
 from tensorflow.keras.layers import Dense
+from tensorflow.keras.models import Sequential
 from tensorflow.keras.optimizers import Adam
 
-from sklearn.model_selection import train_test_split
+## generate and train Keras models with different architectures
 
 def CreateModel(nlayers = 4, nunits = 64):
     model = Sequential()
@@ -101,7 +99,6 @@ def GenerateModelCode(modelFile, generatedHeaderFile):
 
 generatedHeaderFile = "Higgs_Model.hxx"
 #need to remove existing header file since we are appending on same one
-import os
 if (os.path.exists(generatedHeaderFile)):
     weightFile = "Higgs_Model.root"
     print("removing existing files", generatedHeaderFile,weightFile)

tutorials/machine_learning/TMVA_SOFIE_RDataFrame.py

Lines changed: 3 additions & 3 deletions
@@ -10,13 +10,13 @@
 ### \macro_output
 ### \author Lorenzo Moneta
 
-import ROOT
 from os.path import exists
 
+import ROOT
 
 # check if the input file exists
 modelFile = "Higgs_trained_model.h5"
-modelName = "Higgs_trained_model";
+modelName = "Higgs_trained_model"
 
 if not exists(modelFile):
     raise FileNotFoundError("You need to run TMVA_Higgs_Classification.C to generate the Keras trained model")
@@ -43,7 +43,7 @@
 h2 = df2.Define("DNN_Value", "sofie_functor(rdfslot_,m_jj, m_jjj, m_lv, m_jlv, m_bb, m_wbb, m_wwbb)").Histo1D(("h_bkg", "", 100, 0, 1),"DNN_Value")
 
 # run over the input data once, combining both RDataFrame graphs.
-ROOT.RDF.RunGraphs([h1, h2]);
+ROOT.RDF.RunGraphs([h1, h2])
 
 print("Number of signal entries",h1.GetEntries())
 print("Number of background entries",h2.GetEntries())
