@@ -42,17 +42,17 @@ namespace ml
4242std::string OnnxModel::printShape(const std::vector<int64_t>& v)
4343{
4444 std::stringstream ss("");
45- for (size_t i = 0; i < v.size() - 1; i++)
45+  for (std::size_t i = 0; i < v.size() - 1; i++)
4646 ss << v[i] << "x";
4747 ss << v[v.size() - 1];
4848 return ss.str();
4949}
5050
51- bool OnnxModel::checkHyperloop(bool verbose)
51+ bool OnnxModel::checkHyperloop(const bool verbose)
5252{
5353 /// Testing hyperloop core settings
5454 const char* alienCores = gSystem->Getenv("ALIEN_JDL_CPUCORES");
55- bool alienCoresFound = (alienCores != NULL);
55+  const bool alienCoresFound = (alienCores != nullptr);
5656 if (alienCoresFound) {
5757 if (verbose) {
5858 LOGP(info, "Hyperloop test/Grid job detected! Number of cores = {}. Setting threads anyway to 1.", alienCores);
@@ -68,7 +68,7 @@ bool OnnxModel::checkHyperloop(bool verbose)
6868 return alienCoresFound;
6969}
7070
71- void OnnxModel::initModel(std::string localPath, bool enableOptimizations, int threads, uint64_t from, uint64_t until)
71+ void OnnxModel::initModel(const std::string& localPath, const bool enableOptimizations, const int threads, const uint64_t from, const uint64_t until)
7272{
7373
7474 assert(from <= until);
@@ -90,26 +90,26 @@ void OnnxModel::initModel(std::string localPath, bool enableOptimizations, int t
9090 mEnv = std::make_shared<Ort::Env>(ORT_LOGGING_LEVEL_WARNING, "onnx-model");
9191 mSession = std::make_shared<Ort::Session>(*mEnv, modelPath.c_str(), sessionOptions);
9292
93- Ort::AllocatorWithDefaultOptions tmpAllocator;
94- for (size_t i = 0; i < mSession->GetInputCount(); ++i) {
93+  const Ort::AllocatorWithDefaultOptions tmpAllocator;
94+  for (std::size_t i = 0; i < mSession->GetInputCount(); ++i) {
9595 mInputNames.push_back(mSession->GetInputNameAllocated(i, tmpAllocator).get());
9696 }
97- for (size_t i = 0; i < mSession->GetInputCount(); ++i) {
97+  for (std::size_t i = 0; i < mSession->GetInputCount(); ++i) {
9898 mInputShapes.emplace_back(mSession->GetInputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape());
9999 }
100- for (size_t i = 0; i < mSession->GetOutputCount(); ++i) {
100+  for (std::size_t i = 0; i < mSession->GetOutputCount(); ++i) {
101101 mOutputNames.push_back(mSession->GetOutputNameAllocated(i, tmpAllocator).get());
102102 }
103- for (size_t i = 0; i < mSession->GetOutputCount(); ++i) {
103+  for (std::size_t i = 0; i < mSession->GetOutputCount(); ++i) {
104104 mOutputShapes.emplace_back(mSession->GetOutputTypeInfo(i).GetTensorTypeAndShapeInfo().GetShape());
105105 }
106106 LOG(info) << "Input Nodes:";
107- for (size_t i = 0; i < mInputNames.size(); i++) {
107+  for (std::size_t i = 0; i < mInputNames.size(); i++) {
108108 LOG(info) << "\t" << mInputNames[i] << " : " << printShape(mInputShapes[i]);
109109 }
110110
111111 LOG(info) << "Output Nodes:";
112- for (size_t i = 0; i < mOutputNames.size(); i++) {
112+  for (std::size_t i = 0; i < mOutputNames.size(); i++) {
113113 LOG(info) << "\t" << mOutputNames[i] << " : " << printShape(mOutputShapes[i]);
114114 }
115115
@@ -121,7 +121,7 @@ void OnnxModel::initModel(std::string localPath, bool enableOptimizations, int t
121121 LOG(info) << "--- Model initialized! ---";
122122}
123123
124- void OnnxModel::setActiveThreads(int threads)
124+ void OnnxModel::setActiveThreads(const int threads)
125125{
126126 activeThreads = threads;
127127 if (!checkHyperloop(false)) {
0 commit comments