diff --git a/src/02hardware/src/mem_offset_calculator.cc b/src/02hardware/src/mem_offset_calculator.cc
index 846d72072..bd7571969 100644
--- a/src/02hardware/src/mem_offset_calculator.cc
+++ b/src/02hardware/src/mem_offset_calculator.cc
@@ -119,7 +119,7 @@ namespace refactor::hardware {
     }
     void OffsetCalculator::trace(std::string event) {
-        logi("CALCULATOR {} {} {:>5} {:>5} {:>#10} {:>#6f} {:>5} {:>#10} {:>#10}",
+        logd("CALCULATOR {} {} {:>5} {:>5} {:>#10} {:>#6f} {:>5} {:>#10} {:>#10}",
             reinterpret_cast<void *>(this),
             event,
             _traceInfo->allocTimes,
             _traceInfo->freeTimes,
diff --git a/src/06frontend/src/graph.cc b/src/06frontend/src/graph.cc
index 4848dc5b2..713624ac5 100644
--- a/src/06frontend/src/graph.cc
+++ b/src/06frontend/src/graph.cc
@@ -1,6 +1,7 @@
 #include "frontend/graph.h"
 #include "frontend/tensor.h"
 #include
+#include <cstdlib>
 #include
 #include
 #include
@@ -26,6 +27,19 @@ namespace refactor::frontend {
                 c = '_';
             }
         }
+        if (auto env = std::getenv("LOG_LEVEL"); env) {
+            if (std::strcmp(env, "DBG") == 0) {
+                fmtlog::setLogLevel(fmtlog::DBG);
+            } else if (std::strcmp(env, "INF") == 0) {
+                fmtlog::setLogLevel(fmtlog::INF);
+            } else if (std::strcmp(env, "WRN") == 0) {
+                fmtlog::setLogLevel(fmtlog::WRN);
+            } else if (std::strcmp(env, "ERR") == 0) {
+                fmtlog::setLogLevel(fmtlog::ERR);
+            } else if (std::strcmp(env, "OFF") == 0) {
+                fmtlog::setLogLevel(fmtlog::OFF);
+            }
+        }
         fmtlog::setLogFile(dir.append(name).c_str(), false);
         fmtlog::startPollingThread();
         logi("process start with log file {}", name);
@@ -208,65 +222,54 @@ namespace refactor::frontend {
     void Graph::logGraph() const {
         std::unordered_set<std::string_view> frontNodes, dynamicNodes;
         std::unordered_set<size_t> dataEdges;
-        auto it = _internal.topology.begin();
-        auto const end = _internal.topology.end();
-        {
-            logi("compute on device: ");
-            auto i = 0;
-            while (it != end) {
-                auto [nodeIdx, inputs, outputs] = *it++;
-                if (!std::all_of(outputs.begin(), outputs.end(),
-                                 [this](auto i) { return _internal.edges[i].tensor->data; })) {
-                    auto const &node = _internal.nodes[nodeIdx];
-                    logi("{:>8}. {}", i++, node.name);
-                    auto opType = node.op->opTypeName();
-                    dynamicNodes.insert(opType);
-                    auto front = true;
-                    for (auto i : inputs) {
-                        if (_internal.edges[i].tensor->data) {
-                            dataEdges.insert(i);
-                        } else {
-                            front = false;
-                        }
-                    }
-                    if (front) {
-                        frontNodes.insert(opType);
+
+        logi("compute on device: ");
+        for (auto i = 0; auto [nodeIdx, inputs, outputs] : _internal.topology) {
+            if (!std::all_of(outputs.begin(), outputs.end(),
+                             [this](auto i) { return _internal.edges[i].tensor->data; })) {
+                auto const &node = _internal.nodes[nodeIdx];
+                logi("{:>8}. {}", i++, node.name);
+                auto opType = node.op->opTypeName();
+                dynamicNodes.insert(opType);
+                auto front = true;
+                for (auto i : inputs) {
+                    if (_internal.edges[i].tensor->data) {
+                        dataEdges.insert(i);
+                    } else {
+                        front = false;
                     }
                 }
-            }
-        }
-        {
-            logi("types:");
-            auto i = 0;
-            for (auto const &node : dynamicNodes) {
-                if (frontNodes.erase(node)) {
-                    logi("{:>8}.*{}", i++, node);
-                } else {
-                    logi("{:>8}. {}", i++, node);
+                if (front) {
+                    frontNodes.insert(opType);
                 }
             }
         }
-        {
-            logi("edges to copy:");
-            auto i = 0;
-            for (auto edgeIdx : dataEdges) {
-                auto const &edge = _internal.edges[edgeIdx];
-                std::string depVariables = "[ ";
-                for (auto const &var : edge.tensor->depVariables) {
-                    depVariables += var->name;
-                    depVariables += ' ';
-                }
-                depVariables += ']';
-                logi("{:>8}. {} {} ** {}", i++, edge.name, shapeFormat(edge.tensor->shape), depVariables);
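With this change, the C++ runtime reads a `LOG_LEVEL` environment variable at startup and switches fmtlog to one of exactly five levels: `DBG`, `INF`, `WRN`, `ERR`, or `OFF`; any other value falls through and keeps the compiled-in default. A minimal sketch of driving the switch from a Rust launcher — the binary name `model_runner` is a placeholder, not something this patch introduces:

```rust
use std::process::Command;

fn main() {
    // Any process that reaches the patched setup code in graph.cc can be
    // switched to verbose logging purely through its environment.
    // "model_runner" is a hypothetical binary name used for illustration.
    let status = Command::new("model_runner")
        .env("LOG_LEVEL", "DBG") // one of: DBG, INF, WRN, ERR, OFF
        .status()
        .expect("failed to launch model_runner");
    assert!(status.success());
}
```

Because the parser has no `else` branch, a misspelled value degrades to the default level instead of failing, which keeps the variable safe to set globally.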
{} {} ** {}", i++, edge.name, shapeFormat(edge.tensor->shape), depVariables); + + logi("types:"); + for (auto i = 0; auto const &node : dynamicNodes) { + if (frontNodes.erase(node)) { + logi("{:>8}.*{}", i++, node); + } else { + logi("{:>8}. {}", i++, node); } } - { - logi("outputs:"); - auto i = 0; - for (auto edgeIdx : it.globalOutputs()) { - auto const &edge = _internal.edges[edgeIdx]; - logi(" outputs[{:>2}] = edge[{:>2}] = {} with {}", i++, edgeIdx, edge.name, shapeFormat(edge.tensor->shape)); + + logi("edges to copy:"); + for (auto i = 0; auto edgeIdx : dataEdges) { + auto const &edge = _internal.edges[edgeIdx]; + std::string depVariables = "[ "; + for (auto const &var : edge.tensor->depVariables) { + depVariables += var->name; + depVariables += ' '; } + depVariables += ']'; + logi("{:>8}. {} {} ** {}", i++, edge.name, shapeFormat(edge.tensor->shape), depVariables); + } + + logi("outputs:"); + for (auto i = 0; auto edgeIdx : _internal.topology.globalOutputs()) { + auto const &edge = _internal.edges[edgeIdx]; + logi(" outputs[{:>2}] = edge[{:>2}] = {} with {}", i++, edgeIdx, edge.name, shapeFormat(edge.tensor->shape)); } } }// namespace refactor::frontend diff --git a/utilities/src/infer.rs b/utilities/src/infer.rs index 6b5ab1344..42283e01c 100644 --- a/utilities/src/infer.rs +++ b/utilities/src/infer.rs @@ -47,17 +47,20 @@ model_path = Path(\"{}\").resolve() ) } -pub fn infer(path: impl AsRef) { +pub fn infer(path: impl AsRef, log: Option) { let path = path.as_ref(); assert!( path.is_file() && path.extension() == Some(OsStr::new("onnx")), "\"{}\" is not a onnx file", path.display(), ); - Command::new("python") + let mut python = Command::new("python"); + python .current_dir(proj_dir()) .arg("-c") - .arg(format!("{}{}", model_path(path), SCRIPT)) - .status() - .unwrap(); + .arg(format!("{}{}", model_path(path), SCRIPT)); + if let Some(log) = log { + python.env("LOG_LEVEL", log.to_uppercase()); + } + python.status().unwrap(); } diff --git a/utilities/src/main.rs b/utilities/src/main.rs index 315c85c37..09342263b 100644 --- a/utilities/src/main.rs +++ b/utilities/src/main.rs @@ -4,7 +4,6 @@ mod make; use clap::{Parser, Subcommand}; use std::{ - ffi::OsString, fs, io::ErrorKind, path::{Path, PathBuf}, @@ -32,7 +31,7 @@ enum Commands { install_python: bool, /// devices support #[clap(long)] - dev: Option>, + dev: Option>, /// specify c++ compiler #[clap(long)] cxx_compiler: Option, @@ -44,7 +43,11 @@ enum Commands { /// format source files Format, /// run model inference - Infer { path: PathBuf }, + Infer { + path: PathBuf, + #[clap(long)] + log: Option, + }, } pub fn proj_dir() -> &'static Path { @@ -78,6 +81,6 @@ fn main() { Commands::Format => format::format(), - Commands::Infer { path } => infer::infer(path), + Commands::Infer { path, log } => infer::infer(path, log), } } diff --git a/utilities/src/make.rs b/utilities/src/make.rs index 8be28a1d6..75ecacf59 100644 --- a/utilities/src/make.rs +++ b/utilities/src/make.rs @@ -1,11 +1,5 @@ use crate::proj_dir; -use std::{ - collections::HashSet, - ffi::{OsStr, OsString}, - fs, - path::PathBuf, - process::Command, -}; +use std::{collections::HashSet, ffi::OsStr, fs, path::PathBuf, process::Command}; #[derive(PartialEq, Eq, Hash, Debug)] enum Target { @@ -16,7 +10,7 @@ enum Target { pub fn make( release: bool, install_python: bool, - dev: Option>, + dev: Option>, cxx_compiler: Option, ) { let release = if release { "Release" } else { "Debug" }; @@ -24,16 +18,11 @@ pub fn make( .unwrap_or_default() .into_iter() .map(|d| 
diff --git a/utilities/src/make.rs b/utilities/src/make.rs
index 8be28a1d6..75ecacf59 100644
--- a/utilities/src/make.rs
+++ b/utilities/src/make.rs
@@ -1,11 +1,5 @@
 use crate::proj_dir;
-use std::{
-    collections::HashSet,
-    ffi::{OsStr, OsString},
-    fs,
-    path::PathBuf,
-    process::Command,
-};
+use std::{collections::HashSet, ffi::OsStr, fs, path::PathBuf, process::Command};
 
 #[derive(PartialEq, Eq, Hash, Debug)]
 enum Target {
@@ -16,7 +10,7 @@ enum Target {
 pub fn make(
     release: bool,
     install_python: bool,
-    dev: Option<Vec<OsString>>,
+    dev: Option<Vec<String>>,
     cxx_compiler: Option<PathBuf>,
 ) {
     let release = if release { "Release" } else { "Debug" };
@@ -24,16 +18,11 @@
         .unwrap_or_default()
         .into_iter()
         .map(|d| d.to_ascii_lowercase())
-        .filter_map(|d| {
-            if d == OsStr::new("cuda") || d == OsStr::new("nvidia") {
-                Some(Target::Nvidia)
-            } else if d == OsStr::new("kunlun")
-                || d == OsStr::new("kunlunxin")
-                || d == OsStr::new("baidu")
-            {
-                Some(Target::Baidu)
-            } else {
-                eprintln!("warning: unknown device: {:?}", d);
+        .filter_map(|d| match d.as_str() {
+            "cuda" | "nvidia" => Some(Target::Nvidia),
+            "kunlun" | "kunlunxin" | "baidu" => Some(Target::Baidu),
+            _ => {
+                eprintln!("Unknown device: {}", d);
                 None
             }
         })
@@ -44,22 +33,24 @@
     let proj_dir = proj_dir();
     let build = proj_dir.join("build");
     fs::create_dir_all(&build).unwrap();
-    let mut cmd = Command::new("cmake");
-    cmd.current_dir(&proj_dir)
-        .arg("-Bbuild")
-        .arg(format!("-DCMAKE_BUILD_TYPE={release}"))
-        .arg(format!("-DUSE_CUDA={}", dev(Target::Nvidia)))
-        .arg(format!("-DUSE_KUNLUN={}", dev(Target::Baidu)));
-    if let Some(cxx_compiler) = cxx_compiler {
-        cmd.arg(format!("-DCMAKE_CXX_COMPILER={}", cxx_compiler.display()));
+    {
+        let mut cmake = Command::new("cmake");
+        cmake
+            .current_dir(&proj_dir)
+            .arg("-Bbuild")
+            .arg(format!("-DCMAKE_BUILD_TYPE={release}"))
+            .arg(format!("-DUSE_CUDA={}", dev(Target::Nvidia)))
+            .arg(format!("-DUSE_KUNLUN={}", dev(Target::Baidu)));
+        if let Some(cxx_compiler) = cxx_compiler {
+            cmake.arg(format!("-DCMAKE_CXX_COMPILER={}", cxx_compiler.display()));
+        }
+        assert!(cmake.status().unwrap().success());
+    }
+    {
+        let mut make = Command::new("make");
+        make.current_dir(&build).arg("-j");
+        assert!(make.status().unwrap().success());
     }
-    cmd.status().unwrap();
-
-    Command::new("make")
-        .current_dir(&build)
-        .arg("-j")
-        .status()
-        .unwrap();
 
     if install_python {
         let from = fs::read_dir(build.join("src/09python_ffi"))
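The rewritten `filter_map` is also much easier to test now that it matches on plain `&str` instead of comparing `OsStr` values. A self-contained sketch of the same normalization logic; `Target` and the hypothetical helper `parse_target` mirror the patch but are reproduced standalone here:

```rust
#[derive(PartialEq, Eq, Hash, Debug)]
enum Target {
    Nvidia,
    Baidu,
}

// Standalone copy of the device-name matching from make.rs: names are
// lower-cased first; unknown devices print a warning and are dropped.
fn parse_target(d: &str) -> Option<Target> {
    match d.to_ascii_lowercase().as_str() {
        "cuda" | "nvidia" => Some(Target::Nvidia),
        "kunlun" | "kunlunxin" | "baidu" => Some(Target::Baidu),
        _ => {
            eprintln!("Unknown device: {d}");
            None
        }
    }
}

fn main() {
    assert_eq!(parse_target("CUDA"), Some(Target::Nvidia));
    assert_eq!(parse_target("KunlunXin"), Some(Target::Baidu));
    assert_eq!(parse_target("tpu"), None);
}
```

A second effect of the rebuilt build block is that both the `cmake` configure step and the `make` step are now wrapped in `assert!(... .success())`, so a failed configure aborts instead of falling through to `make` the way the old `cmd.status().unwrap()` did (which only panicked if the process could not be spawned at all).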