feat(utils): support setting the log level via an environment variable
Signed-off-by: YdrMaster <ydrml@hotmail.com>
YdrMaster committed Dec 4, 2023
1 parent 0bafaad commit c1403d7
Showing 5 changed files with 94 additions and 94 deletions.
2 changes: 1 addition & 1 deletion src/02hardware/src/mem_offset_calculator.cc
@@ -119,7 +119,7 @@ namespace refactor::hardware {
}

void OffsetCalculator::trace(std::string event) {
logi("CALCULATOR {} {} {:>5} {:>5} {:>#10} {:>#6f} {:>5} {:>#10} {:>#10}",
logd("CALCULATOR {} {} {:>5} {:>5} {:>#10} {:>#6f} {:>5} {:>#10} {:>#10}",
reinterpret_cast<void *>(this),
event,
_traceInfo->allocTimes, _traceInfo->freeTimes,
105 changes: 54 additions & 51 deletions src/06frontend/src/graph.cc
@@ -1,6 +1,7 @@
#include "frontend/graph.h"
#include "frontend/tensor.h"
#include <chrono>
#include <cstdlib>
#include <execution>
#include <filesystem>
#include <fmtlog.h>
@@ -26,6 +27,19 @@ namespace refactor::frontend {
c = '_';
}
}
if (auto env = std::getenv("LOG_LEVEL"); env) {
if (std::strcmp(env, "DBG") == 0) {
fmtlog::setLogLevel(fmtlog::DBG);
} else if (std::strcmp(env, "INF") == 0) {
fmtlog::setLogLevel(fmtlog::INF);
} else if (std::strcmp(env, "WRN") == 0) {
fmtlog::setLogLevel(fmtlog::WRN);
} else if (std::strcmp(env, "ERR") == 0) {
fmtlog::setLogLevel(fmtlog::ERR);
} else if (std::strcmp(env, "OFF") == 0) {
fmtlog::setLogLevel(fmtlog::OFF);
}
}
fmtlog::setLogFile(dir.append(name).c_str(), false);
fmtlog::startPollingThread();
logi("process start with log file {}", name);
@@ -208,65 +222,54 @@ namespace refactor::frontend {
void Graph::logGraph() const {
std::unordered_set<std::string_view> frontNodes, dynamicNodes;
std::unordered_set<size_t> dataEdges;
auto it = _internal.topology.begin();
auto const end = _internal.topology.end();
{
logi("compute on device: ");
auto i = 0;
while (it != end) {
auto [nodeIdx, inputs, outputs] = *it++;
if (!std::all_of(outputs.begin(), outputs.end(),
[this](auto i) { return _internal.edges[i].tensor->data; })) {
auto const &node = _internal.nodes[nodeIdx];
logi("{:>8}. {}", i++, node.name);
auto opType = node.op->opTypeName();
dynamicNodes.insert(opType);
auto front = true;
for (auto i : inputs) {
if (_internal.edges[i].tensor->data) {
dataEdges.insert(i);
} else {
front = false;
}
}
if (front) {
frontNodes.insert(opType);

logi("compute on device: ");
for (auto i = 0; auto [nodeIdx, inputs, outputs] : _internal.topology) {
if (!std::all_of(outputs.begin(), outputs.end(),
[this](auto i) { return _internal.edges[i].tensor->data; })) {
auto const &node = _internal.nodes[nodeIdx];
logi("{:>8}. {}", i++, node.name);
auto opType = node.op->opTypeName();
dynamicNodes.insert(opType);
auto front = true;
for (auto i : inputs) {
if (_internal.edges[i].tensor->data) {
dataEdges.insert(i);
} else {
front = false;
}
}
}
}
{
logi("types:");
auto i = 0;
for (auto const &node : dynamicNodes) {
if (frontNodes.erase(node)) {
logi("{:>8}.*{}", i++, node);
} else {
logi("{:>8}. {}", i++, node);
if (front) {
frontNodes.insert(opType);
}
}
}
{
logi("edges to copy:");
auto i = 0;
for (auto edgeIdx : dataEdges) {
auto const &edge = _internal.edges[edgeIdx];
std::string depVariables = "[ ";
for (auto const &var : edge.tensor->depVariables) {
depVariables += var->name;
depVariables += ' ';
}
depVariables += ']';
logi("{:>8}. {} {} ** {}", i++, edge.name, shapeFormat(edge.tensor->shape), depVariables);

logi("types:");
for (auto i = 0; auto const &node : dynamicNodes) {
if (frontNodes.erase(node)) {
logi("{:>8}.*{}", i++, node);
} else {
logi("{:>8}. {}", i++, node);
}
}
{
logi("outputs:");
auto i = 0;
for (auto edgeIdx : it.globalOutputs()) {
auto const &edge = _internal.edges[edgeIdx];
logi(" outputs[{:>2}] = edge[{:>2}] = {} with {}", i++, edgeIdx, edge.name, shapeFormat(edge.tensor->shape));

logi("edges to copy:");
for (auto i = 0; auto edgeIdx : dataEdges) {
auto const &edge = _internal.edges[edgeIdx];
std::string depVariables = "[ ";
for (auto const &var : edge.tensor->depVariables) {
depVariables += var->name;
depVariables += ' ';
}
depVariables += ']';
logi("{:>8}. {} {} ** {}", i++, edge.name, shapeFormat(edge.tensor->shape), depVariables);
}

logi("outputs:");
for (auto i = 0; auto edgeIdx : _internal.topology.globalOutputs()) {
auto const &edge = _internal.edges[edgeIdx];
logi(" outputs[{:>2}] = edge[{:>2}] = {} with {}", i++, edgeIdx, edge.name, shapeFormat(edge.tensor->shape));
}
}
}// namespace refactor::frontend
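Note on the hunk above: graph.cc now recognizes exactly five LOG_LEVEL values — DBG, INF, WRN, ERR, OFF — and silently keeps the previously configured level for anything else (the if/else chain has no final else). One caveat: the hunk adds <cstdlib> for std::getenv, while std::strcmp formally lives in <cstring>, which presumably arrives transitively here. A minimal Rust sketch (hypothetical, not part of this commit) of a validator the utilities crate could use to normalize the flag before exporting it:

```rust
/// Hypothetical helper mirroring the five values graph.cc accepts;
/// not part of this commit.
fn normalize_log_level(raw: &str) -> Option<&'static str> {
    match raw.to_ascii_uppercase().as_str() {
        "DBG" => Some("DBG"),
        "INF" => Some("INF"),
        "WRN" => Some("WRN"),
        "ERR" => Some("ERR"),
        "OFF" => Some("OFF"),
        // graph.cc ignores unknown values and keeps its current level
        _ => None,
    }
}

fn main() {
    assert_eq!(normalize_log_level("dbg"), Some("DBG"));
    assert_eq!(normalize_log_level("verbose"), None);
}
```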
13 changes: 8 additions & 5 deletions utilities/src/infer.rs
@@ -47,17 +47,20 @@ model_path = Path(\"{}\").resolve()
)
}

pub fn infer(path: impl AsRef<Path>) {
pub fn infer(path: impl AsRef<Path>, log: Option<String>) {
let path = path.as_ref();
assert!(
path.is_file() && path.extension() == Some(OsStr::new("onnx")),
"\"{}\" is not a onnx file",
path.display(),
);
Command::new("python")
let mut python = Command::new("python");
python
.current_dir(proj_dir())
.arg("-c")
.arg(format!("{}{}", model_path(path), SCRIPT))
.status()
.unwrap();
.arg(format!("{}{}", model_path(path), SCRIPT));
if let Some(log) = log {
python.env("LOG_LEVEL", log.to_uppercase());
}
python.status().unwrap();
}
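The mechanism here is std::process::Command::env: the variable is set only on the spawned python child, not on the parent process. A standalone sketch of that propagation, assuming a python interpreter on PATH (the inline Python is illustrative, not the script infer actually builds):

```rust
use std::process::Command;

fn main() {
    let mut python = Command::new("python");
    python
        .arg("-c")
        .arg("import os; print(os.environ.get('LOG_LEVEL'))");
    // Mirror the diff: only export the variable when a level was requested.
    let log: Option<String> = Some("dbg".to_string());
    if let Some(log) = log {
        python.env("LOG_LEVEL", log.to_uppercase());
    }
    // The child prints "DBG"; without the `env` call it would print "None".
    python.status().unwrap();
}
```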
11 changes: 7 additions & 4 deletions utilities/src/main.rs
@@ -4,7 +4,6 @@ mod make;

use clap::{Parser, Subcommand};
use std::{
ffi::OsString,
fs,
io::ErrorKind,
path::{Path, PathBuf},
@@ -32,7 +31,7 @@ enum Commands {
install_python: bool,
/// devices support
#[clap(long)]
dev: Option<Vec<OsString>>,
dev: Option<Vec<String>>,
/// specify c++ compiler
#[clap(long)]
cxx_compiler: Option<PathBuf>,
@@ -44,7 +43,11 @@
/// format source files
Format,
/// run model inference
Infer { path: PathBuf },
Infer {
path: PathBuf,
#[clap(long)]
log: Option<String>,
},
}

pub fn proj_dir() -> &'static Path {
@@ -78,6 +81,6 @@ fn main() {

Commands::Format => format::format(),

Commands::Infer { path } => infer::infer(path),
Commands::Infer { path, log } => infer::infer(path, log),
}
}
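After this change the Infer subcommand carries an optional --log flag alongside the model path. A self-contained sketch of the resulting CLI shape, assuming clap with the derive feature (binary name and handler body are invented for illustration):

```rust
use clap::{Parser, Subcommand};
use std::path::PathBuf;

#[derive(Parser)]
struct Cli {
    #[clap(subcommand)]
    command: Commands,
}

#[derive(Subcommand)]
enum Commands {
    /// run model inference
    Infer {
        path: PathBuf,
        #[clap(long)]
        log: Option<String>,
    },
}

fn main() {
    // e.g. `cargo run -- infer model.onnx --log dbg`
    match Cli::parse().command {
        Commands::Infer { path, log } => {
            println!("would infer {} with log level {:?}", path.display(), log);
        }
    }
}
```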
57 changes: 24 additions & 33 deletions utilities/src/make.rs
@@ -1,11 +1,5 @@
use crate::proj_dir;
use std::{
collections::HashSet,
ffi::{OsStr, OsString},
fs,
path::PathBuf,
process::Command,
};
use std::{collections::HashSet, ffi::OsStr, fs, path::PathBuf, process::Command};

#[derive(PartialEq, Eq, Hash, Debug)]
enum Target {
@@ -16,24 +10,19 @@ enum Target {
pub fn make(
release: bool,
install_python: bool,
dev: Option<Vec<OsString>>,
dev: Option<Vec<String>>,
cxx_compiler: Option<PathBuf>,
) {
let release = if release { "Release" } else { "Debug" };
let dev = dev
.unwrap_or_default()
.into_iter()
.map(|d| d.to_ascii_lowercase())
.filter_map(|d| {
if d == OsStr::new("cuda") || d == OsStr::new("nvidia") {
Some(Target::Nvidia)
} else if d == OsStr::new("kunlun")
|| d == OsStr::new("kunlunxin")
|| d == OsStr::new("baidu")
{
Some(Target::Baidu)
} else {
eprintln!("warning: unknown device: {:?}", d);
.filter_map(|d| match d.as_str() {
"cuda" | "nvidia" => Some(Target::Nvidia),
"kunlun" | "kunlunxin" | "baidu" => Some(Target::Baidu),
_ => {
eprintln!("Unknown device: {}", d);
None
}
})
@@ -44,22 +33,24 @@ pub fn make(
let build = proj_dir.join("build");
fs::create_dir_all(&build).unwrap();

let mut cmd = Command::new("cmake");
cmd.current_dir(&proj_dir)
.arg("-Bbuild")
.arg(format!("-DCMAKE_BUILD_TYPE={release}"))
.arg(format!("-DUSE_CUDA={}", dev(Target::Nvidia)))
.arg(format!("-DUSE_KUNLUN={}", dev(Target::Baidu)));
if let Some(cxx_compiler) = cxx_compiler {
cmd.arg(format!("-DCMAKE_CXX_COMPILER={}", cxx_compiler.display()));
{
let mut cmake = Command::new("cmake");
cmake
.current_dir(&proj_dir)
.arg("-Bbuild")
.arg(format!("-DCMAKE_BUILD_TYPE={release}"))
.arg(format!("-DUSE_CUDA={}", dev(Target::Nvidia)))
.arg(format!("-DUSE_KUNLUN={}", dev(Target::Baidu)));
if let Some(cxx_compiler) = cxx_compiler {
cmake.arg(format!("-DCMAKE_CXX_COMPILER={}", cxx_compiler.display()));
}
assert!(cmake.status().unwrap().success());
}
{
let mut make = Command::new("make");
make.current_dir(&build).arg("-j");
assert!(make.status().unwrap().success());
}
cmd.status().unwrap();

Command::new("make")
.current_dir(&build)
.arg("-j")
.status()
.unwrap();

if install_python {
let from = fs::read_dir(build.join("src/09python_ffi"))
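With dev now a Vec<String>, the device parsing collapses into a single match over lowercased strings. A runnable sketch of just that normalization step (the sample input is invented for illustration):

```rust
use std::collections::HashSet;

#[derive(PartialEq, Eq, Hash, Debug)]
enum Target {
    Nvidia,
    Baidu,
}

fn main() {
    let dev: Option<Vec<String>> = Some(vec!["CUDA".into(), "baidu".into(), "tpu".into()]);
    let dev: HashSet<Target> = dev
        .unwrap_or_default()
        .into_iter()
        .map(|d| d.to_ascii_lowercase())
        .filter_map(|d| match d.as_str() {
            "cuda" | "nvidia" => Some(Target::Nvidia),
            "kunlun" | "kunlunxin" | "baidu" => Some(Target::Baidu),
            _ => {
                eprintln!("Unknown device: {}", d);
                None
            }
        })
        .collect();
    // Prints the two recognized targets; "tpu" was warned about and dropped.
    println!("{:?}", dev);
}
```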
