test_tensorflow_gpu.py
import datetime
import logging
import os
from logging import Logger
import tensorflow as tf


def set_tensorflow_config():
    """Enable GPU memory growth so TensorFlow allocates memory on demand instead of reserving it all at startup."""
    gpus = tf.config.list_physical_devices('GPU')
    if gpus:
        try:
            # Currently, memory growth needs to be the same across GPUs.
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as e:
            # Memory growth must be set before GPUs have been initialized.
            print(e)
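
# An alternative (not used here) is to cap memory per GPU instead of growing it on demand,
# e.g. tf.config.set_logical_device_configuration(
#          gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=1024)])
# which likewise has to run before the GPUs are first initialized.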


class ArgsObject:
    def __init__(
        self,
        logger_name: str = "default",
        logger_dir: str = "logs",
    ) -> None:
        self.logger_name = logger_name
        self.logger_dir = logger_dir


def set_logger(args: ArgsObject) -> Logger:
    """Create a logger that writes DEBUG records to a timestamped file and INFO records to the console."""
    if not os.path.exists(args.logger_dir):
        os.makedirs(args.logger_dir)
    time = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    file_name = f"{args.logger_name}_{time}.log"

    logger = logging.getLogger(args.logger_name)
    logger.setLevel(logging.DEBUG)

    # File handler: keep everything down to DEBUG in the log file.
    fh = logging.FileHandler(os.path.join(args.logger_dir, file_name))
    fh_formatter = logging.Formatter("%(asctime)s - %(name)s - %(message)s")
    fh.setFormatter(fh_formatter)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)

    # Console handler: only INFO and above, with a shorter format.
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    ch_formatter = logging.Formatter("%(name)s - %(message)s")
    ch.setFormatter(ch_formatter)
    logger.addHandler(ch)

    return logger
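
# Note: logging.getLogger returns the same Logger instance for a given name, so
# calling set_logger() more than once with the same logger_name would attach
# duplicate handlers and emit each record multiple times.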


if __name__ == "__main__":
    # Make sure that TensorFlow doesn't grab all GPU memory at initialization.
    set_tensorflow_config()
    args = ArgsObject(logger_name="default", logger_dir="logs")
    logger = set_logger(args)
    logger.info(f"Tensorflow version: {tf.__version__}")
    # tf.test.is_gpu_available() is deprecated in TF 2.x; check the physical device list instead.
    logger.info(f"Tensorflow GPU available: {bool(tf.config.list_physical_devices('GPU'))}")