'''
kvlogger.py

@author: Ken Venner
@contact: ken@venerllc.com
@version: 1.06

Library of tools used to manage logging
'''
import logging
import logging.config
import os
import sys
from logging.handlers import TimedRotatingFileHandler
FORMATTER = logging.Formatter("%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(message)s")
LOG_FILE = "my_app.log"
# Usage in your code:
#   my_logger = get_logger("my module name")
#   my_logger.debug("a debug message")

def get_console_handler():
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(FORMATTER)
    return console_handler

def get_file_handler(logfile=LOG_FILE):
    # rotate the log file daily at midnight; use the logfile argument so callers can override the default
    file_handler = TimedRotatingFileHandler(logfile, when='midnight')
    file_handler.setFormatter(FORMATTER)
    return file_handler

def get_logger(logger_name, logfile=LOG_FILE, loggerlevel=None):
    logger = logging.getLogger(logger_name)
    if loggerlevel is not None:
        logger.setLevel(loggerlevel)  # better to have too much log than not enough
    logger.addHandler(get_console_handler())
    logger.addHandler(get_file_handler(logfile))
    # with this pattern, it's rarely necessary to propagate the error up to parent
    logger.propagate = False
    return logger
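
# A minimal sketch of per-module usage (the module name, log file, and level
# below are illustrative, not required):
#
#   log = get_logger(__name__, logfile='conversion.log', loggerlevel=logging.DEBUG)
#   log.info('starting conversion')
#   log.debug('record count: %d', 42)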

def get_config(log_path=LOG_FILE,
               fhandler='logging.handlers.RotatingFileHandler',
               loggerlevel=None,
               maxBytes=None):
    if maxBytes is None:
        maxBytes = 1024 * 1000 * 100
    if loggerlevel is None:
        loggerlevel = 'DEBUG'
    config = {
        'disable_existing_loggers': False,
        'version': 1,
        'formatters': {
            'default': {
                'format': '%(asctime)s %(levelname)s %(name)s:%(lineno)d %(funcName)s %(message)s',
            },
            'short': {
                'format': '%(asctime)s %(name)s %(levelname)s:%(lineno)d: %(message)s'
            },
        },
        'handlers': {
            'console': {
                'level': 'INFO',
                'formatter': 'default',
                'class': 'logging.StreamHandler',
                'stream': 'ext://sys.stdout'
            },
            'file': {
                'level': loggerlevel,
                'class': fhandler,
                'formatter': 'default',
                'filename': log_path,
                'encoding': 'UTF-8',
                'maxBytes': maxBytes,
                'backupCount': 3
            }
        },
        'loggers': {
            '': {
                'handlers': ['console', 'file'],
                'level': loggerlevel,
            },
        },
    }
    # TimedRotatingFileHandler rotates on time, not size: rotate daily and drop maxBytes
    if fhandler == 'logging.handlers.TimedRotatingFileHandler':
        config['handlers']['file']['when'] = 'midnight'
        # config['handlers']['file']['interval'] = 1
        config['handlers']['file']['backupCount'] = 31
        del config['handlers']['file']['maxBytes']
    # plain FileHandler does not rotate: drop the rotation-specific keys
    elif fhandler == 'logging.FileHandler':
        # config['handlers']['file']['mode'] = 'a'
        # config['handlers']['file']['delay'] = False
        del config['handlers']['file']['maxBytes']
        del config['handlers']['file']['backupCount']
    return config

def setHandlerLevel(dictConfig, handlerType, level):
    # update the logging level of one handler ('console' or 'file') in a dictConfig dictionary
    dictConfig['handlers'][handlerType]['level'] = level

def dictConfig(config):
    logging.config.dictConfig(config)

def getLogger(name):
    return logging.getLogger(name)
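
# A minimal end-to-end sketch (the filename and handler choice are illustrative,
# not a required setup): build the config, raise the console handler to DEBUG,
# apply it, and fetch a logger through the wrappers above.
#
#   config = get_config(log_path='myapp.log',
#                       fhandler='logging.handlers.TimedRotatingFileHandler')
#   setHandlerLevel(config, 'console', 'DEBUG')
#   dictConfig(config)
#   logger = getLogger(__name__)
#   logger.info('logging configured')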

def clear_logs(config, logger):
    # remove the current log file and re-apply the config so logging restarts with a fresh file
    log_file = config['handlers']['file']['filename']
    logging.shutdown()
    os.remove(log_file)
    dictConfig(config)
    logger.info('Logs cleared at startup: %s', log_file)
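
# A minimal sketch of clearing logs at startup (the filename is illustrative):
#
#   config = get_config(log_path='myapp.log')
#   dictConfig(config)
#   logger = getLogger(__name__)
#   clear_logs(config, logger)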
"""
Commented out this code block
# Capturing Traceback informatoin in your logs and JSON payload logging pointers
# https://www.datadoghq.com/blog/python-logging-best-practices/
# added logging feature to capture and log unhandled exceptions
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
"""