Python source code examples: logzero.logfile()
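The examples below are collected from open-source projects (including what appear to be logzero's own tests) and show typical uses of logzero.logfile(): attaching a file handler to the default logzero logger, optionally with rotation (maxBytes, backupCount), a handler-specific loglevel or formatter, and detaching it again with logzero.logfile(None). As a minimal, hedged sketch of that core API (the file name app.log is only a placeholder):

import logging

import logzero
from logzero import logger

# Attach a rotating file handler to logzero's default logger.
logzero.logfile("app.log", maxBytes=1_000_000, backupCount=3)

# Set the default logger's level; records below it are dropped.
logzero.loglevel(logging.INFO)

logger.info("written to stderr and to app.log")

# Detach the file handler again.
logzero.logfile(None)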
Example 1
def __init__(self, args, train=True):
    self.args = args  # argparse object
    self.logger = logger
    self.start_time = datetime.today()
    self.config = None  # only used for the inference

    if train:  # for training
        self.output_dir = self._return_output_dir()
        self.create_output_dir()
        log_filename = 'train.log'
    else:  # for inference
        self.output_dir = os.path.dirname(args.model)
        self.model_name = os.path.basename(args.model)
        log_filename = 'inference_{}.log'.format(self.model_name)

    log_name = os.path.join(self.output_dir, log_filename)
    logzero.logfile(log_name)
    self.log_name = log_name
    self.logger.info('Log filename: [{}]'.format(log_name))
Example 2
def test_api_loglevel(capsys):
    """
    Should reconfigure the internal logger loglevel
    """
    logzero.reset_default_logger()
    temp = tempfile.NamedTemporaryFile()
    try:
        logzero.logfile(temp.name)
        logzero.logger.info("info1")
        logzero.loglevel(logging.WARN)
        logzero.logger.info("info2")
        logzero.logger.warn("warn1")

        with open(temp.name) as f:
            content = f.read()
            assert "] info1" in content
            assert "] info2" not in content
            assert "] warn1" in content
    finally:
        temp.close()
Example 3
def test_api_loglevel_custom_handlers(capsys):
    """
    Should reconfigure the internal logger loglevel and custom handlers
    """
    logzero.reset_default_logger()

    # TODO
    pass

    # temp = tempfile.NamedTemporaryFile()
    # try:
    #     logzero.logfile(temp.name)
    #     logzero.logger.info("info1")
    #     logzero.loglevel(logging.WARN)
    #     logzero.logger.info("info2")
    #     logzero.logger.warn("warn1")
    #
    #     with open(temp.name) as f:
    #         content = f.read()
    #         assert "] info1" in content
    #         assert "] info2" not in content
    #         assert "] warn1" in content
    # finally:
    #     temp.close()
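The body of this test is left as a TODO in its source; the commented-out lines simply mirror Example 2. Below is a hedged sketch of what a custom-handler variant might look like, assuming a plain logging.FileHandler plays the role of the "custom" handler and that raising the logger's level via logzero.loglevel() is expected to filter records for that handler too. The function name and assertions are illustrative only, not part of logzero's test suite.

import logging
import tempfile

import logzero


def test_api_loglevel_custom_handlers_sketch(capsys):
    logzero.reset_default_logger()
    temp = tempfile.NamedTemporaryFile()
    try:
        # Attach a plain stdlib handler as the "custom" handler.
        handler = logging.FileHandler(temp.name)
        logzero.logger.addHandler(handler)

        logzero.logger.info("info1")
        logzero.loglevel(logging.WARN)
        logzero.logger.info("info2")
        logzero.logger.warning("warn1")

        with open(temp.name) as f:
            content = f.read()
            # No logzero formatter on this handler, so match the bare messages.
            assert "info1" in content
            assert "info2" not in content
            assert "warn1" in content
    finally:
        temp.close()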
Example 4
def get_logger(log_dir, loglevel=logging.INFO, tensorboard_dir=None):
    from logzero import logger
    if not Path(log_dir).exists():
        Path(log_dir).mkdir(parents=True)
    logzero.loglevel(loglevel)
    logzero.logfile(log_dir + '/logfile')

    if tensorboard_dir is not None:
        if not Path(tensorboard_dir).exists():
            Path(tensorboard_dir).mkdir(parents=True)
        writer = SummaryWriter(tensorboard_dir)
        return logger, writer

    return logger
Example 5
def __init__(self, log_name):
    self.embed_matrix = None
    self.logger = logger
    logzero.logfile(log_name)
Example 6
def test_api_logfile(capsys):
    """
    logzero.logfile(..) should work as expected
    """
    logzero.reset_default_logger()
    temp = tempfile.NamedTemporaryFile()
    try:
        logzero.logger.info("info1")

        # Set logfile
        logzero.logfile(temp.name)
        logzero.logger.info("info2")

        # Remove logfile
        logzero.logfile(None)
        logzero.logger.info("info3")

        # Set logfile again
        logzero.logfile(temp.name)
        logzero.logger.info("info4")

        with open(temp.name) as f:
            content = f.read()
            assert "] info1" not in content
            assert "] info2" in content
            assert "] info3" not in content
            assert "] info4" in content
    finally:
        temp.close()
Example 7
def test_api_rotating_logfile(capsys):
    """
    logzero.rotating_logfile(..) should work as expected
    """
    logzero.reset_default_logger()
    temp = tempfile.NamedTemporaryFile()
    try:
        logzero.logger.info("info1")

        # Set logfile
        logzero.logfile(temp.name, maxBytes=10, backupCount=3)
        logzero.logger.info("info2")
        logzero.logger.info("info3")

        with open(temp.name) as f:
            content = f.read()
            assert "] info1" not in content  # logged before setting up logfile
            assert "] info2" not in content  # already rotated out
            assert "] info3" in content  # still in the current logfile

        fn_rotated = temp.name + ".1"
        assert os.path.exists(fn_rotated)
        with open(fn_rotated) as f:
            content = f.read()
            assert "] info2" in content
    finally:
        temp.close()
Example 8
def test_api_logfile_custom_loglevel():
    """
    logzero.logfile(..) should be able to use a custom loglevel
    """
    logzero.reset_default_logger()
    temp = tempfile.NamedTemporaryFile()
    try:
        # Set logfile with custom loglevel
        logzero.logfile(temp.name, loglevel=logging.WARN)
        logzero.logger.info("info1")
        logzero.logger.warn("warn1")

        # If setting a loglevel with logzero.loglevel(..) it will not overwrite
        # the custom loglevel of the file handler
        logzero.loglevel(logging.INFO)
        logzero.logger.info("info2")
        logzero.logger.warn("warn2")

        with open(temp.name) as f:
            content = f.read()
            assert "] info1" not in content
            assert "] warn1" in content
            assert "] info2" not in content
            assert "] warn2" in content
    finally:
        temp.close()
Example 9
def write(Message, *v, **d):
    logzero.logfile(log_path)
    logzero.loglevel(logging.INFO)
    logger.info(str(Message))
    if v:
        for msg in v:
            logger.info('%s\n' % msg)
    if d:
        for k in d:
            logger.info('%s\n' % d[k])
Example 10
def get_logger():
    # Set a custom formatter
    log_format = '%(color)s[%(levelname)1.1s ' \
                 '%(asctime)s.%(msecs)03d %(module)s:%(lineno)d]' \
                 '%(end_color)s %(message)s'
    formatter = logzero.LogFormatter(fmt=log_format)
    logzero.setup_default_logger(formatter=formatter)
    logzero.logfile(
        'logzero.log',
        maxBytes=1e6,
        backupCount=3
    )
    return logzero.logger
Example 11
def configure_logger(verbose: bool = False, log_format: str = "string",
                     log_file: str = None, logger_name: str = "chaostoolkit",
                     context_id: str = None):
    """
    Configure the chaostoolkit logger.

    By default logs as strings to stdout and the given file. When `log_format`
    is `"json"`, records are sent to the console as JSON strings but remain
    as strings in the log file. The rationale is that the log file is mostly
    for grepping purposes while records written to the console can be
    forwarded out of band to anywhere else.
    """
    log_level = logging.INFO

    # we define colors ourselves as critical is missing in default ones
    colors = {
        logging.DEBUG: ForegroundColors.CYAN,
        logging.INFO: ForegroundColors.GREEN,
        logging.WARNING: ForegroundColors.YELLOW,
        logging.ERROR: ForegroundColors.RED,
        logging.CRITICAL: ForegroundColors.RED
    }
    fmt = "%(color)s[%(asctime)s %(levelname)s]%(end_color)s %(message)s"
    if verbose:
        log_level = logging.DEBUG
        fmt = "%(color)s[%(asctime)s %(levelname)s] "\
              "[%(module)s:%(lineno)d]%(end_color)s %(message)s"

    formatter = LogFormatter(
        fmt=fmt, datefmt="%Y-%m-%d %H:%M:%S", colors=colors)
    if log_format == 'json':
        fmt = "(process) (asctime) (levelname) (module) (lineno) (message)"
        if context_id:
            fmt = "(context_id) {}".format(fmt)
        formatter = jsonlogger.JsonFormatter(
            fmt, json_default=encoder, timestamp=True)

    # sadly, no other way to specify the name of the default logger publicly
    LOGZERO_DEFAULT_LOGGER = logger_name

    logger = setup_default_logger(level=log_level, formatter=formatter)
    if context_id:
        logger.addFilter(ChaosToolkitContextFilter(logger_name, context_id))

    if log_file:
        # always everything as strings in the log file
        logger.setLevel(logging.DEBUG)
        fmt = "%(color)s[%(asctime)s %(levelname)s] "\
              "[%(module)s:%(lineno)d]%(end_color)s %(message)s"
        formatter = LogFormatter(fmt=fmt, datefmt="%Y-%m-%d %H:%M:%S",
                                 colors=colors)
        logzero.logfile(log_file, formatter=formatter, mode='a',
                        loglevel=logging.DEBUG)