"""Logging utilities."""importloggingimportnumbersimportosimportsysimportthreadingimporttracebackfromcontextlibimportcontextmanagerfromtypingimportAnyStr,Sequence# noqafromkombu.logimportLOG_LEVELSfromkombu.logimportget_loggeras_get_loggerfromkombu.utils.encodingimportsafe_strfrom.termimportcolored__all__=('ColorFormatter','LoggingProxy','base_logger','set_in_sighandler','in_sighandler','get_logger','get_task_logger','mlevel','get_multiprocessing_logger','reset_multiprocessing_logger','LOG_LEVELS')_process_aware=False_in_sighandler=FalseMP_LOG=os.environ.get('MP_LOG',False)RESERVED_LOGGER_NAMES={'celery','celery.task'}# Sets up our logging hierarchy.## Every logger in the celery package inherits from the "celery"# logger, and every task logger inherits from the "celery.task"# logger.base_logger=logger=_get_logger('celery')


def set_in_sighandler(value):
    """Set flag signifying that we're inside a signal handler."""
    global _in_sighandler
    _in_sighandler = value


def iter_open_logger_fds():
    seen = set()
    loggers = (list(logging.Logger.manager.loggerDict.values()) +
               [logging.getLogger(None)])
    for l in loggers:
        try:
            for handler in l.handlers:
                try:
                    if handler not in seen:  # pragma: no cover
                        yield handler.stream
                        seen.add(handler)
                except AttributeError:
                    pass
        except AttributeError:  # PlaceHolder does not have handlers
            pass
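

# Illustrative sketch (not part of the original module): one way to use
# ``iter_open_logger_fds`` is to flush every stream currently attached to a
# logging handler, e.g. before forking or re-executing the process.  The
# helper name is an assumption made for this example.
def _example_flush_open_logger_fds():
    for stream in iter_open_logger_fds():
        try:
            stream.flush()
        except (AttributeError, ValueError, OSError):
            pass  # stream may be closed or otherwise not flushable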


@contextmanager
def in_sighandler():
    """Context that records that we are in a signal handler."""
    set_in_sighandler(True)
    try:
        yield
    finally:
        set_in_sighandler(False)
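

# Illustrative sketch (not part of the original module): a signal handler can
# wrap its body in ``in_sighandler()`` so that writes routed through the
# ``LoggingProxy`` defined below bypass the (non-reentrant) logging machinery
# and go straight to ``sys.__stderr__``.  The handler itself is hypothetical.
def _example_signal_handler(signum, frame):
    with in_sighandler():
        print(f'received signal {signum!r}', file=sys.__stderr__)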


def logger_isa(l, p, max=1000):
    this, seen = l, set()
    for _ in range(max):
        if this == p:
            return True
        else:
            if this in seen:
                raise RuntimeError(
                    f'Logger {l.name!r} parents recursive',
                )
            seen.add(this)
            this = this.parent
            if not this:
                break
    else:  # pragma: no cover
        raise RuntimeError(f'Logger hierarchy exceeds {max}')
    return False


def _using_logger_parent(parent_logger, logger_):
    if not logger_isa(logger_, parent_logger):
        logger_.parent = parent_logger
    return logger_


def get_logger(name):
    """Get logger by name."""
    l = _get_logger(name)
    if logging.root not in (l, l.parent) and l is not base_logger:
        l = _using_logger_parent(base_logger, l)
    return l


# Module-level task logger; every logger returned by ``get_task_logger``
# below is re-parented onto it (see the hierarchy comment at the top).
task_logger = get_logger('celery.task')
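

# Illustrative sketch (not part of the original module): loggers obtained via
# ``get_logger`` end up under ``base_logger`` (unless they hang directly off
# the root logger), so handlers and levels configured on the ``celery`` logger
# apply to them as well.  The logger name below is only an example.
def _example_get_logger_hierarchy():
    pool_logger = get_logger('celery.concurrency.prefork')
    assert logger_isa(pool_logger, base_logger)  # already under ``celery``
    return pool_logger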


def get_task_logger(name):
    """Get logger for task module by name."""
    if name in RESERVED_LOGGER_NAMES:
        raise RuntimeError(f'Logger name {name!r} is reserved!')
    return _using_logger_parent(task_logger, get_logger(name))
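

# Illustrative sketch (not part of the original module): a tasks module would
# normally call ``get_task_logger(__name__)`` once at import time; the result
# is parented to the ``celery.task`` logger, so task output shares its
# handlers and level.  The module name 'proj.tasks' is an assumption.
def _example_task_module_logger():
    example_logger = get_task_logger('proj.tasks')
    assert logger_isa(example_logger, task_logger)
    return example_logger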


def mlevel(level):
    """Convert level name/int to log level."""
    if level and not isinstance(level, numbers.Integral):
        return LOG_LEVELS[level.upper()]
    return level
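

# Illustrative sketch (not part of the original module): ``mlevel`` accepts
# either a level name or a numeric level, which makes it convenient for
# normalizing user-supplied configuration values.
def _example_mlevel_usage():
    assert mlevel('info') == logging.INFO          # names are upper-cased first
    assert mlevel(logging.DEBUG) == logging.DEBUG  # integers pass through
    assert mlevel(0) == 0                          # falsy values pass through too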


class ColorFormatter(logging.Formatter):
    """Logging formatter that adds colors based on severity."""

    #: Loglevel -> Color mapping.
    COLORS = colored().names
    colors = {
        'DEBUG': COLORS['blue'],
        'WARNING': COLORS['yellow'],
        'ERROR': COLORS['red'],
        'CRITICAL': COLORS['magenta'],
    }

    def __init__(self, fmt=None, use_color=True):
        super().__init__(fmt)
        self.use_color = use_color

    def format(self, record):
        msg = super().format(record)
        color = self.colors.get(record.levelname)

        # reset exception info later for other handlers...
        einfo = sys.exc_info() if record.exc_info == 1 else record.exc_info

        if color and self.use_color:
            try:
                # safe_str will repr the color object
                # and color will break on non-string objects
                # so need to reorder calls based on type.
                # Issue #427
                try:
                    if isinstance(msg, str):
                        return str(color(safe_str(msg)))
                    return safe_str(color(msg))
                except UnicodeDecodeError:  # pragma: no cover
                    return safe_str(msg)  # skip colors
            except Exception as exc:  # pylint: disable=broad-except
                prev_msg, record.exc_info, record.msg = (
                    record.msg, 1,
                    '<Unrepresentable {!r}: {!r}>'.format(type(msg), exc),
                )
                try:
                    return super().format(record)
                finally:
                    record.msg, record.exc_info = prev_msg, einfo
        else:
            return safe_str(msg)
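

# Illustrative sketch (not part of the original module): attaching
# ``ColorFormatter`` to a plain ``StreamHandler`` colors records by severity;
# ``use_color=False`` keeps the same format string but skips the escape codes.
# The helper name and the default format string are assumptions.
def _example_install_color_handler(target_logger, fmt=None):
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(ColorFormatter(
        fmt or '[%(asctime)s: %(levelname)s] %(message)s',
        use_color=sys.stderr.isatty(),
    ))
    target_logger.addHandler(handler)
    return handler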


class LoggingProxy:
    """Forward file object to :class:`logging.Logger` instance.

    Arguments:
        logger (~logging.Logger): Logger instance to forward to.
        loglevel (int, str): Log level to use when logging messages.
    """

    mode = 'w'
    name = None
    closed = False
    loglevel = logging.ERROR
    _thread = threading.local()

    def __init__(self, logger, loglevel=None):
        # pylint: disable=redefined-outer-name
        # Note that the logger global is redefined here, be careful changing.
        self.logger = logger
        self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel)
        self._safewrap_handlers()

    def _safewrap_handlers(self):
        # Make the logger handlers dump internal errors to
        # :data:`sys.__stderr__` instead of :data:`sys.stderr` to circumvent
        # infinite loops.

        def wrap_handler(handler):  # pragma: no cover

            class WithSafeHandleError(logging.Handler):

                def handleError(self, record):
                    try:
                        traceback.print_exc(None, sys.__stderr__)
                    except OSError:
                        pass  # see python issue 5971

            handler.handleError = WithSafeHandleError().handleError

        return [wrap_handler(h) for h in self.logger.handlers]

    def write(self, data):
        # type: (AnyStr) -> int
        """Write message to logging object."""
        if _in_sighandler:
            safe_data = safe_str(data)
            print(safe_data, file=sys.__stderr__)
            return len(safe_data)
        if getattr(self._thread, 'recurse_protection', False):
            # Logger is logging back to this file, so stop recursing.
            return 0
        if data and not self.closed:
            self._thread.recurse_protection = True
            try:
                safe_data = safe_str(data).rstrip('\n')
                if safe_data:
                    self.logger.log(self.loglevel, safe_data)
                    return len(safe_data)
            finally:
                self._thread.recurse_protection = False
        return 0

    def writelines(self, sequence):
        # type: (Sequence[str]) -> None
        """Write list of strings to file.

        The sequence can be any iterable object producing strings.
        This is equivalent to calling :meth:`write` for each string.
        """
        for part in sequence:
            self.write(part)

    def flush(self):
        # This object is not buffered so any :meth:`flush`
        # requests are ignored.
        pass

    def close(self):
        # when the object is closed, no write requests are
        # forwarded to the logging object anymore.
        self.closed = True

    def isatty(self):
        """Here for file support."""
        return False
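

# Illustrative sketch (not part of the original module): ``LoggingProxy`` is a
# file-like object, so standard output can be pointed at a logger and anything
# the program prints becomes a log record at the chosen level.  Restoring
# ``sys.stdout`` afterwards is the caller's responsibility in this sketch.
def _example_redirect_stdout_to_logger(target_logger):
    proxy = LoggingProxy(target_logger, loglevel='WARNING')
    previous, sys.stdout = sys.stdout, proxy
    try:
        print('captured by the logger, not the terminal')
    finally:
        sys.stdout = previous
    return proxy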


def get_multiprocessing_logger():
    """Return the multiprocessing logger."""
    try:
        from billiard import util
    except ImportError:
        pass
    else:
        return util.get_logger()


def reset_multiprocessing_logger():
    """Reset multiprocessing logging setup."""
    try:
        from billiard import util
    except ImportError:
        pass
    else:
        if hasattr(util, '_logger'):  # pragma: no cover
            util._logger = None
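

# Illustrative sketch (not part of the original module): billiard is optional,
# so ``get_multiprocessing_logger`` may return ``None`` and callers should
# guard for that before adjusting the level.  The DEBUG/ERROR choice mirrors
# the ``MP_LOG`` environment flag defined above and is only an example.
def _example_setup_multiprocessing_logging():
    mp_logger = get_multiprocessing_logger()
    if mp_logger is not None:
        mp_logger.setLevel(mlevel('DEBUG' if MP_LOG else 'ERROR'))
    return mp_logger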