"""Worker Consumer Blueprint.This module contains the components responsible for consuming messagesfrom the broker, processing the messages and keeping the broker connectionsup and running."""importerrnoimportloggingimportosimportwarningsfromcollectionsimportdefaultdictfromtimeimportsleepfrombilliard.commonimportrestart_statefrombilliard.exceptionsimportRestartFreqExceededfromkombu.asynchronous.semaphoreimportDummyLockfromkombu.exceptionsimportContentDisallowed,DecodeErrorfromkombu.utils.compatimport_detect_environmentfromkombu.utils.encodingimportsafe_reprfromkombu.utils.limitsimportTokenBucketfromvineimportppartial,promisefromceleryimportbootsteps,signalsfromcelery.app.traceimportbuild_tracerfromcelery.exceptionsimport(CPendingDeprecationWarning,InvalidTaskError,NotRegistered,WorkerShutdown,WorkerTerminate)fromcelery.utils.functionalimportnoopfromcelery.utils.logimportget_loggerfromcelery.utils.nodenamesimportgethostnamefromcelery.utils.objectsimportBunchfromcelery.utils.textimporttruncatefromcelery.utils.timeimporthumanize_seconds,ratefromcelery.workerimportloopsfromcelery.worker.stateimportactive_requests,maybe_shutdown,requests,reserved_requests,task_reserved__all__=('Consumer','Evloop','dump_body')CLOSE=bootsteps.CLOSETERMINATE=bootsteps.TERMINATESTOP_CONDITIONS={CLOSE,TERMINATE}logger=get_logger(__name__)debug,info,warn,error,crit=(logger.debug,logger.info,logger.warning,logger.error,logger.critical)CONNECTION_RETRY="""\consumer: Connection to broker lost. \Trying to re-establish the connection...\"""CONNECTION_RETRY_STEP="""\Trying again {when}... ({retries}/{max_retries})\"""CONNECTION_ERROR="""\consumer: Cannot connect to %s: %s.%s"""CONNECTION_FAILOVER="""\Will retry using next failover.\"""UNKNOWN_FORMAT="""\Received and deleted unknown message. Wrong destination?!?The full contents of the message body was: %s"""#: Error message for when an unregistered task is received.UNKNOWN_TASK_ERROR="""\Received unregistered task of type %s.The message has been ignored and discarded.Did you remember to import the module containing this task?Or maybe you're using relative imports?Please seehttps://docs.celeryq.dev/en/latest/internals/protocol.htmlfor more information.The full contents of the message body was:%sThe full contents of the message headers:%sThe delivery info for this task is:%s"""#: Error message for when an invalid task message is received.INVALID_TASK_ERROR="""\Received invalid task message: %sThe message has been ignored and discarded.Please ensure your message conforms to the taskmessage protocol as described here:https://docs.celeryq.dev/en/latest/internals/protocol.htmlThe full contents of the message body was:%s"""MESSAGE_DECODE_ERROR="""\Can't decode message body: %r [type:%r encoding:%r headers:%s]body: %s"""MESSAGE_REPORT="""\body: {0}{{content_type:{1} content_encoding:{2} delivery_info:{3} headers={4}}}"""TERMINATING_TASK_ON_RESTART_AFTER_A_CONNECTION_LOSS="""\Task %s cannot be acknowledged after a connection loss since late acknowledgement is enabled for it.Terminating it instead."""CANCEL_TASKS_BY_DEFAULT="""In Celery 5.1 we introduced an optional breaking change whichon connection loss cancels all currently executed tasks with late acknowledgement enabled.These tasks cannot be acknowledged as the connection is gone, and the tasks are automatically redeliveredback to the queue. You can enable this behavior using the worker_cancel_long_running_tasks_on_connection_losssetting. In Celery 5.1 it is set to False by default. 
The setting will be set to True by default in Celery 6.0."""

def dump_body(m, body):
    """Format message body for debugging purposes."""
    # v2 protocol does not deserialize body
    body = m.body if body is None else body
    return '{} ({}b)'.format(truncate(safe_repr(body), 1024),
                             len(m.body))
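
# Illustrative sketch (not part of the module): dump_body is used when logging
# undecodable or unknown messages (see on_decode_error below).  For a message
# whose body is a small dict, the formatted result might look like
# "{'task': 'proj.add', ...} (214b)" -- the example body shown here is a
# placeholder, not real data.
#
#   formatted = dump_body(message, message.body)
#   crit(MESSAGE_DECODE_ERROR, exc, message.content_type,
#        message.content_encoding, safe_repr(message.headers), formatted)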

class Consumer:
    """Consumer blueprint."""

    Strategies = dict

    #: Optional callback called the first time the worker
    #: is ready to receive tasks.
    init_callback = None

    #: The current worker pool instance.
    pool = None

    #: A timer used for high-priority internal tasks, such
    #: as sending heartbeats.
    timer = None

    restart_count = -1  # first start is the same as a restart

    #: This flag will be turned off after the first failed
    #: connection attempt.
    first_connection_attempt = True

    def __init__(self, on_task_request,
                 init_callback=noop, hostname=None,
                 pool=None, app=None,
                 timer=None, controller=None, hub=None, amqheartbeat=None,
                 worker_options=None, disable_rate_limits=False,
                 initial_prefetch_count=2, prefetch_multiplier=1, **kwargs):
        self.app = app
        self.controller = controller
        self.init_callback = init_callback
        self.hostname = hostname or gethostname()
        self.pid = os.getpid()
        self.pool = pool
        self.timer = timer
        self.strategies = self.Strategies()
        self.conninfo = self.app.connection_for_read()
        self.connection_errors = self.conninfo.connection_errors
        self.channel_errors = self.conninfo.channel_errors
        self._restart_state = restart_state(maxR=5, maxT=1)

        self._does_info = logger.isEnabledFor(logging.INFO)
        self._limit_order = 0
        self.on_task_request = on_task_request
        self.on_task_message = set()
        self.amqheartbeat_rate = self.app.conf.broker_heartbeat_checkrate
        self.disable_rate_limits = disable_rate_limits
        self.initial_prefetch_count = initial_prefetch_count
        self.prefetch_multiplier = prefetch_multiplier
        self._maximum_prefetch_restored = True

        # this contains a tokenbucket for each task type by name, used for
        # rate limits, or None if rate limits are disabled for that task.
        self.task_buckets = defaultdict(lambda: None)
        self.reset_rate_limits()

        self.hub = hub
        if self.hub or getattr(self.pool, 'is_green', False):
            self.amqheartbeat = amqheartbeat
            if self.amqheartbeat is None:
                self.amqheartbeat = self.app.conf.broker_heartbeat
        else:
            self.amqheartbeat = 0

        if not hasattr(self, 'loop'):
            self.loop = loops.asynloop if hub else loops.synloop

        if _detect_environment() == 'gevent':
            # there's a gevent bug that causes timeouts to not be reset,
            # so if the connection timeout is exceeded once, it can NEVER
            # connect again.
            self.app.conf.broker_connection_timeout = None

        self._pending_operations = []

        self.steps = []
        self.blueprint = self.Blueprint(
            steps=self.app.steps['consumer'],
            on_close=self.on_close,
        )
        self.blueprint.apply(self, **dict(worker_options or {}, **kwargs))

    def _update_prefetch_count(self, index=0):
        """Update prefetch count after pool grow/shrink operations.

        Index must be the change in number of processes as a positive
        (increasing) or negative (decreasing) number.

        Note:
            Currently pool grow operations will end up with an offset
            of +1 if the initial size of the pool was 0 (e.g.
            :option:`--autoscale=1,0 <celery worker --autoscale>`).
        """
        num_processes = self.pool.num_processes
        if not self.initial_prefetch_count or not num_processes:
            return  # prefetch disabled
        self.initial_prefetch_count = (
            self.pool.num_processes * self.prefetch_multiplier
        )
        return self._update_qos_eventually(index)

    def _update_qos_eventually(self, index):
        return (self.qos.decrement_eventually if index < 0
                else self.qos.increment_eventually)(
            abs(index) * self.prefetch_multiplier)

    def _limit_move_to_pool(self, request):
        task_reserved(request)
        self.on_task_request(request)

    def _schedule_bucket_request(self, bucket):
        while True:
            try:
                request, tokens = bucket.pop()
            except IndexError:
                # no request, break
                break

            if bucket.can_consume(tokens):
                self._limit_move_to_pool(request)
                continue
            else:
                # requeue to head, keep the order.
                bucket.contents.appendleft((request, tokens))

                pri = self._limit_order = (self._limit_order + 1) % 10
                hold = bucket.expected_time(tokens)
                self.timer.call_after(
                    hold, self._schedule_bucket_request, (bucket,),
                    priority=pri,
                )
                # no tokens, break
                break

    def _limit_task(self, request, bucket, tokens):
        bucket.add((request, tokens))
        return self._schedule_bucket_request(bucket)

    def _limit_post_eta(self, request, bucket, tokens):
        self.qos.decrement_eventually()
        bucket.add((request, tokens))
        return self._schedule_bucket_request(bucket)
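
    # Illustrative sketch (not part of the worker): how the token-bucket rate
    # limiting used by _limit_task/_schedule_bucket_request behaves.  It relies
    # on kombu's TokenBucket and celery.utils.time.rate, both imported above;
    # the rate string and token count are placeholder values.
    #
    #   bucket = TokenBucket(rate('10/m'), capacity=1)   # roughly 10 tasks per minute
    #   if bucket.can_consume(1):
    #       ...  # token available: move the request to the pool now
    #   else:
    #       hold = bucket.expected_time(1)               # seconds until a token frees up
    #       ...  # re-schedule via self.timer.call_after(hold, ...)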

    def start(self):
        blueprint = self.blueprint
        while blueprint.state not in STOP_CONDITIONS:
            maybe_shutdown()
            if self.restart_count:
                try:
                    self._restart_state.step()
                except RestartFreqExceeded as exc:
                    crit('Frequent restarts detected: %r', exc, exc_info=1)
                    sleep(1)
            self.restart_count += 1
            if self.app.conf.broker_channel_error_retry:
                recoverable_errors = (self.connection_errors + self.channel_errors)
            else:
                recoverable_errors = self.connection_errors
            try:
                blueprint.start(self)
            except recoverable_errors as exc:
                # If we're not retrying connections, we need to properly shutdown or terminate
                # the Celery main process instead of abruptly aborting the process without any cleanup.
                is_connection_loss_on_startup = self.first_connection_attempt
                self.first_connection_attempt = False
                connection_retry_type = self._get_connection_retry_type(is_connection_loss_on_startup)
                connection_retry = self.app.conf[connection_retry_type]
                if not connection_retry:
                    crit(
                        f"Retrying to {'establish' if is_connection_loss_on_startup else 're-establish'} "
                        f"a connection to the message broker after a connection loss has "
                        f"been disabled (app.conf.{connection_retry_type}=False). Shutting down..."
                    )
                    raise WorkerShutdown(1) from exc
                if isinstance(exc, OSError) and exc.errno == errno.EMFILE:
                    crit("Too many open files. Aborting...")
                    raise WorkerTerminate(1) from exc
                maybe_shutdown()
                if blueprint.state not in STOP_CONDITIONS:
                    if self.connection:
                        self.on_connection_error_after_connected(exc)
                    else:
                        self.on_connection_error_before_connected(exc)
                    self.on_close()
                    blueprint.restart(self)

    def on_connection_error_before_connected(self, exc):
        error(CONNECTION_ERROR, self.conninfo.as_uri(), exc,
              'Trying to reconnect...')

    def on_connection_error_after_connected(self, exc):
        warn(CONNECTION_RETRY, exc_info=True)
        try:
            self.connection.collect()
        except Exception:  # pylint: disable=broad-except
            pass

        if self.app.conf.worker_cancel_long_running_tasks_on_connection_loss:
            for request in tuple(active_requests):
                if request.task.acks_late and not request.acknowledged:
                    warn(TERMINATING_TASK_ON_RESTART_AFTER_A_CONNECTION_LOSS,
                         request)
                    request.cancel(self.pool)
        else:
            warnings.warn(CANCEL_TASKS_BY_DEFAULT, CPendingDeprecationWarning)

        if self.app.conf.worker_enable_prefetch_count_reduction:
            self.initial_prefetch_count = max(
                self.prefetch_multiplier,
                self.max_prefetch_count - len(tuple(active_requests)) * self.prefetch_multiplier
            )

            self._maximum_prefetch_restored = self.initial_prefetch_count == self.max_prefetch_count
            if not self._maximum_prefetch_restored:
                logger.info(
                    f"Temporarily reducing the prefetch count to {self.initial_prefetch_count} to avoid "
                    f"over-fetching since {len(tuple(active_requests))} tasks are currently being processed.\n"
                    f"The prefetch count will be gradually restored to {self.max_prefetch_count} as the tasks "
                    "complete processing."
                )
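
    # Illustrative sketch (not part of the worker): the settings consulted in
    # the handler above are ordinary app configuration values, e.g.:
    #
    #   app.conf.worker_cancel_long_running_tasks_on_connection_loss = True
    #   app.conf.worker_enable_prefetch_count_reduction = True
    #
    # With the first setting enabled, late-acknowledged tasks still running
    # when the connection is lost are cancelled rather than left to finish,
    # since they will be redelivered by the broker anyway.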

    def on_decode_error(self, message, exc):
        """Callback called if an error occurs while decoding a message.

        Simply logs the error and acknowledges the message so it
        doesn't enter a loop.

        Arguments:
            message (kombu.Message): The message received.
            exc (Exception): The exception being handled.
        """
        crit(MESSAGE_DECODE_ERROR,
             exc, message.content_type, message.content_encoding,
             safe_repr(message.headers), dump_body(message, message.body),
             exc_info=1)
        message.ack()

    def on_close(self):
        # Clear internal queues to get rid of old messages.
        # They can't be acked anyway, as a delivery tag is specific
        # to the current channel.
        if self.controller and self.controller.semaphore:
            self.controller.semaphore.clear()
        if self.timer:
            self.timer.clear()
        for bucket in self.task_buckets.values():
            if bucket:
                bucket.clear_pending()
        for request_id in reserved_requests:
            if request_id in requests:
                del requests[request_id]
        reserved_requests.clear()
        if self.pool and self.pool.flush:
            self.pool.flush()

    def connect(self):
        """Establish the broker connection used for consuming tasks.

        Retries establishing the connection if the
        :setting:`broker_connection_retry` setting is enabled.
        """
        conn = self.connection_for_read(heartbeat=self.amqheartbeat)
        if self.hub:
            conn.transport.register_with_event_loop(conn.connection, self.hub)
        return conn

    def ensure_connected(self, conn):
        # Callback called for each retry while the connection
        # can't be established.
        def _error_handler(exc, interval, next_step=CONNECTION_RETRY_STEP):
            if getattr(conn, 'alt', None) and interval == 0:
                next_step = CONNECTION_FAILOVER
            next_step = next_step.format(
                when=humanize_seconds(interval, 'in', ' '),
                retries=int(interval / 2),
                max_retries=self.app.conf.broker_connection_max_retries)
            error(CONNECTION_ERROR, conn.as_uri(), exc, next_step)

        # Remember that the connection is lazy, it won't establish
        # until needed.

        # TODO: Rely only on broker_connection_retry_on_startup to determine whether connection retries are disabled.
        #       We will make the switch in Celery 6.0.
        retry_disabled = False

        if self.app.conf.broker_connection_retry_on_startup is None:
            # If broker_connection_retry_on_startup is not set, revert to
            # broker_connection_retry to determine whether connection retries are disabled.
            retry_disabled = not self.app.conf.broker_connection_retry

            if retry_disabled:
                warnings.warn(
                    CPendingDeprecationWarning(
                        "The broker_connection_retry configuration setting will no longer determine\n"
                        "whether broker connection retries are made during startup in Celery 6.0 and above.\n"
                        "If you wish to refrain from retrying connections on startup,\n"
                        "you should set broker_connection_retry_on_startup to False instead.")
                )
        else:
            if self.first_connection_attempt:
                retry_disabled = not self.app.conf.broker_connection_retry_on_startup
            else:
                retry_disabled = not self.app.conf.broker_connection_retry

        if retry_disabled:
            # Retry disabled, just call connect directly.
            conn.connect()
            self.first_connection_attempt = False
            return conn

        conn = conn.ensure_connection(
            _error_handler, self.app.conf.broker_connection_max_retries,
            callback=maybe_shutdown,
        )
        self.first_connection_attempt = False
        return conn
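
    # Illustrative sketch (not part of the worker): the retry behaviour above
    # is driven by these settings; the values shown are examples only.
    #
    #   app.conf.broker_connection_retry = True              # retry after a lost connection
    #   app.conf.broker_connection_retry_on_startup = True   # retry the very first connection
    #   app.conf.broker_connection_max_retries = 100         # give up after this many attempts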

    def add_task_queue(self, queue, exchange=None, exchange_type=None,
                       routing_key=None, **options):
        cset = self.task_consumer
        queues = self.app.amqp.queues
        # Must use 'in' here, as __missing__ will automatically
        # create queues when :setting:`task_create_missing_queues` is
        # enabled.  (Issue #1079)
        if queue in queues:
            q = queues[queue]
        else:
            exchange = queue if exchange is None else exchange
            exchange_type = ('direct' if exchange_type is None
                             else exchange_type)
            q = queues.select_add(queue,
                                  exchange=exchange,
                                  exchange_type=exchange_type,
                                  routing_key=routing_key, **options)
        if not cset.consuming_from(queue):
            cset.add_queue(q)
            cset.consume()
            info('Started consuming from %s', queue)
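
    # Illustrative sketch (not part of the worker): add_task_queue is typically
    # reached through the ``add_consumer`` remote control command, which tells
    # running workers to start consuming from an additional queue.  The queue
    # name below is a placeholder.
    #
    #   app.control.add_consumer('priority', reply=True)
    #
    # Extra routing details (exchange, exchange_type, routing_key) may also be
    # passed along and end up as the keyword arguments seen above.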

    def apply_eta_task(self, task):
        """Method called by the timer to apply a task with an ETA/countdown."""
        task_reserved(task)
        self.on_task_request(task)
        self.qos.decrement_eventually()

    def create_task_handler(self, promise=promise):
        strategies = self.strategies
        on_unknown_message = self.on_unknown_message
        on_unknown_task = self.on_unknown_task
        on_invalid_task = self.on_invalid_task
        callbacks = self.on_task_message
        call_soon = self.call_soon

        def on_task_received(message):
            # payload will only be set for v1 protocol, since v2
            # will defer deserializing the message body to the pool.
            payload = None
            try:
                type_ = message.headers['task']                # protocol v2
            except TypeError:
                return on_unknown_message(None, message)
            except KeyError:
                try:
                    payload = message.decode()
                except Exception as exc:  # pylint: disable=broad-except
                    return self.on_decode_error(message, exc)
                try:
                    type_, payload = payload['task'], payload  # protocol v1
                except (TypeError, KeyError):
                    return on_unknown_message(payload, message)
            try:
                strategy = strategies[type_]
            except KeyError as exc:
                return on_unknown_task(None, message, exc)
            else:
                try:
                    ack_log_error_promise = promise(
                        call_soon,
                        (message.ack_log_error,),
                        on_error=self._restore_prefetch_count_after_connection_restart,
                    )
                    reject_log_error_promise = promise(
                        call_soon,
                        (message.reject_log_error,),
                        on_error=self._restore_prefetch_count_after_connection_restart,
                    )

                    if (
                        not self._maximum_prefetch_restored
                        and self.restart_count > 0
                        and self._new_prefetch_count <= self.max_prefetch_count
                    ):
                        ack_log_error_promise.then(
                            self._restore_prefetch_count_after_connection_restart,
                            on_error=self._restore_prefetch_count_after_connection_restart,
                        )
                        reject_log_error_promise.then(
                            self._restore_prefetch_count_after_connection_restart,
                            on_error=self._restore_prefetch_count_after_connection_restart,
                        )

                    strategy(
                        message, payload,
                        ack_log_error_promise,
                        reject_log_error_promise,
                        callbacks,
                    )
                except (InvalidTaskError, ContentDisallowed) as exc:
                    return on_invalid_task(payload, message, exc)
                except DecodeError as exc:
                    return self.on_decode_error(message, exc)

        return on_task_received

    def _restore_prefetch_count_after_connection_restart(self, p, *args):
        with self.qos._mutex:
            if any((
                not self.app.conf.worker_enable_prefetch_count_reduction,
                self._maximum_prefetch_restored,
            )):
                return

            new_prefetch_count = min(self.max_prefetch_count, self._new_prefetch_count)
            self.qos.value = self.initial_prefetch_count = new_prefetch_count
            self.qos.set(self.qos.value)

            already_restored = self._maximum_prefetch_restored
            self._maximum_prefetch_restored = new_prefetch_count == self.max_prefetch_count

            if already_restored is False and self._maximum_prefetch_restored is True:
                logger.info(
                    "Resuming normal operations following a restart.\n"
                    f"Prefetch count has been restored to the maximum of {self.max_prefetch_count}"
                )

    @property
    def max_prefetch_count(self):
        return self.pool.num_processes * self.prefetch_multiplier

    @property
    def _new_prefetch_count(self):
        return self.qos.value + self.prefetch_multiplier

    def __repr__(self):
        """``repr(self)``."""
        return '<Consumer: {self.hostname} ({state})>'.format(
            self=self, state=self.blueprint.human_state(),
        )
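
    # Illustrative worked example (not part of the worker), using placeholder
    # numbers: with 4 pool processes and a prefetch_multiplier of 4,
    # max_prefetch_count is 4 * 4 = 16.  If the connection is lost while 10
    # tasks are still running, on_connection_error_after_connected reduces the
    # prefetch count to max(4, 16 - 10 * 4) = 4.  Each subsequent ack/reject
    # then calls _restore_prefetch_count_after_connection_restart, raising the
    # count by prefetch_multiplier until it reaches 16 again.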

    def cancel_all_unacked_requests(self):
        """Cancel all active requests that either do not require late acknowledgments or,
        if they do, have not been acknowledged yet.
        """

        def should_cancel(request):
            if not request.task.acks_late:
                # Task does not require late acknowledgment, cancel it.
                return True

            if not request.acknowledged:
                # Task is late acknowledged, but it has not been acknowledged yet, cancel it.
                return True

            # Task is late acknowledged, but it has already been acknowledged.
            return False  # Do not cancel and allow it to gracefully finish as it has already been acknowledged.

        requests_to_cancel = tuple(filter(should_cancel, active_requests))

        if requests_to_cancel:
            for request in requests_to_cancel:
                request.cancel(self.pool)

class Evloop(bootsteps.StartStopStep):
    """Event loop service.

    Note:
        This is always started last.
    """

    label = 'event loop'
    last = True