
from __future__ import absolute_import, division

import copy
import logging
import socket
import time

from kafka.errors import KafkaConfigurationError, KafkaTimeoutError, UnsupportedVersionError
from kafka.vendor import six

from kafka.client_async import KafkaClient, selectors
from kafka.consumer.fetcher import Fetcher
from kafka.consumer.subscription_state import SubscriptionState
from kafka.coordinator.consumer import ConsumerCoordinator
from kafka.coordinator.assignors.range import RangePartitionAssignor
from kafka.coordinator.assignors.roundrobin import RoundRobinPartitionAssignor
from kafka.metrics import MetricConfig, Metrics
from kafka.protocol.list_offsets import OffsetResetStrategy
from kafka.structs import OffsetAndMetadata, TopicPartition
from kafka.util import Timer
from kafka.version import __version__

log = logging.getLogger(__name__)


class KafkaConsumer(six.Iterator):
    """Consume records from a Kafka cluster.

The consumer will transparently handle the failure of servers in the Kafka
cluster, and adapt as topic-partitions are created or migrate between
brokers. It also interacts with the assigned kafka Group Coordinator node
to allow multiple consumers to load balance consumption of topics (requires
kafka >= 0.9.0.0).

The consumer is not thread safe and should not be shared across threads.

Arguments:
    *topics (str): optional list of topics to subscribe to. If not set,
        call :meth:`~kafka.KafkaConsumer.subscribe` or
        :meth:`~kafka.KafkaConsumer.assign` before consuming records.

Keyword Arguments:
    bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
        strings) that the consumer should contact to bootstrap initial
        cluster metadata. This does not have to be the full node list.
        It just needs to have at least one broker that will respond to a
        Metadata API Request. Default port is 9092. If no servers are
        specified, will default to localhost:9092.
    client_id (str): A name for this client. This string is passed in
        each request to servers and can be used to identify specific
        server-side log entries that correspond to this client. Also
        submitted to GroupCoordinator for logging with respect to
        consumer group administration. Default: 'kafka-python-{version}'
    group_id (str or None): The name of the consumer group to join for dynamic
        partition assignment (if enabled), and to use for fetching and
        committing offsets. If None, auto-partition assignment (via
        group coordinator) and offset commits are disabled.
        Default: None
    key_deserializer (callable): Any callable that takes a
        raw message key and returns a deserialized key.
    value_deserializer (callable): Any callable that takes a
        raw message value and returns a deserialized value.
    enable_incremental_fetch_sessions (bool): Use incremental fetch sessions
        when available / supported by kafka broker. See KIP-227. Default: True.
    fetch_min_bytes (int): Minimum amount of data the server should
        return for a fetch request, otherwise wait up to
        fetch_max_wait_ms for more data to accumulate. Default: 1.
    fetch_max_wait_ms (int): The maximum amount of time in milliseconds
        the server will block before answering the fetch request if
        there isn't sufficient data to immediately satisfy the
        requirement given by fetch_min_bytes. Default: 500.
    fetch_max_bytes (int): The maximum amount of data the server should
        return for a fetch request. This is not an absolute maximum: if the
        first message in the first non-empty partition of the fetch is
        larger than this value, the message will still be returned to
        ensure that the consumer can make progress. NOTE: consumer performs
        fetches to multiple brokers in parallel so memory usage will depend
        on the number of brokers containing partitions for the topic.
        Supported Kafka version >= 0.10.1.0. Default: 52428800 (50 MB).
    max_partition_fetch_bytes (int): The maximum amount of data
        per-partition the server will return. The maximum total memory
        used for a request = #partitions * max_partition_fetch_bytes.
        This size must be at least as large as the maximum message size
        the server allows or else it is possible for the producer to
        send messages larger than the consumer can fetch. If that
        happens, the consumer can get stuck trying to fetch a large
        message on a certain partition. Default: 1048576.
    request_timeout_ms (int): Client request timeout in milliseconds.
        Default: 305000.
    retry_backoff_ms (int): Milliseconds to backoff when retrying on
        errors. Default: 100.
    reconnect_backoff_ms (int): The amount of time in milliseconds to
        wait before attempting to reconnect to a given host.
        Default: 50.
    reconnect_backoff_max_ms (int): The maximum amount of time in
        milliseconds to backoff/wait when reconnecting to a broker that has
        repeatedly failed to connect. If provided, the backoff per host
        will increase exponentially for each consecutive connection
        failure, up to this maximum. Once the maximum is reached,
        reconnection attempts will continue periodically with this fixed
        rate. To avoid connection storms, a randomization factor of 0.2
        will be applied to the backoff resulting in a random range between
        20% below and 20% above the computed value. Default: 30000.
    max_in_flight_requests_per_connection (int): Requests are pipelined
        to kafka brokers up to this number of maximum requests per
        broker connection. Default: 5.
    auto_offset_reset (str): A policy for resetting offsets on
        OffsetOutOfRange errors: 'earliest' will move to the oldest
        available message, 'latest' will move to the most recent. Any
        other value will raise the exception. Default: 'latest'.
    enable_auto_commit (bool): If True, the consumer's offset will be
        periodically committed in the background. Default: True.
    auto_commit_interval_ms (int): Number of milliseconds between automatic
        offset commits, if enable_auto_commit is True. Default: 5000.
    default_offset_commit_callback (callable): Called as
        callback(offsets, response); response will be either an Exception
        or an OffsetCommitResponse struct. This callback can be used to
        trigger custom actions when a commit request completes.
    check_crcs (bool): Automatically check the CRC32 of the records
        consumed. This ensures no on-the-wire or on-disk corruption to
        the messages occurred. This check adds some overhead, so it may
        be disabled in cases seeking extreme performance. Default: True
    isolation_level (str): Configure KIP-98 transactional consumer by
        setting to 'read_committed'. This will cause the consumer to
        skip records from aborted transactions. Default: 'read_uncommitted'
    allow_auto_create_topics (bool): Enable/disable auto topic creation
        on metadata request. Only available with api_version >= (0, 11).
        Default: True
    metadata_max_age_ms (int): The period of time in milliseconds after
        which we force a refresh of metadata, even if we haven't seen any
        partition leadership changes to proactively discover any new
        brokers or partitions. Default: 300000
    partition_assignment_strategy (list): List of objects to use to
        distribute partition ownership amongst consumer instances when
        group management is used.
        Default: [RangePartitionAssignor, RoundRobinPartitionAssignor]
    max_poll_records (int): The maximum number of records returned in a
        single call to :meth:`~kafka.KafkaConsumer.poll`. Default: 500
    max_poll_interval_ms (int): The maximum delay between invocations of
        :meth:`~kafka.KafkaConsumer.poll` when using consumer group
        management. This places an upper bound on the amount of time that
        the consumer can be idle before fetching more records. If
        :meth:`~kafka.KafkaConsumer.poll` is not called before expiration
        of this timeout, then the consumer is considered failed and the
        group will rebalance in order to reassign the partitions to another
        member. Default: 300000
    session_timeout_ms (int): The timeout used to detect failures when
        using Kafka's group management facilities. The consumer sends
        periodic heartbeats to indicate its liveness to the broker. If
        no heartbeats are received by the broker before the expiration of
        this session timeout, then the broker will remove this consumer
        from the group and initiate a rebalance. Note that the value must
        be in the allowable range as configured in the broker configuration
        by group.min.session.timeout.ms and group.max.session.timeout.ms.
        Default: 10000
    heartbeat_interval_ms (int): The expected time in milliseconds
        between heartbeats to the consumer coordinator when using
        Kafka's group management facilities. Heartbeats are used to ensure
        that the consumer's session stays active and to facilitate
        rebalancing when new consumers join or leave the group. The
        value must be set lower than session_timeout_ms, but typically
        should be set no higher than 1/3 of that value. It can be
        adjusted even lower to control the expected time for normal
        rebalances. Default: 3000
    receive_buffer_bytes (int): The size of the TCP receive buffer
        (SO_RCVBUF) to use when reading data. Default: None (relies on
        system defaults). The Java client defaults to 32768.
    send_buffer_bytes (int): The size of the TCP send buffer
        (SO_SNDBUF) to use when sending data. Default: None (relies on
        system defaults). The Java client defaults to 131072.
    socket_options (list): List of tuple-arguments to socket.setsockopt
        to apply to broker connection sockets. Default:
        [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
    consumer_timeout_ms (int): number of milliseconds to block during
        message iteration before raising StopIteration (i.e., ending the
        iterator). Default: block forever [float('inf')].
    security_protocol (str): Protocol used to communicate with brokers.
        Valid values are: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL.
        Default: PLAINTEXT.
    ssl_context (ssl.SSLContext): Pre-configured SSLContext for wrapping
        socket connections. If provided, all other ssl_* configurations
        will be ignored. Default: None.
    ssl_check_hostname (bool): Flag to configure whether ssl handshake
        should verify that the certificate matches the broker's hostname.
        Default: True.
    ssl_cafile (str): Optional filename of ca file to use in certificate
        verification. Default: None.
    ssl_certfile (str): Optional filename of file in pem format containing
        the client certificate, as well as any ca certificates needed to
        establish the certificate's authenticity. Default: None.
    ssl_keyfile (str): Optional filename containing the client private key.
        Default: None.
    ssl_password (str): Optional password to be used when loading the
        certificate chain. Default: None.
    ssl_crlfile (str): Optional filename containing the CRL to check for
        certificate expiration. By default, no CRL check is done. When
        providing a file, only the leaf certificate will be checked against
        this CRL. The CRL can only be checked with Python 3.4+ or 2.7.9+.
        Default: None.
    ssl_ciphers (str): optionally set the available ciphers for ssl
        connections. It should be a string in the OpenSSL cipher list
        format. If no cipher can be selected (because compile-time options
        or other configuration forbids use of all the specified ciphers),
        an ssl.SSLError will be raised. See ssl.SSLContext.set_ciphers
    api_version (tuple): Specify which Kafka API version to use. If set to
        None, the client will attempt to determine the broker version via
        ApiVersionsRequest API or, for brokers earlier than 0.10, probing
        various known APIs. Dynamic version checking is performed eagerly
        during __init__ and can raise NoBrokersAvailableError if no connection
        was made before timeout (see api_version_auto_timeout_ms below).
        Different versions enable different functionality.

        Examples:
            (3, 9) most recent broker release, enable all supported features
            (0, 11) enables message format v2 (internal)
            (0, 10, 0) enables sasl authentication and message format v1
            (0, 9) enables full group coordination features with automatic
                partition assignment and rebalancing,
            (0, 8, 2) enables kafka-storage offset commits with manual
                partition assignment only,
            (0, 8, 1) enables zookeeper-storage offset commits with manual
                partition assignment only,
            (0, 8, 0) enables basic functionality but requires manual
                partition assignment and offset management.

        Default: None
    api_version_auto_timeout_ms (int): number of milliseconds to throw a
        timeout exception from the constructor when checking the broker
        api version. Only applies if api_version set to None.
        Default: 2000
    connections_max_idle_ms: Close idle connections after the number of
        milliseconds specified by this config. The broker closes idle
        connections after connections.max.idle.ms, so this avoids hitting
        unexpected socket disconnected errors on the client.
        Default: 540000
    metric_reporters (list): A list of classes to use as metrics reporters.
        Implementing the AbstractMetricsReporter interface allows plugging
        in classes that will be notified of new metric creation. Default: []
    metrics_enabled (bool): Whether to track metrics on this instance. Default True.
    metrics_num_samples (int): The number of samples maintained to compute
        metrics. Default: 2
    metrics_sample_window_ms (int): The maximum age in milliseconds of
        samples used to compute metrics. Default: 30000
    selector (selectors.BaseSelector): Provide a specific selector
        implementation to use for I/O multiplexing.
        Default: selectors.DefaultSelector
    exclude_internal_topics (bool): Whether records from internal topics
        (such as offsets) should be exposed to the consumer. If set to True
        the only way to receive records from an internal topic is
        subscribing to it. Requires 0.10+. Default: True
    sasl_mechanism (str): Authentication mechanism when security_protocol
        is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are:
        PLAIN, GSSAPI, OAUTHBEARER, SCRAM-SHA-256, SCRAM-SHA-512.
    sasl_plain_username (str): username for sasl PLAIN and SCRAM authentication.
        Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms.
    sasl_plain_password (str): password for sasl PLAIN and SCRAM authentication.
        Required if sasl_mechanism is PLAIN or one of the SCRAM mechanisms.
    sasl_kerberos_name (str or gssapi.Name): Constructed gssapi.Name for use with
        sasl mechanism handshake. If provided, sasl_kerberos_service_name and
        sasl_kerberos_domain_name are ignored. Default: None.
    sasl_kerberos_service_name (str): Service name to include in GSSAPI
        sasl mechanism handshake. Default: 'kafka'
    sasl_kerberos_domain_name (str): kerberos domain name to use in GSSAPI
        sasl mechanism handshake. Default: one of bootstrap servers
    sasl_oauth_token_provider (kafka.sasl.oauth.AbstractTokenProvider): OAuthBearer
        token provider instance. Default: None
    socks5_proxy (str): Socks5 proxy URL. Default: None
    kafka_client (callable): Custom class / callable for creating KafkaClient instances

Note:
    Configuration parameters are described in more detail at
    https://kafka.apache.org/documentation/#consumerconfigs
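
Example:
    An illustrative sketch of the common use case (the broker address,
    topic name, and group id below are placeholders, and the record
    fields assume the usual ConsumerRecord attributes)::

        consumer = KafkaConsumer('my-topic',
                                 bootstrap_servers='localhost:9092',
                                 group_id='my-group',
                                 auto_offset_reset='earliest')
        for record in consumer:
            # record.value is raw bytes unless value_deserializer is configured
            print(record.topic, record.partition, record.offset, record.value)
        consumer.close()
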
    """

    DEFAULT_CONFIG = {
        'bootstrap_servers': 'localhost',
        'client_id': 'kafka-python-' + __version__,
        'group_id': None,
        'key_deserializer': None,
        'value_deserializer': None,
        'enable_incremental_fetch_sessions': True,
        'fetch_max_wait_ms': 500,
        'fetch_min_bytes': 1,
        'fetch_max_bytes': 52428800,
        'max_partition_fetch_bytes': 1048576,
        'request_timeout_ms': 305000,
        'retry_backoff_ms': 100,
        'reconnect_backoff_ms': 50,
        'reconnect_backoff_max_ms': 30000,
        'max_in_flight_requests_per_connection': 5,
        'auto_offset_reset': 'latest',
        'enable_auto_commit': True,
        'auto_commit_interval_ms': 5000,
        'default_offset_commit_callback': lambda offsets, response: True,
        'check_crcs': True,
        'isolation_level': 'read_uncommitted',
        'allow_auto_create_topics': True,
        'metadata_max_age_ms': 300000,
        'partition_assignment_strategy': (RangePartitionAssignor, RoundRobinPartitionAssignor),
        'max_poll_records': 500,
        'max_poll_interval_ms': 300000,
        'session_timeout_ms': 10000,
        'heartbeat_interval_ms': 3000,
        'receive_buffer_bytes': None,
        'send_buffer_bytes': None,
        'socket_options': [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
        'sock_chunk_bytes': 4096,
        'sock_chunk_buffer_count': 1000,
        'consumer_timeout_ms': float('inf'),
        'security_protocol': 'PLAINTEXT',
        'ssl_context': None,
        'ssl_check_hostname': True,
        'ssl_cafile': None,
        'ssl_certfile': None,
        'ssl_keyfile': None,
        'ssl_crlfile': None,
        'ssl_password': None,
        'ssl_ciphers': None,
        'api_version': None,
        'api_version_auto_timeout_ms': 2000,
        'connections_max_idle_ms': 540000,
        'metric_reporters': [],
        'metrics_enabled': True,
        'metrics_num_samples': 2,
        'metrics_sample_window_ms': 30000,
        'metric_group_prefix': 'consumer',
        'selector': selectors.DefaultSelector,
        'exclude_internal_topics': True,
        'sasl_mechanism': None,
        'sasl_plain_username': None,
        'sasl_plain_password': None,
        'sasl_kerberos_name': None,
        'sasl_kerberos_service_name': 'kafka',
        'sasl_kerberos_domain_name': None,
        'sasl_oauth_token_provider': None,
        'socks5_proxy': None,
        'kafka_client': KafkaClient,
    }

    def __init__(self, *topics, **configs):
                  R                  U5        SSS.nU R
                  S   U;   aB  X@R
                  S      n[        R                  SXPR
                  S   5        XPR
                  S'   U R
                  S   nU R
                  S   nU R
                  S	   nXs=:  a  U:  d  O  [        S
R                  XgU5      5      eU R
                  S   (       aj  SU R
                  S   0n	[        U R
                  S   U R
                  S   U	S9n
U R
                  S    Vs/ s H	  o" 5       PM     nn[        X5      U l        OS U l        [        U R
                  S   [        5      (       a  U R
                  S   nUS:X  a  S U R
                  S'   O5[        [!        ["        UR%                  S5      5      5      U R
                  S'   [        R                  S[        U R
                  S   5      U5        U R
                  S   " S$SU R                  0U R
                  D6U l        U R&                  R
                  S   U R
                  S'   U R
                  S   S:  aZ  SU;  a2  SU;   a  US   U R
                  S'   OU R(                  U R
                  S'   SU;  a  U R
                  S   U R
                  S'   U R
                  S   bN  U R
                  S   U R
                  S   ::  a.  [        SU R
                  S   < SU R
                  S   < S35      e[+        U R
                  S   5      U l        [/        U R&                  U R,                  4SU R                  0U R
                  D6U l        [3        U R&                  U R,                  4U R                  U R
                  S   S .U R
                  D6U l        S!U l        S U l        [;        S"5      U l        U(       a5  U R,                  R?                  US#9  U R&                  RA                  U5        g g s  snf )%NzUnrecognized configs: earliestr/   )smallestlargestr.   z+use auto_offset_reset=%s (%s is deprecated)rY   r&   r!   zzconnections_max_idle_ms ({}) must be larger than request_timeout_ms ({}) which must be larger than fetch_max_wait_ms ({}).r[   z	client-idr   r\   r^   )samplestime_window_mstagsrZ   rW   auto.z7use api_version=%s [tuple] -- "%s" as str is deprecatedrl   metrics)r   
   r#   rC   rB   r   zRequest timeout (z') must be larger than session timeout ()r@   )rv   	assignorsFrL   )topicsr4   )!set
differenceDEFAULT_CONFIGr   copyconfigupdatelogwarningformatr   r   _metrics
isinstancestrtuplemapintsplit_clientDEFAULT_SESSION_TIMEOUT_MS_0_9r   _subscriptionr   _fetcherr   _coordinator_closed	_iteratorfloat_consumer_timeout	subscribe
set_topics)selfrz   configsextra_configs
deprecated
new_configrY   r&   r!   metrics_tagsmetric_configreporter	reportersstr_versions                 r7   __init__KafkaConsumer.__init__U  s   G//0C0CD)*WXXii 3 347#",B
;;*+z9#KK0C$DEJKKE"KK0C$DF/9KK+,"&++.G"H![[)=> KK(;<!P9PP)* /EVW	Y Y ;;()'[)ABL(=R1S8<D^8_.:<M 59KK@R4ST4S4SIT#M=DM DM dkk-0#66++m4Kf$-1M*-23sK<M<Mc<R3S-TM*KKQDKK67F {{>2X4==XDKKX &*\\%8%8%GM" ;;}%
2#72)W48?@V8WDKK 458<8[8[DKK 45%W46:kkBV6W23;;z".{{/0DKK@T4UU-[[!56DX8Y[\ \ /t{{;N/OPLL$,,T6:mmTGK{{T/LL$,,6:mmkk"AB kk !&u(((7LL##F+ e Us   Q8c                 6    U R                   R                  5       $ )z*Return True if the bootstrap is connected.)r   bootstrap_connectedr   s    r7   r   !KafkaConsumer.bootstrap_connected  s    ||//11r:   c                 >   U(       d  U R                  5         gU R                  R                  5         U R                  R	                  U5        U R
                  R                  U Vs/ s H  o"R                  PM     sn5        [        R                  SU5        gs  snf )a,  Manually assign a list of TopicPartitions to this consumer.

Arguments:
    partitions (list of TopicPartition): Assignment for this instance.

Raises:
    IllegalStateError: If consumer has already called
    :meth:`~kafka.KafkaConsumer.subscribe`.

Warning:
    It is not possible to use both manual partition assignment with
    :meth:`~kafka.KafkaConsumer.assign` and group assignment with
    :meth:`~kafka.KafkaConsumer.subscribe`.

Note:
    This interface does not support incremental assignment and will
    replace the previous assignment (if there was one).

Note:
    Manual topic assignment through this method does not use the
    consumer's group management functionality. As such, there will be
    no rebalance operation triggered when group membership or cluster
    and topic metadata change.
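
Example:
    A sketch of manual assignment (the topic name and partition numbers
    are illustrative)::

        consumer = KafkaConsumer(bootstrap_servers='localhost:9092')
        consumer.assign([TopicPartition('my-topic', 0),
                         TopicPartition('my-topic', 1)])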
zSubscribed to partition(s): %sN)
unsubscriber   maybe_auto_commit_offsets_nowr   assign_from_userr   r   topicr   debug)r   
partitionstps      r7   assignKafkaConsumer.assign  ss    2  ;;=//
;LL##
$C
"XX
$CDII6
C %Ds   'Bc                 6    U R                   R                  5       $ )a  Get the TopicPartitions currently assigned to this consumer.

If partitions were directly assigned using
:meth:`~kafka.KafkaConsumer.assign`, then this will simply return the
same partitions that were previously assigned.  If topics were
subscribed using :meth:`~kafka.KafkaConsumer.subscribe`, then this will
give the set of topic partitions currently assigned to the consumer
(which may be None if the assignment hasn't happened yet, or if the
partitions are in the process of being reassigned).

Returns:
    set: {TopicPartition, ...}
)r   assigned_partitionsr   s    r7   
assignmentKafkaConsumer.assignment  s     !!5577r:   c                    U R                   (       a  g[        R                  S5        SU l         U R                  R	                  XS9  U R
                  (       a  U R
                  R	                  5         U R                  R	                  5          U R                  S   R	                  5          U R                  S   R	                  5         [        R                  S5        g! [         a     N@f = f! [         a     N2f = f)a  Close the consumer, waiting indefinitely for any needed cleanup.

Keyword Arguments:
    autocommit (bool): If auto-commit is configured for this consumer,
        this optional flag causes the consumer to attempt to commit any
        pending consumed offsets prior to close. Default: True
    timeout_ms (num, optional): Milliseconds to wait for auto-commit.
        Default: None
NzClosing the KafkaConsumer.T)
autocommit
timeout_msr   r   zThe KafkaConsumer has closed.)	r   r   r   r   closer   r   r   AttributeError)r   r   r   s      r7   r   KafkaConsumer.close  s     <<		./:M==MM!	KK*+113	KK,-335 			12  		  		s$   C ,C/ 
C,+C,/
C<;C<c                     U R                   S   S:  d   S5       eU R                   S   c   S5       eUc  U R                  R                  5       n[        R	                  SU5        U R
                  R                  XS9nU$ )ac  Commit offsets to kafka asynchronously, optionally firing callback.

This commits offsets only to Kafka. The offsets committed using this API
will be used on the first fetch after every rebalance and also on
startup. As such, if you need to store offsets in anything other than
Kafka, this API should not be used. To avoid re-processing the last
message read if a consumer is restarted, the committed offset should be
the next message your application should consume, i.e.: last_offset + 1.

This is an asynchronous call and will not block. Any errors encountered
are either passed to the callback (if provided) or discarded.

Arguments:
    offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
        to commit with the configured group_id. Defaults to currently
        consumed offsets for all subscribed partitions.
    callback (callable, optional): Called as callback(offsets, response)
        with response as either an Exception or an OffsetCommitResponse
        struct. This callback can be used to trigger custom actions when
        a commit request completes.

Returns:
    kafka.future.Future
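
Example:
    A sketch of an asynchronous commit with a completion callback (the
    callback name and the print statement are illustrative)::

        def on_commit(offsets, response):
            if isinstance(response, Exception):
                print('commit failed: %s' % response)

        consumer.commit_async(callback=on_commit)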
rW   r      r#   Requires >= Kafka 0.8.1r   Requires group_idzCommitting offsets: %s)callback)r   r   all_consumed_offsetsr   r   r   commit_offsets_async)r   r5   r   futures       r7   commit_asyncKafkaConsumer.commit_async  s    2 {{=)Y6Q8QQ6{{:&2G4GG2?((==?G		*G4""77 8 (r:   c                     U R                   S   S:  d   S5       eU R                   S   c   S5       eUc  U R                  R                  5       nU R                  R	                  XS9  g)au  Commit offsets to kafka, blocking until success or error.

This commits offsets only to Kafka. The offsets committed using this API
will be used on the first fetch after every rebalance and also on
startup. As such, if you need to store offsets in anything other than
Kafka, this API should not be used. To avoid re-processing the last
message read if a consumer is restarted, the committed offset should be
the next message your application should consume, i.e.: last_offset + 1.

Blocks until either the commit succeeds or an unrecoverable error is
encountered (in which case it is thrown to the caller).

Currently only supports kafka-topic offset storage (not zookeeper).

Arguments:
    offsets (dict, optional): {TopicPartition: OffsetAndMetadata} dict
        to commit with the configured group_id. Defaults to currently
        consumed offsets for all subscribed partitions.
rW   r   r   r   Nr   r   )r   r   r   r   commit_offsets_sync)r   r5   r   s      r7   commitKafkaConsumer.commit  sl    ( {{=)Y6Q8QQ6{{:&2G4GG2?((==?G--g-Mr:   c                    U R                   S   S:  d   S5       eU R                   S   c   S5       e[        U[        5      (       d  [        S5      eU R                  R                  U/US9nX;  a  gU(       a  XA   $ XA   R                  $ )	a  Get the last committed offset for the given partition.

This offset will be used as the position for the consumer
in the event of a failure.

This call will block to do a remote call to get the latest committed
offsets from the server.

Arguments:
    partition (TopicPartition): The partition to check.
    metadata (bool, optional): If True, return OffsetAndMetadata struct
        instead of offset int. Default: False.

Returns:
    The last committed offset (int or OffsetAndMetadata), or None if there was no prior commit.

Raises:
    KafkaTimeoutError if timeout_ms provided
    BrokerResponseErrors if OffsetFetchRequest raises an error.
rW   r   r   r   Nr   -partition must be a TopicPartition namedtupler   )r   r   r   	TypeErrorr   fetch_committed_offsetsoffset)r   	partitionmetadatar   	committeds        r7   r   KafkaConsumer.committed9  s    * {{=)Y6Q8QQ6{{:&2G4GG2)^44KLL%%==ykV`=a	%'/y#PY5I5P5PPr:   c                 r   U R                   R                  nU R                   R                  (       aD  U R                   R                  (       a)  UR	                  5       nU R                   R                  US9  UR                  nSUl        UR	                  5       nU R                   R                  US9  X1l        g)zoA blocking call that fetches topic metadata for all topics in the
cluster that the user is authorized to view.
)r   TN)r   cluster_metadata_refresh_in_progress_topicsrequest_updatepollneed_all_topic_metadata)r   r   r   stashs       r7   _fetch_all_topic_metadata'KafkaConsumer._fetch_all_topic_metadataW  s     ,,&&<<55$,,:N:N++-FLLV,//*.''')(*/'r:   c                 j    U R                  5         U R                  R                  R                  5       $ )zGet all topics the user is authorized to view.
This will always issue a remote call to the cluster to fetch the latest
information.

Returns:
    set: topics
)r   r   r   rz   r   s    r7   rz   KafkaConsumer.topicse  s)     	&&(||##**,,r:   c                     U R                   R                  nUR                  U5      nUc!  U R                  5         UR                  U5      nU=(       d
    [	        5       $ )av  This method first checks the local metadata cache for information
about the topic. If the topic is not found (either because the topic
does not exist, the user is not authorized to view the topic, or the
metadata cache is not populated), then it will issue a metadata update
call to the cluster.

Arguments:
    topic (str): Topic to check.

Returns:
    set: Partition ids
)r   r   partitions_for_topicr   r{   )r   r   r   r   s       r7   r   "KafkaConsumer.partitions_for_topicp  sR     ,,&&11%8
**, 55e<J"SU"r:   c                 ~   US:  d   S5       eUc  U R                   S   n[        U[        5      (       d   S5       eUS:  d   S5       eU R                  (       a   S5       e[	        U5      nU R                  (       d@  U R                  XBUS9nU(       a  U$ UR                  (       a   0 $ U R                  (       d  M@  0 $ )a+  Fetch data from assigned topics / partitions.

Records are fetched and returned in batches by topic-partition.
On each poll, consumer will try to use the last consumed offset as the
starting offset and fetch sequentially. The last consumed offset can be
manually set through :meth:`~kafka.KafkaConsumer.seek` or automatically
set as the last committed offset for the subscribed list of partitions.

Incompatible with iterator interface -- use one or the other, not both.

Arguments:
    timeout_ms (int, optional): Milliseconds spent waiting in poll if
        data is not available in the buffer. If 0, returns immediately
        with any records that are available currently in the buffer,
        else returns empty. Must not be negative. Default: 0
    max_records (int, optional): The maximum number of records returned
        in a single call to :meth:`~kafka.KafkaConsumer.poll`.
        Default: Inherit value from max_poll_records.

Returns:
    dict: Topic to list of records since the last fetch for the
        subscribed list of topics and partitions.
r   zTimeout must not be negativerA   zmax_records must be an integerzmax_records must be positivezKafkaConsumer is closedupdate_offsets)r   r   r   r   r   
_poll_onceexpired)r   r   max_recordsr   timerrecordss         r7   r   KafkaConsumer.poll  s    < Q> >>++&89K+s++M-MM+Q> >><<:!:: j!,,ooeoXG	 ,,, 	r:   c                    U R                   R                  UR                  S9(       d  [        R	                  S5        0 $ U R                  UR                  S9nU R                  R                  X#S9u  pV[        R	                  SXV5        U(       dX  [        R	                  S5        U R                  R                  5       n[        U5      (       a  U R                  R                  SS9  U(       a  U$ [        UR                  U R                   R                  5       S-  5      nU(       d-  [        R	                  S5        [        XR                  S	   5      nU R                  R                  US9  U R                   R                  5       (       a  [        R	                  S
5        0 $ U R                  R                  X#S9u  pYU$ )a  Do one round of polling. In addition to checking for new data, this does
any needed heart-beating, auto-commits, and offset updates.

Arguments:
    timer (Timer): The maximum time in milliseconds to block.

Returns:
    dict: Map of topic to list of records (may be empty).
r   z8poll: timeout during coordinator.poll(); returning earlyr   zpoll: fetched records: %s, %szpoll: Sending fetchesr   rJ   z(poll: do not have all fetch positions...r'   z/poll: coordinator needs rejoin; returning early)r   r   r   r   r   _update_fetch_positionsr   fetched_recordssend_fetcheslenr   mintime_to_next_pollr   need_rejoin)
r   r   r   r   has_all_fetch_positionsr   partialfuturespoll_timeout_ms_s
             r7   r   KafkaConsumer._poll_once  sk      %%1A1A%BIIPQI"&">">%JZJZ">"[  ==888d		17D
 II-.mm002G7||!!Q!/N e..0A0A0S0S0UX\0\]&II@A!/;;?Q3RSO_5 ((**IIGHI]]22;2^
r:   c                    [        U[        5      (       d  [        S5      eU R                  R	                  U5      (       d   S5       e[        U5      nU R                  R                  U   R                  nUcY  U R                  UR                  S9(       a$  U R                  R                  U   R                  nOUR                  (       a  gUc  MY  UR                  $ )zGet the offset of the next record that will be fetched

Arguments:
    partition (TopicPartition): Partition to check

Returns:
    int: Offset or None
r   Partition is not assignedNr   )r   r   r   r   is_assignedr   r   positionr   r   r   r   )r   r   r   r   r   s        r7   r   KafkaConsumer.position  s     )^44KLL!!--i88U:UU8j!%%00;DD++u7G7G+H--88CLL  ??"r:   c                     [        U[        5      (       d  [        S5      eU R                  R	                  U5      (       d   S5       eU R                  R
                  U   R                  $ )ad  Last known highwater offset for a partition.

A highwater offset is the offset that will be assigned to the next
message that is produced. It may be useful for calculating lag, by
comparing with the reported position. Note that both position and
highwater refer to the *next* offset -- i.e., highwater offset is
one greater than the newest available message.

Highwater offsets are returned in FetchResponse messages, so will
not be available if no FetchRequests have been sent for this partition
yet.

Arguments:
    partition (TopicPartition): Partition to check

Returns:
    int or None: Offset if available
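
Example:
    A sketch of computing consumer lag for an assigned partition, assuming
    fetch responses have already been received for it (topic and partition
    are illustrative)::

        tp = TopicPartition('my-topic', 0)
        hw = consumer.highwater(tp)
        if hw is not None:
            lag = hw - consumer.position(tp)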
r   r   )r   r   r   r   r   r   	highwater)r   r   s     r7   r   KafkaConsumer.highwater  s]    & )^44KLL!!--i88U:UU8!!,,Y7AAAr:   c           	          [        U Vs/ s H  n[        U[        5      PM     sn5      (       d  [        S5      eU H4  n[        R                  SU5        U R                  R                  U5        M6     gs  snf )a  Suspend fetching from the requested partitions.

Future calls to :meth:`~kafka.KafkaConsumer.poll` will not return any
records from these partitions until they have been resumed using
:meth:`~kafka.KafkaConsumer.resume`.

Note: This method does not affect partition subscription. In particular,
it does not cause a group rebalance when automatic assignment is used.

Arguments:
    *partitions (TopicPartition): Partitions to pause.
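
Example:
    A sketch of pausing and later resuming one partition (topic and
    partition are illustrative)::

        tp = TopicPartition('my-topic', 0)
        consumer.pause(tp)    # poll() stops returning records for tp
        # ... catch up on other work ...
        consumer.resume(tp)   # fetching from tp resumes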
-partitions must be TopicPartition namedtupleszPausing partition %sN)allr   r   r   r   r   r   pauser   r   pr   s       r7   r  KafkaConsumer.pause  sd     :F:aJq.1:FGGKLL#III,i8$$Y/ $ G   A6c                 6    U R                   R                  5       $ )zGet the partitions that were previously paused using
:meth:`~kafka.KafkaConsumer.pause`.

Returns:
    set: {partition (TopicPartition), ...}
)r   paused_partitionsr   s    r7   pausedKafkaConsumer.paused+  s     !!3355r:   c           	          [        U Vs/ s H  n[        U[        5      PM     sn5      (       d  [        S5      eU H4  n[        R                  SU5        U R                  R                  U5        M6     gs  snf )z|Resume fetching from the specified (paused) partitions.

Arguments:
    *partitions (TopicPartition): Partitions to resume.
r  zResuming partition %sN)r  r   r   r   r   r   r   resumer  s       r7   r  KafkaConsumer.resume4  sd     :F:aJq.1:FGGKLL#III-y9%%i0 $ Gr  c                 Z   [        U[        5      (       d  [        S5      e[        U[        5      (       a  US:  d   S5       eXR                  R                  5       ;   d   S5       e[        R                  SX!5        U R                  R                  U   R                  U5        SU l
        g)a  Manually specify the fetch offset for a TopicPartition.

Overrides the fetch offsets that the consumer will use on the next
:meth:`~kafka.KafkaConsumer.poll`. If this API is invoked for the same
partition more than once, the latest offset will be used on the next
:meth:`~kafka.KafkaConsumer.poll`.

Note: You may lose data if this API is arbitrarily used in the middle of
consumption to reset the fetch offsets.

Arguments:
    partition (TopicPartition): Partition for seek operation
    offset (int): Message offset in partition

Raises:
    AssertionError: If offset is not an int >= 0; or if partition is not
        currently assigned.
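
Example:
    A sketch of repositioning a single assigned partition (the offset
    value is illustrative)::

        tp = TopicPartition('my-topic', 0)
        consumer.seek(tp, 42)  # the next record fetched from tp will be offset 42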
r   r   zOffset must be >= 0Unassigned partitionz%Seeking to offset %s for partition %sN)r   r   r   r   r   r   r   r   r   seekr   )r   r   r   s      r7   r  KafkaConsumer.seek@  s    & )^44KLL&#&&6Q;M8MM6..BBDD\F\\D		96M%%i055f=r:   c           	         [        U Vs/ s H  n[        U[        5      PM     sn5      (       d  [        S5      eU(       d)  U R                  R                  5       nU(       d   S5       eO-U H'  nX R                  R                  5       ;   a  M"   S5       e   U HC  n[        R                  SU5        U R                  R                  U[        R                  5        ME     SU l        gs  snf )a'  Seek to the oldest available offset for partitions.

Arguments:
    *partitions: Optionally provide specific TopicPartitions, otherwise
        default to all assigned partitions.

Raises:
    AssertionError: If any partition is not currently assigned, or if
        no partitions are assigned.
r  $No partitions are currently assignedr  z$Seeking to beginning of partition %sN)r  r   r   r   r   r   r   r   request_offset_resetr   EARLIESTr   r   r   r  r   s       r7   seek_to_beginningKafkaConsumer.seek_to_beginning[  s     :F:aJq.1:FGGKLL++??AJEEE:..BBDD\F\\D   BII<bA33B8K8T8TU   G   C)c           	         [        U Vs/ s H  n[        U[        5      PM     sn5      (       d  [        S5      eU(       d)  U R                  R                  5       nU(       d   S5       eO-U H'  nX R                  R                  5       ;   a  M"   S5       e   U HC  n[        R                  SU5        U R                  R                  U[        R                  5        ME     SU l        gs  snf )a,  Seek to the most recent available offset for partitions.

Arguments:
    *partitions: Optionally provide specific TopicPartitions, otherwise
        default to all assigned partitions.

Raises:
    AssertionError: If any partition is not currently assigned, or if
        no partitions are assigned.
r  r  r  zSeeking to end of partition %sN)r  r   r   r   r   r   r   r   r  r   LATESTr   r  s       r7   seek_to_endKafkaConsumer.seek_to_endt  s     :F:aJq.1:FGGKLL++??AJEEE:..BBDD\F\\D   BII6;33B8K8R8RS   Gr  c                    U R                   R                  UUUS9  Ubq  SU R                  R                  l        U R                  R                  / 5        U R                  R                  R                  5         [        R                  SU5        gSU R                  R                  l        U R                  R                  U R                   R                  5       5        [        R                  SU5        g)a  Subscribe to a list of topics, or a topic regex pattern.

Partitions will be dynamically assigned via a group coordinator.
Topic subscriptions are not incremental: this list will replace the
current assignment (if there is one).

This method is incompatible with :meth:`~kafka.KafkaConsumer.assign`.

Arguments:
    topics (list): List of topics for subscription.
    pattern (str): Pattern to match available topics. You must provide
        either topics or pattern, but not both.
    listener (ConsumerRebalanceListener): Optionally include listener
        callback, which will be called before and after each rebalance
        operation.

        As part of group management, the consumer will keep track of the
        list of consumers that belong to a particular group and will
        trigger a rebalance operation if one of the following events
        trigger:

        * Number of partitions change for any of the subscribed topics
        * Topic is created or deleted
        * An existing member of the consumer group dies
        * A new member is added to the consumer group

        When any of these events are triggered, the provided listener
        will be invoked first to indicate that the consumer's assignment
        has been revoked, and then again when the new assignment has
        been received. Note that this listener will immediately override
        any listener set in a previous call to subscribe. It is
        guaranteed, however, that the partitions revoked/assigned
        through this interface are from topics subscribed in this call.

Raises:
    IllegalStateError: If called after previously calling
        :meth:`~kafka.KafkaConsumer.assign`.
    AssertionError: If neither topics or pattern is provided.
    TypeError: If listener is not a ConsumerRebalanceListener.
)rz   patternlistenerNTzSubscribed to topic pattern: %sFzSubscribed to topic(s): %s)
r   r   r   r   r   r   r   r   r   group_subscription)r   rz   r!  r"  s       r7   r   KafkaConsumer.subscribe  s    T 	$$F-4.6 	% 	8
 ;?DLL  8LL##B'LL  //1II7A;@DLL  8LL##D$6$6$I$I$KLII2F;r:   c                 z    U R                   R                  c  gU R                   R                  R                  5       $ )zDGet the current topic subscription.

Returns:
    set: {topic, ...}
N)r   subscriptionr~   r   s    r7   r&  KafkaConsumer.subscription  s4     **2!!..3355r:   c                 j   U R                   R                  5         U R                  R                  5         U R                  S   S:  a  U R                   R                  5         SU R                  R                  l        U R                  R                  / 5        [        R                  S5        SU l        g)z>Unsubscribe from all topics and clear all assigned partitions.rW   r   	   Fz;Unsubscribed all topics or patterns and assigned partitionsN)r   r   r   r   r   maybe_leave_groupr   r   r   r   r   r   r   r   s    r7   r   KafkaConsumer.unsubscribe  s     	779&&(;;}%///17<4#		OPr:   c                    U R                   (       d  gU(       a$  U R                   R                  R                  5       $ 0 n[        R                  " U R                   R                  R                  5       5       H  u  p4UR
                  U;  a  0 X#R
                  '   UR                  X#R
                     ;  a  0 X#R
                     UR                  '   UR                  5       X#R
                     UR                  '   M     U$ )zGet metrics on consumer performance.

This is ported from the Java Consumer, for details see:
https://kafka.apache.org/documentation/#consumer_monitoring

Warning:
    This is an unstable interface. It may change in future
    releases without warning.
N)r   rv   r~   r   	iteritemsgroupnamevalue)r   rawrv   kvs        r7   rv   KafkaConsumer.metrics  s     }}==((--//MM$--"7"7"<"<">?DAwwg%#% vvWWW--+- ('(wwyGGGQVV$ @ r:   c                 ^   U R                   S   S::  a'  [        SR                  U R                   S   5      5      e[        R                  " U5       H2  u  p#[        U5      X'   US:  d  M  [        SR                  X#5      5      e   U R                  R                  XR                   S   5      $ )a  Look up the offsets for the given partitions by timestamp. The
returned offset for each partition is the earliest offset whose
timestamp is greater than or equal to the given timestamp in the
corresponding partition.

This is a blocking call. The consumer does not have to be assigned the
partitions.

If the message format version in a partition is before 0.10.0, i.e.
the messages do not have timestamps, ``None`` will be returned for that
partition. ``None`` will also be returned for the partition if there
are no messages in it.

Note:
    This method may block indefinitely if the partition does not exist.

Arguments:
    timestamps (dict): ``{TopicPartition: int}`` mapping from partition
        to the timestamp to look up. Unit should be milliseconds since
        beginning of the epoch (midnight Jan 1, 1970 (UTC))

Returns:
    ``{TopicPartition: OffsetAndTimestamp}``: mapping from partition
    to the timestamp and offset of the first message with timestamp
    greater than or equal to the target timestamp.

Raises:
    ValueError: If the target timestamp is negative
    UnsupportedVersionError: If the broker does not support looking
        up the offsets by timestamp.
    KafkaTimeoutError: If fetch failed in request_timeout_ms
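
Example:
    A sketch that rewinds one partition to roughly one hour ago (the topic
    and partition are illustrative)::

        import time
        target_ms = int(time.time() * 1000) - 3600 * 1000  # ms since the epoch
        tp = TopicPartition('my-topic', 0)
        offsets = consumer.offsets_for_times({tp: target_ms})
        if offsets[tp] is not None:
            consumer.seek(tp, offsets[tp].offset)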
rW   )r   rw   r   z:offsets_for_times API not supported for cluster version {}r   zKThe target time for partition {} is {}. The target time cannot be negative.r&   )	r   r   r   r   r.  r   
ValueErrorr   offsets_by_times)r   
timestampsr   tss       r7   offsets_for_timesKafkaConsumer.offsets_for_times  s    B ;;}%3)LM235 5 mmJ/FB WJNAv **0&.: : 0 }}--$89; 	;r:   c                 V    U R                   R                  XR                  S   5      nU$ )aD  Get the first offset for the given partitions.

This method does not change the current consumer position of the
partitions.

Note:
    This method may block indefinitely if the partition does not exist.

Arguments:
    partitions (list): List of TopicPartition instances to fetch
        offsets for.

Returns:
    ``{TopicPartition: int}``: The earliest available offsets for the
    given partitions.

Raises:
    UnsupportedVersionError: If the broker does not support looking
        up the offsets by timestamp.
    KafkaTimeoutError: If fetch failed in request_timeout_ms.
r&   )r   beginning_offsetsr   r   r   r5   s      r7   r>  KafkaConsumer.beginning_offsets#  s*    , --11$89;r:   c                 V    U R                   R                  XR                  S   5      nU$ )a  Get the last offset for the given partitions. The last offset of a
partition is the offset of the upcoming message, i.e. the offset of the
last available message + 1.

This method does not change the current consumer position of the
partitions.

Note:
    This method may block indefinitely if the partition does not exist.

Arguments:
    partitions (list): List of TopicPartition instances to fetch
        offsets for.

Returns:
    ``{TopicPartition: int}``: The end offsets for the given partitions.

Raises:
    UnsupportedVersionError: If the broker does not support looking
        up the offsets by timestamp.
    KafkaTimeoutError: If fetch failed in request_timeout_ms
r&   )r   end_offsetsr   r?  s      r7   rB  KafkaConsumer.end_offsets=  s*    . --++$89;r:   c                     U R                   S   S:  a  gU R                   S   c  gU R                  R                  5       (       d  gg)zIReturn True iff this consumer can/should join a broker-coordinated group.rW   r)  Fr   T)r   r   partitions_auto_assignedr   s    r7   _use_consumer_group!KafkaConsumer._use_consumer_groupX  sC    ;;}%.[[$,##<<>>r:   c                 8   U R                   R                  5       (       a  gU R                  S   S:  a/  U R                  S   b  U R                  R	                  US9(       d  gU R                   R                  5         U R                  R                  5       (       + $ )a  Set the fetch position to the committed position (if there is one)
or reset it using the offset reset policy the user has configured.

Arguments:
    partitions (List[TopicPartition]): The partitions that need
        updating fetch positions.

Returns True if fetch positions updated, False if timeout or async reset is pending

Raises:
    NoOffsetForPartitionError: If no offset is stored for a given
        partition and no offset reset policy is defined.
TrW   r   r   r   F)r   r   r   r   #refresh_committed_offsets_if_neededreset_missing_positionsr   reset_offsets_if_needed)r   r   s     r7   r   %KafkaConsumer._update_fetch_positionsb  s     5577KK&)3KK
#/ $$HHT^H_
 	224 ==88:::r:   c              #     #    S[        SU R                  [        R                  " 5       -
  5      -  nU R                  USS9n[        R
                  " U5       H  u  p4U Hv  nU R                  R                  U5      (       d  [        R                  SU5          MC  [        UR                  S-   SS5      U R                  R                  U   l        Uv   Mx     M     g 7f)	NrJ   r   F)r   r   zNNot returning fetched records for partition %s since it is no longer fetchabler#    )maxr   timer   r   r.  r   is_fetchabler   r   r   r   r   r   )r   r   
record_mapr   r   records         r7   _message_generator_v2#KafkaConsumer._message_generator_v2  s     C4#9#9DIIK#GHH
YY*UYK
==4KB "
 ))66r::II ABDF=Nv}}_`O`bdfh=i""--b1: "	 5s   CCc                     U $ Nr4   r   s    r7   __iter__KafkaConsumer.__iter__  s    r:   c                    U R                   (       a  [        S5      eU R                  5         [        R                  " 5       U R                  :  a<  U R
                  (       d  U R                  5       U l         [        U R
                  5      $ [        5       e! [         a
    S U l         Of = f[        R                  " 5       U R                  :  a  M  NG)NzKafkaConsumer closed)r   StopIteration_set_consumer_timeoutrQ  r   r   rU  nextr   s    r7   __next__KafkaConsumer.__next__  s    << 677""$iikD222>>!%!;!;!=&DNN++ o ! &!%& iikD222s   7B B*)B*c                     U R                   S   S:  a.  [        R                  " 5       U R                   S   S-  -   U l        g g )NrK   r   g     @@)r   rQ  r   r   s    r7   r]  #KafkaConsumer._set_consumer_timeout  s>    ;;,-2%)YY[12V;&=D" 3r:   )	r   r   r   r   r   r   r   r   r   )TN)NN)FN)r   NT)TrX  )r4   NN)F)4__name__
__module____qualname____firstlineno____doc__r   r   r   socketIPPROTO_TCPTCP_NODELAYr   r
   DefaultSelectorr	   r}   r   r   r   r   r   r   r   r   r   r   rz   r   r   r   r   r   r  r  r  r  r  r  r   r&  r   rv   r;  r>  rB  rF  r   rU  rY  r_  r]  __static_attributes__r4   r:   r7   r   r      s   vn?[?_{2? 	D? 	D	?
 	d? 	,T? 	S? 	1? 	8? 	$_? 	f? 	C? 	? 	#E? 	0?  	X!?" 	d#?$ 	"4%?& 	)*H'?( 	d)?* 	-+?, 	#D-?. 	}/?0 	(*@B])^1?2 	C3?4 	5?6 	e7?8 	 9?: 	;?< 	T=?> 	F..0B0BAFG??@ 	DA?B 	"4C?D 	uU|E?F 	[G?H 	tI?J 	dK?L 	dM?N 	O?P 	tQ?R 	tS?T 	U?V 	tW?X 	tY?Z 	&t[?\ 	"=]?^ 	B_?` 	4a?b 	qc?d 	#Ee?f 	zg?h --#'##"&-%)%)#}?N@ &+"S,j2!DF8 38 DN4Q<0	-#(-^0d#0B00,6
16227<r60,;\46";H&=r:   r   )+