<?xml version="1.0"?>
<doc>
<assembly>
<name>Confluent.Kafka</name>
</assembly>
<members>
<member name="T:Confluent.Kafka.AdminClient">
<summary>
Implements an Apache Kafka admin client.
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClient.DescribeConfigsAsync(System.Collections.Generic.IEnumerable{Confluent.Kafka.Admin.ConfigResource},Confluent.Kafka.Admin.DescribeConfigsOptions)">
<summary>
Refer to <see cref="M:Confluent.Kafka.IAdminClient.DescribeConfigsAsync(System.Collections.Generic.IEnumerable{Confluent.Kafka.Admin.ConfigResource},Confluent.Kafka.Admin.DescribeConfigsOptions)" />
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClient.AlterConfigsAsync(System.Collections.Generic.Dictionary{Confluent.Kafka.Admin.ConfigResource,System.Collections.Generic.List{Confluent.Kafka.Admin.ConfigEntry}},Confluent.Kafka.Admin.AlterConfigsOptions)">
<summary>
Refer to <see cref="M:Confluent.Kafka.IAdminClient.AlterConfigsAsync(System.Collections.Generic.Dictionary{Confluent.Kafka.Admin.ConfigResource,System.Collections.Generic.List{Confluent.Kafka.Admin.ConfigEntry}},Confluent.Kafka.Admin.AlterConfigsOptions)" />
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClient.CreateTopicsAsync(System.Collections.Generic.IEnumerable{Confluent.Kafka.Admin.TopicSpecification},Confluent.Kafka.Admin.CreateTopicsOptions)">
<summary>
Refer to <see cref="M:Confluent.Kafka.IAdminClient.CreateTopicsAsync(System.Collections.Generic.IEnumerable{Confluent.Kafka.Admin.TopicSpecification},Confluent.Kafka.Admin.CreateTopicsOptions)" />
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClient.DeleteTopicsAsync(System.Collections.Generic.IEnumerable{System.String},Confluent.Kafka.Admin.DeleteTopicsOptions)">
<summary>
Refer to <see cref="M:Confluent.Kafka.IAdminClient.DeleteTopicsAsync(System.Collections.Generic.IEnumerable{System.String},Confluent.Kafka.Admin.DeleteTopicsOptions)" />
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClient.CreatePartitionsAsync(System.Collections.Generic.IEnumerable{Confluent.Kafka.Admin.PartitionsSpecification},Confluent.Kafka.Admin.CreatePartitionsOptions)">
<summary>
Refer to <see cref="M:Confluent.Kafka.IAdminClient.CreatePartitionsAsync(System.Collections.Generic.IEnumerable{Confluent.Kafka.Admin.PartitionsSpecification},Confluent.Kafka.Admin.CreatePartitionsOptions)" />
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClient.DeleteRecordsAsync(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset},Confluent.Kafka.Admin.DeleteRecordsOptions)">
<summary>
Refer to <see cref="M:Confluent.Kafka.IAdminClient.DeleteRecordsAsync(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset},Confluent.Kafka.Admin.DeleteRecordsOptions)" />
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClient.#ctor(Confluent.Kafka.Handle)">
<summary>
Initialize a new AdminClient instance.
</summary>
<param name="handle">
An underlying librdkafka client handle that the AdminClient will use to
make broker requests. It is valid to provide the handle of a Consumer,
Producer, or another AdminClient.
</param>
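<example>
A minimal sketch of sharing one handle between clients. The broker
address is an assumption, and the DependentAdminClientBuilder shown
here is the usual way this constructor is reached from application code:
<code>
var producerConfig = new ProducerConfig { BootstrapServers = "localhost:9092" };
using (var producer = new ProducerBuilder&lt;Null, string&gt;(producerConfig).Build())
using (var adminClient = new DependentAdminClientBuilder(producer.Handle).Build())
{
    // The admin client reuses the producer's underlying librdkafka
    // handle, so no additional broker connections are created.
}
</code>
</example>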
</member>
<member name="M:Confluent.Kafka.AdminClient.ListGroups(System.TimeSpan)">
<summary>
Refer to <see cref="M:Confluent.Kafka.IAdminClient.ListGroups(System.TimeSpan)" />
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClient.ListGroup(System.String,System.TimeSpan)">
<summary>
Refer to <see cref="M:Confluent.Kafka.IAdminClient.ListGroup(System.String,System.TimeSpan)" />
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClient.GetMetadata(System.TimeSpan)">
<summary>
Refer to <see cref="M:Confluent.Kafka.IAdminClient.GetMetadata(System.TimeSpan)" />
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClient.GetMetadata(System.String,System.TimeSpan)">
<summary>
Refer to <see cref="M:Confluent.Kafka.IAdminClient.GetMetadata(System.String,System.TimeSpan)" />
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClient.AddBrokers(System.String)">
<summary>
Refer to <see cref="M:Confluent.Kafka.IClient.AddBrokers(System.String)" />
</summary>
</member>
<member name="P:Confluent.Kafka.AdminClient.Name">
<summary>
Refer to <see cref="P:Confluent.Kafka.IClient.Name" />
</summary>
</member>
<member name="P:Confluent.Kafka.AdminClient.Handle">
<summary>
An opaque reference to the underlying librdkafka
client instance.
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClient.Dispose">
<summary>
Releases all resources used by this AdminClient. In the current
implementation, this method may block for up to 100ms. This
will be replaced with a non-blocking version in the future.
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClient.Dispose(System.Boolean)">
<summary>
Releases the unmanaged resources used by the
<see cref="T:Confluent.Kafka.AdminClient" />
and optionally disposes the managed resources.
</summary>
<param name="disposing">
true to release both managed and unmanaged resources;
false to release only unmanaged resources.
</param>
</member>
<member name="T:Confluent.Kafka.AdminClientBuilder">
<summary>
A builder for <see cref="T:Confluent.Kafka.IAdminClient" />.
</summary>
</member>
<member name="P:Confluent.Kafka.AdminClientBuilder.Config">
<summary>
The config dictionary.
</summary>
</member>
<member name="P:Confluent.Kafka.AdminClientBuilder.ErrorHandler">
<summary>
The configured error handler.
</summary>
</member>
<member name="P:Confluent.Kafka.AdminClientBuilder.LogHandler">
<summary>
The configured log handler.
</summary>
</member>
<member name="P:Confluent.Kafka.AdminClientBuilder.StatisticsHandler">
<summary>
The configured statistics handler.
</summary>
</member>
<member name="P:Confluent.Kafka.AdminClientBuilder.OAuthBearerTokenRefreshHandler">
<summary>
The configured OAuthBearer Token Refresh handler.
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClientBuilder.#ctor(System.Collections.Generic.IEnumerable{System.Collections.Generic.KeyValuePair{System.String,System.String}})">
<summary>
Initialize a new <see cref="T:Confluent.Kafka.AdminClientBuilder" /> instance.
</summary>
<param name="config">
A collection of librdkafka configuration parameters
(refer to https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
and parameters specific to this client (refer to:
<see cref="T:Confluent.Kafka.ConfigPropertyNames" />).
At a minimum, 'bootstrap.servers' must be specified.
</param>
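<example>
A minimal sketch, assuming a broker is reachable at localhost:9092:
<code>
var config = new AdminClientConfig { BootstrapServers = "localhost:9092" };
using (var adminClient = new AdminClientBuilder(config).Build())
{
    var metadata = adminClient.GetMetadata(TimeSpan.FromSeconds(10));
    Console.WriteLine($"Cluster has {metadata.Brokers.Count} broker(s).");
}
</code>
</example>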
</member>
<member name="M:Confluent.Kafka.AdminClientBuilder.SetStatisticsHandler(System.Action{Confluent.Kafka.IAdminClient,System.String})">
<summary>
Set the handler to call on statistics events. Statistics are provided
as a JSON formatted string as defined here:
https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md
</summary>
<remarks>
You can enable statistics by setting the statistics interval
using the statistics.interval.ms configuration parameter
(disabled by default).
Executes on the poll thread (a background thread managed by
the admin client).
</remarks>
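<example>
A minimal sketch; the one minute interval is illustrative:
<code>
var config = new AdminClientConfig
{
    BootstrapServers = "localhost:9092",
    StatisticsIntervalMs = 60000
};
using (var adminClient = new AdminClientBuilder(config)
    .SetStatisticsHandler((client, json) => Console.WriteLine(json))
    .Build())
{
    // The handler receives a JSON statistics string every 60s.
}
</code>
</example>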
</member>
<member name="M:Confluent.Kafka.AdminClientBuilder.SetErrorHandler(System.Action{Confluent.Kafka.IAdminClient,Confluent.Kafka.Error})">
<summary>
Set the handler to call on error events e.g. connection failures or all
brokers down. Note that the client will try to automatically recover from
errors that are not marked as fatal. Non-fatal errors should be interpreted
as informational rather than catastrophic.
</summary>
<remarks>
Executes on the poll thread (a background thread managed by the admin
client).
</remarks>
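<example>
A minimal sketch (broker address assumed):
<code>
var config = new AdminClientConfig { BootstrapServers = "localhost:9092" };
using (var adminClient = new AdminClientBuilder(config)
    .SetErrorHandler((client, error) =>
        Console.WriteLine($"Error: {error.Reason} (fatal: {error.IsFatal})"))
    .Build())
{
    // Non-fatal errors are informational; the client attempts to
    // recover from them automatically.
}
</code>
</example>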
</member>
<member name="M:Confluent.Kafka.AdminClientBuilder.SetLogHandler(System.Action{Confluent.Kafka.IAdminClient,Confluent.Kafka.LogMessage})">
<summary>
Set the handler to call when there is information available
to be logged. If not specified, a default callback that writes
to stderr will be used.
</summary>
<remarks>
By default not many log messages are generated.
For more verbose logging, specify one or more debug contexts
using the 'debug' configuration property.
Warning: Log handlers are called spontaneously from internal
librdkafka threads and the application must not call any
Confluent.Kafka APIs from within a log handler or perform any
prolonged operations.
</remarks>
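<example>
A minimal sketch; the debug contexts shown are illustrative:
<code>
var config = new AdminClientConfig
{
    BootstrapServers = "localhost:9092",
    Debug = "broker,topic" // enables more verbose logging
};
using (var adminClient = new AdminClientBuilder(config)
    .SetLogHandler((client, msg) =>
        Console.WriteLine($"{msg.Level} {msg.Name}: {msg.Message}"))
    .Build())
{
    // Keep the handler fast and do not call other Confluent.Kafka
    // APIs from within it.
}
</code>
</example>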
</member>
<member name="M:Confluent.Kafka.AdminClientBuilder.SetOAuthBearerTokenRefreshHandler(System.Action{Confluent.Kafka.IProducer{Confluent.Kafka.Null,Confluent.Kafka.Null},System.String})">
<summary>
Set the SASL/OAUTHBEARER token refresh callback. The callback
is triggered by <see cref="T:Confluent.Kafka.IAdminClient"/>'s admin methods
(or any of their overloads) whenever OAUTHBEARER is the SASL
mechanism and a token needs to be retrieved, typically
based on the configuration defined in
sasl.oauthbearer.config. The callback should invoke
<see cref="M:Confluent.Kafka.ClientExtensions.OAuthBearerSetToken(Confluent.Kafka.IClient,System.String,System.Int64,System.String,System.Collections.Generic.IDictionary{System.String,System.String})"/>
or <see cref="M:Confluent.Kafka.ClientExtensions.OAuthBearerSetTokenFailure(Confluent.Kafka.IClient,System.String)"/>
to indicate success or failure, respectively.
An unsecured JWT refresh handler is provided by librdkafka
for development and testing purposes; it is enabled by
setting the enable.sasl.oauthbearer.unsecure.jwt property
to true and is mutually exclusive with using a refresh callback.
</summary>
<param name="oAuthBearerTokenRefreshHandler">
the callback to set. Callback arguments:
IProducer - the admin client's inner producer instance, which
should be used to set the token or the token failure;
string - the value of the sasl.oauthbearer.config configuration
property.
</param>
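<example>
A sketch only: AcquireToken and the shape of its result (Value,
ExpiresAtMs, Principal) are hypothetical application code, not part
of this library.
<code>
var config = new AdminClientConfig
{
    BootstrapServers = "localhost:9092",
    SecurityProtocol = SecurityProtocol.SaslSsl,
    SaslMechanism = SaslMechanism.OAuthBearer
};
var adminClient = new AdminClientBuilder(config)
    .SetOAuthBearerTokenRefreshHandler((producer, oauthConfig) =>
    {
        try
        {
            // AcquireToken is hypothetical application code that
            // obtains a bearer token, e.g. from an identity provider.
            var token = AcquireToken(oauthConfig);
            producer.OAuthBearerSetToken(token.Value, token.ExpiresAtMs, token.Principal);
        }
        catch (Exception e)
        {
            producer.OAuthBearerSetTokenFailure(e.Message);
        }
    })
    .Build();
</code>
</example>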
</member>
<member name="M:Confluent.Kafka.AdminClientBuilder.Build">
<summary>
Build the <see cref="T:Confluent.Kafka.AdminClient" /> instance.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.AlterConfigsException">
<summary>
Represents an error that occurred during an alter configs request.
</summary>
</member>
<member name="M:Confluent.Kafka.Admin.AlterConfigsException.#ctor(System.Collections.Generic.List{Confluent.Kafka.Admin.AlterConfigsReport})">
<summary>
Initializes a new instance of AlterConfigsException.
</summary>
<param name="results">
The result corresponding to all ConfigResources in the request
(whether or not they were in error). At least one of these
results will be in error.
</param>
</member>
<member name="P:Confluent.Kafka.Admin.AlterConfigsException.Results">
<summary>
The result corresponding to all ConfigResources in the request
(whether or not they were in error). At least one of these
results will be in error.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.AlterConfigsOptions">
<summary>
Options for the AlterConfigs method.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.AlterConfigsOptions.RequestTimeout">
<summary>
The overall request timeout, including broker lookup, request
transmission, operation time on broker, and response. If set
to null, the default request timeout for the AdminClient will
be used.
Default: null
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.AlterConfigsOptions.ValidateOnly">
<summary>
If true, the request should be validated only, without altering
the configs.
Default: false
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.AlterConfigsReport">
<summary>
The result of an alter config request for a specific resource.
</summary>
</member>
<member name="F:Confluent.Kafka.Admin.AlterConfigsReport.ConfigResource">
<summary>
The resource the result corresponds to.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.AlterConfigsReport.Error">
<summary>
The error (or success) of the alter config request.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.ConfigEntry">
<summary>
Encapsulates a config property name / value pair.
</summary>
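<example>
A minimal sketch of using ConfigEntry with AlterConfigsAsync (topic
name and value are assumptions; adminClient is built via
AdminClientBuilder). Note the underlying AlterConfigs protocol is
not incremental, so unspecified dynamic config entries may be reset:
<code>
var resource = new ConfigResource { Name = "my-topic", Type = ResourceType.Topic };
await adminClient.AlterConfigsAsync(
    new Dictionary&lt;ConfigResource, List&lt;ConfigEntry&gt;&gt;
    {
        { resource, new List&lt;ConfigEntry&gt;
            { new ConfigEntry { Name = "cleanup.policy", Value = "compact" } } }
    });
</code>
</example>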
</member>
<member name="P:Confluent.Kafka.Admin.ConfigEntry.Name">
<summary>
The config name.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.ConfigEntry.Value">
<summary>
The config value.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.ConfigEntryResult">
<summary>
A config property entry, as reported by the Kafka admin api.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.ConfigEntryResult.IsDefault">
<summary>
Whether or not the config value is the default or was
explicitly set.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.ConfigEntryResult.IsReadOnly">
<summary>
Whether or not the config is read-only (cannot be updated).
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.ConfigEntryResult.IsSensitive">
<summary>
Whether or not the config value is sensitive. The value
for sensitive configuration values is always returned
as null.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.ConfigEntryResult.Name">
<summary>
The config name.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.ConfigEntryResult.Value">
<summary>
The config value.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.ConfigEntryResult.Source">
<summary>
The config source. Refer to
<see cref="T:Confluent.Kafka.Admin.ConfigSource" /> for
more information.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.ConfigEntryResult.Synonyms">
<summary>
All config values that may be used as the value of this
config along with their source, in the order of precedence.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.ConfigResource">
<summary>
A class representing resources that have configs.
</summary>
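<example>
For example, describing a topic's configuration (topic name assumed;
adminClient built via AdminClientBuilder):
<code>
var resource = new ConfigResource { Name = "my-topic", Type = ResourceType.Topic };
var results = await adminClient.DescribeConfigsAsync(new[] { resource });
foreach (var entry in results[0].Entries.Values)
{
    Console.WriteLine($"{entry.Name} = {entry.Value} (default: {entry.IsDefault})");
}
</code>
</example>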
</member>
<member name="P:Confluent.Kafka.Admin.ConfigResource.Type">
<summary>
The resource type (required)
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.ConfigResource.Name">
<summary>
The resource name (required)
</summary>
</member>
<member name="M:Confluent.Kafka.Admin.ConfigResource.Equals(System.Object)">
<summary>
Tests whether this ConfigResource instance is equal to the specified object.
</summary>
<param name="obj">
The object to test.
</param>
<returns>
true if obj is a ConfigResource and all properties are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Admin.ConfigResource.GetHashCode">
<summary>
Returns a hash code for this ConfigResource.
</summary>
<returns>
An integer that specifies a hash value for this ConfigResource.
</returns>
</member>
<member name="M:Confluent.Kafka.Admin.ConfigResource.op_Equality(Confluent.Kafka.Admin.ConfigResource,Confluent.Kafka.Admin.ConfigResource)">
<summary>
Tests whether ConfigResource instance a is equal to ConfigResource instance b.
</summary>
<param name="a">
The first ConfigResource instance to compare.
</param>
<param name="b">
The second ConfigResource instance to compare.
</param>
<returns>
true if ConfigResource instances a and b are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Admin.ConfigResource.op_Inequality(Confluent.Kafka.Admin.ConfigResource,Confluent.Kafka.Admin.ConfigResource)">
<summary>
Tests whether ConfigResource instance a is not equal to ConfigResource instance b.
</summary>
<param name="a">
The first ConfigResource instance to compare.
</param>
<param name="b">
The second ConfigResource instance to compare.
</param>
<returns>
true if ConfigResource instances a and b are not equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Admin.ConfigResource.ToString">
<summary>
Returns a string representation of the ConfigResource object.
</summary>
<returns>
A string representation of the ConfigResource object.
</returns>
</member>
<member name="T:Confluent.Kafka.Admin.ConfigSource">
<summary>
Enumerates the different config sources.
</summary>
</member>
<member name="F:Confluent.Kafka.Admin.ConfigSource.UnknownConfig">
<summary>
Unknown
</summary>
</member>
<member name="F:Confluent.Kafka.Admin.ConfigSource.DynamicTopicConfig">
<summary>
Dynamic Topic
</summary>
</member>
<member name="F:Confluent.Kafka.Admin.ConfigSource.DynamicBrokerConfig">
<summary>
Dynamic Broker
</summary>
</member>
<member name="F:Confluent.Kafka.Admin.ConfigSource.DynamicDefaultBrokerConfig">
<summary>
Dynamic Default Broker
</summary>
</member>
<member name="F:Confluent.Kafka.Admin.ConfigSource.StaticBrokerConfig">
<summary>
Static
</summary>
</member>
<member name="F:Confluent.Kafka.Admin.ConfigSource.DefaultConfig">
<summary>
Default
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.ConfigSynonym">
<summary>
Describes a synonym of a config entry.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.ConfigSynonym.Name">
<summary>
The config name.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.ConfigSynonym.Value">
<summary>
The config value.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.ConfigSynonym.Source">
<summary>
The config source. Refer to
<see cref="T:Confluent.Kafka.Admin.ConfigSource" /> for
more information.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.CreatePartitionsException">
<summary>
Represents an error that occurred during a create partitions request.
</summary>
</member>
<member name="M:Confluent.Kafka.Admin.CreatePartitionsException.#ctor(System.Collections.Generic.List{Confluent.Kafka.Admin.CreatePartitionsReport})">
<summary>
Initialize a new instance of CreatePartitionsException.
</summary>
<param name="results">
The result corresponding to all topics in the request
(whether or not they were in error). At least one of these
results will be in error.
</param>
</member>
<member name="P:Confluent.Kafka.Admin.CreatePartitionsException.Results">
<summary>
The result corresponding to all topics in the request
(whether or not they were in error). At least one of these
results will be in error.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.CreatePartitionsOptions">
<summary>
Options for the CreatePartitions method.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.CreatePartitionsOptions.ValidateOnly">
<summary>
If true, the request should be validated only, without creating the partitions.
Default: false
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.CreatePartitionsOptions.RequestTimeout">
<summary>
The overall request timeout, including broker lookup, request
transmission, operation time on broker, and response. If set
to null, the default request timeout for the AdminClient will
be used.
Default: null
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.CreatePartitionsOptions.OperationTimeout">
<summary>
The broker's operation timeout - the maximum time to wait for
CreatePartitions before returning a result to the application.
If set to null, will return immediately upon triggering partition
creation.
Default: null
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.CreatePartitionsReport">
<summary>
The result of a create partitions request for a specific topic.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.CreatePartitionsReport.Topic">
<summary>
The topic.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.CreatePartitionsReport.Error">
<summary>
The error (or success) of the create partitions request.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.CreateTopicReport">
<summary>
The result of a request to create a specific topic.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.CreateTopicReport.Topic">
<summary>
The topic name.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.CreateTopicReport.Error">
<summary>
The error (or success) of the create topic request.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.CreateTopicsException">
<summary>
Represents an error that occurred during a create topics request.
</summary>
</member>
<member name="M:Confluent.Kafka.Admin.CreateTopicsException.#ctor(System.Collections.Generic.List{Confluent.Kafka.Admin.CreateTopicReport})">
<summary>
Initialize a new instance of CreateTopicsException.
</summary>
<param name="results">
The result corresponding to all topics in the request
(whether or not they were in error). At least one of these
results will be in error.
</param>
</member>
<member name="P:Confluent.Kafka.Admin.CreateTopicsException.Results">
<summary>
The result corresponding to all topics in the request
(whether or not they were in error). At least one of these
results will be in error.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.CreateTopicsOptions">
<summary>
Options for the CreateTopics method.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.CreateTopicsOptions.ValidateOnly">
<summary>
If true, the request should be validated on the broker only,
without creating the topic.
Default: false
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.CreateTopicsOptions.RequestTimeout">
<summary>
The overall request timeout, including broker lookup, request
transmission, operation time on broker, and response. If set
to null, the default request timeout for the AdminClient will
be used.
Default: null
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.CreateTopicsOptions.OperationTimeout">
<summary>
The broker's operation timeout - the maximum time to wait for
CreateTopics before returning a result to the application.
If set to null, will return immediately upon triggering topic
creation.
Default: null
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.DeleteRecordsException">
<summary>
Represents an error that occurred during a delete records request.
</summary>
</member>
<member name="M:Confluent.Kafka.Admin.DeleteRecordsException.#ctor(System.Collections.Generic.List{Confluent.Kafka.Admin.DeleteRecordsReport})">
<summary>
Initializes a new DeleteRecordsException.
</summary>
<param name="results">
The result corresponding to all topic partitions in the request
(whether or not they were in error). At least one of these
results will be in error.
</param>
</member>
<member name="P:Confluent.Kafka.Admin.DeleteRecordsException.Results">
<summary>
The result corresponding to all topic partitions in the request
(whether or not they were in error). At least one of these
results will be in error.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.DeleteRecordsOptions">
<summary>
Options for the DeleteRecordsAsync method.
</summary>
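<example>
A minimal sketch (topic, partition, offset and timeout are
illustrative; adminClient built via AdminClientBuilder):
<code>
// Delete all records in partition 0 of "my-topic" up to, but not
// including, offset 100.
var results = await adminClient.DeleteRecordsAsync(
    new[] { new TopicPartitionOffset("my-topic", 0, 100) },
    new DeleteRecordsOptions { RequestTimeout = TimeSpan.FromSeconds(30) });
Console.WriteLine($"New low watermark: {results[0].Offset}");
</code>
</example>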
</member>
<member name="P:Confluent.Kafka.Admin.DeleteRecordsOptions.RequestTimeout">
<summary>
The overall request timeout, including broker lookup, request
transmission, operation time on broker, and response. If set
to null, the default request timeout for the AdminClient will
be used.
Default: null
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DeleteRecordsOptions.OperationTimeout">
<summary>
The broker's operation timeout - the maximum time to wait for
DeleteRecordsAsync before returning a result to the application.
If set to null, will return immediately upon triggering record
deletion.
Default: null
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.DeleteRecordsReport">
<summary>
The per-partition result of a delete records request
(including error status).
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DeleteRecordsReport.Topic">
<summary>
The topic name.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DeleteRecordsReport.Partition">
<summary>
The partition.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DeleteRecordsReport.Offset">
<summary>
Post-deletion low-watermark (smallest available offset of all
live replicas).
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DeleteRecordsReport.Error">
<summary>
Per-partition error status.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.DeleteRecordsResult">
<summary>
The per-partition result of a delete records request.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DeleteRecordsResult.Topic">
<summary>
The topic name.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DeleteRecordsResult.Partition">
<summary>
The partition.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DeleteRecordsResult.Offset">
<summary>
Post-deletion low-watermark offset (smallest available offset of all
live replicas).
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.DeleteTopicReport">
<summary>
The result of a request to delete a specific topic.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DeleteTopicReport.Topic">
<summary>
The topic.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DeleteTopicReport.Error">
<summary>
The error (or success) of the delete topic request.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.DeleteTopicsException">
<summary>
Represents an error that occurred during a delete topics request.
</summary>
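<example>
A minimal sketch of handling this exception (topic names assumed;
adminClient built via AdminClientBuilder):
<code>
try
{
    await adminClient.DeleteTopicsAsync(new[] { "topic-a", "topic-b" });
}
catch (DeleteTopicsException e)
{
    foreach (var report in e.Results)
    {
        Console.WriteLine($"{report.Topic}: {report.Error.Reason}");
    }
}
</code>
</example>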
</member>
<member name="M:Confluent.Kafka.Admin.DeleteTopicsException.#ctor(System.Collections.Generic.List{Confluent.Kafka.Admin.DeleteTopicReport})">
<summary>
Initializes a new DeleteTopicsException.
</summary>
<param name="results">
The result corresponding to all topics in the request
(whether or not they were in error). At least one of these
results will be in error.
</param>
</member>
<member name="P:Confluent.Kafka.Admin.DeleteTopicsException.Results">
<summary>
The result corresponding to all topics in the request
(whether or not they were in error). At least one of these
results will be in error.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.DeleteTopicsOptions">
<summary>
Options for the DeleteTopics method.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DeleteTopicsOptions.RequestTimeout">
<summary>
The overall request timeout, including broker lookup, request
transmission, operation time on broker, and response. If set
to null, the default request timeout for the AdminClient will
be used.
Default: null
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DeleteTopicsOptions.OperationTimeout">
<summary>
The broker's operation timeout - the maximum time to wait for
DeleteTopics before returning a result to the application.
If set to null, will return immediately upon triggering topic
deletion.
Default: null
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.DescribeConfigsException">
<summary>
Represents an error that occurred during a describe configs request.
</summary>
</member>
<member name="M:Confluent.Kafka.Admin.DescribeConfigsException.#ctor(System.Collections.Generic.List{Confluent.Kafka.Admin.DescribeConfigsReport})">
<summary>
Initializes a new instance of DescribeConfigsException.
</summary>
<param name="results">
The result corresponding to all ConfigResources in the request
(whether or not they were in error). At least one of these
results will be in error.
</param>
</member>
<member name="P:Confluent.Kafka.Admin.DescribeConfigsException.Results">
<summary>
The result corresponding to all ConfigResources in the request
(whether or not they were in error). At least one of these
results will be in error.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.DescribeConfigsOptions">
<summary>
Options for the DescribeConfigs method.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DescribeConfigsOptions.RequestTimeout">
<summary>
The overall request timeout, including broker lookup, request
transmission, operation time on broker, and response. If set
to null, the default request timeout for the AdminClient will
be used.
Default: null
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.DescribeConfigsReport">
<summary>
The result of a request to describe the configs of a specific resource.
</summary>
</member>
<member name="F:Confluent.Kafka.Admin.DescribeConfigsReport.ConfigResource">
<summary>
The resource associated with the describe configs request.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DescribeConfigsReport.Entries">
<summary>
Configuration entries for the specified resource.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DescribeConfigsReport.Error">
<summary>
The error (or success) of the describe config request.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.DescribeConfigsResult">
<summary>
The result of a request to describe the configs of a specific resource.
</summary>
</member>
<member name="F:Confluent.Kafka.Admin.DescribeConfigsResult.ConfigResource">
<summary>
The resource associated with the describe configs request.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.DescribeConfigsResult.Entries">
<summary>
Configuration entries for the specified resource.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.PartitionsSpecification">
<summary>
Specification for new partitions to be added to a topic.
</summary>
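<example>
For example, growing a topic to 6 partitions (topic name and count
are illustrative; adminClient built via AdminClientBuilder):
<code>
await adminClient.CreatePartitionsAsync(new[]
{
    new PartitionsSpecification { Topic = "my-topic", IncreaseTo = 6 }
});
</code>
</example>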
</member>
<member name="P:Confluent.Kafka.Admin.PartitionsSpecification.Topic">
<summary>
The topic that the new partitions specification corresponds to.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.PartitionsSpecification.ReplicaAssignments">
<summary>
The replica assignments for the new partitions, or null if the assignment
will be done by the controller. The outer list is indexed by the new
partition's relative index, and the inner list contains the broker ids.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.PartitionsSpecification.IncreaseTo">
<summary>
The partition count for the specified topic is increased to this value.
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.ResourceType">
<summary>
Enumerates the set of configuration resource types.
</summary>
</member>
<member name="F:Confluent.Kafka.Admin.ResourceType.Unknown">
<summary>
Unknown resource
</summary>
</member>
<member name="F:Confluent.Kafka.Admin.ResourceType.Any">
<summary>
Any resource
</summary>
</member>
<member name="F:Confluent.Kafka.Admin.ResourceType.Topic">
<summary>
Topic resource
</summary>
</member>
<member name="F:Confluent.Kafka.Admin.ResourceType.Group">
<summary>
Group resource
</summary>
</member>
<member name="F:Confluent.Kafka.Admin.ResourceType.Broker">
<summary>
Broker resource
</summary>
</member>
<member name="T:Confluent.Kafka.Admin.TopicSpecification">
<summary>
Specification of a new topic to be created via the CreateTopics
method. This class is used for the same purpose as NewTopic in
the Java API.
</summary>
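<example>
A minimal sketch (topic name, partition count and replication factor
are illustrative; adminClient built via AdminClientBuilder):
<code>
try
{
    await adminClient.CreateTopicsAsync(new[]
    {
        new TopicSpecification { Name = "my-topic", NumPartitions = 3, ReplicationFactor = 1 }
    });
}
catch (CreateTopicsException e)
{
    Console.WriteLine($"Failed to create {e.Results[0].Topic}: {e.Results[0].Error.Reason}");
}
</code>
</example>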
</member>
<member name="P:Confluent.Kafka.Admin.TopicSpecification.Configs">
<summary>
The configuration to use to create the new topic.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.TopicSpecification.Name">
<summary>
The name of the topic to be created (required).
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.TopicSpecification.NumPartitions">
<summary>
The number of partitions for the new topic or -1 (the default) if a
replica assignment is specified.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.TopicSpecification.ReplicasAssignments">
<summary>
A map from partition id to replica ids (i.e., static broker ids) or null
if the number of partitions and replication factor are specified
instead.
</summary>
</member>
<member name="P:Confluent.Kafka.Admin.TopicSpecification.ReplicationFactor">
<summary>
The replication factor for the new topic or -1 (the default) if a
replica assignment is specified instead.
</summary>
</member>
<member name="T:Confluent.Kafka.BrokerMetadata">
<summary>
Metadata pertaining to a single Kafka broker.
</summary>
</member>
<member name="M:Confluent.Kafka.BrokerMetadata.#ctor(System.Int32,System.String,System.Int32)">
<summary>
Initializes a new BrokerMetadata class instance.
</summary>
<param name="brokerId">
The Kafka broker id.
</param>
<param name="host">
The Kafka broker hostname.
</param>
<param name="port">
The Kafka broker port.
</param>
</member>
<member name="P:Confluent.Kafka.BrokerMetadata.BrokerId">
<summary>
Gets the Kafka broker id.
</summary>
</member>
<member name="P:Confluent.Kafka.BrokerMetadata.Host">
<summary>
Gets the Kafka broker hostname.
</summary>
</member>
<member name="P:Confluent.Kafka.BrokerMetadata.Port">
<summary>
Gets the Kafka broker port.
</summary>
</member>
<member name="M:Confluent.Kafka.BrokerMetadata.ToString">
<summary>
Returns a JSON representation of the BrokerMetadata object.
</summary>
<returns>
A JSON representation of the BrokerMetadata object.
</returns>
</member>
<member name="T:Confluent.Kafka.ClientExtensions">
<summary>
IClient extension methods
</summary>
</member>
<member name="M:Confluent.Kafka.ClientExtensions.OAuthBearerSetToken(Confluent.Kafka.IClient,System.String,System.Int64,System.String,System.Collections.Generic.IDictionary{System.String,System.String})">
<summary>
Set SASL/OAUTHBEARER token and metadata.
The SASL/OAUTHBEARER token refresh callback or
event handler should invoke this method upon
success. The extension keys must not include
the reserved key "`auth`", and all extension
keys and values must conform to the required
format as per https://tools.ietf.org/html/rfc7628#section-3.1.
</summary>
<param name="client">
the instance of a <see cref="T:Confluent.Kafka.IClient"/>
</param>
<param name="tokenValue">
the mandatory token value to set, often (but
not necessarily) a JWS compact serialization
as per https://tools.ietf.org/html/rfc7515#section-3.1.
</param>
<param name="lifetimeMs">
when the token expires, in terms of the number
of milliseconds since the epoch.
</param>
<param name="principalName">
the mandatory Kafka principal name associated
with the token.
</param>
<param name="extensions">
optional SASL extensions dictionary, to be
communicated to the broker as additional key-value
pairs during the initial client response as per
https://tools.ietf.org/html/rfc7628#section-3.1.
</param>
<seealso cref="M:Confluent.Kafka.ClientExtensions.OAuthBearerSetTokenFailure(Confluent.Kafka.IClient,System.String)"/>
</member>
<member name="M:Confluent.Kafka.ClientExtensions.OAuthBearerSetTokenFailure(Confluent.Kafka.IClient,System.String)">
<summary>
SASL/OAUTHBEARER token refresh failure indicator.
The SASL/OAUTHBEARER token refresh callback or
event handler should invoke this method upon failure.
</summary>
<param name="client">
the instance of a <see cref="T:Confluent.Kafka.IClient"/>
</param>
<param name="error">
mandatory human readable error reason for failing
to acquire a token.
</param>
<seealso cref="M:Confluent.Kafka.ClientExtensions.OAuthBearerSetToken(Confluent.Kafka.IClient,System.String,System.Int64,System.String,System.Collections.Generic.IDictionary{System.String,System.String})"/>
</member>
<member name="T:Confluent.Kafka.CommittedOffsets">
<summary>
Encapsulates information provided to a Consumer's OnOffsetsCommitted
event - per-partition offsets and success/error together with overall
success/error of the commit operation.
</summary>
<remarks>
Possible error conditions:
- Entire request failed: Error is set, but not per-partition errors.
- All partitions failed: Error is set to the value of the last failed partition, but each partition may have different errors.
- Some partitions failed: global error is success.
</remarks>
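<example>
A sketch of inspecting these cases from a consumer's offsets-committed
handler (consumer configuration values are assumptions):
<code>
var config = new ConsumerConfig
{
    BootstrapServers = "localhost:9092",
    GroupId = "example-group"
};
var consumer = new ConsumerBuilder&lt;Ignore, string&gt;(config)
    .SetOffsetsCommittedHandler((c, committed) =>
    {
        if (committed.Error.IsError)
        {
            Console.WriteLine($"Commit failed: {committed.Error.Reason}");
        }
        foreach (var tpo in committed.Offsets)
        {
            // Per-partition results may differ from the overall error.
            Console.WriteLine($"{tpo.Topic} [{tpo.Partition}]: {tpo.Error.Reason}");
        }
    })
    .Build();
</code>
</example>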
</member>
<member name="M:Confluent.Kafka.CommittedOffsets.#ctor(System.Collections.Generic.IList{Confluent.Kafka.TopicPartitionOffsetError},Confluent.Kafka.Error)">
<summary>
Initializes a new instance of CommittedOffsets.
</summary>
<param name="offsets">
per-partition offsets and success/error.
</param>
<param name="error">
overall operation success/error.
</param>
</member>
<member name="P:Confluent.Kafka.CommittedOffsets.Error">
<summary>
Gets the overall operation success/error.
</summary>
</member>
<member name="P:Confluent.Kafka.CommittedOffsets.Offsets">
<summary>
Gets the per-partition offsets and success/error.
</summary>
</member>
<member name="T:Confluent.Kafka.Config">
<summary>
Base functionality common to all configuration classes.
</summary>
</member>
<member name="M:Confluent.Kafka.Config.#ctor">
<summary>
Initialize a new empty <see cref="T:Confluent.Kafka.Config" /> instance.
</summary>
</member>
<member name="M:Confluent.Kafka.Config.#ctor(Confluent.Kafka.Config)">
<summary>
Initialize a new <see cref="T:Confluent.Kafka.Config" /> instance based on
an existing <see cref="T:Confluent.Kafka.Config" /> instance.
This will change the values "in place", i.e. operations on this class WILL modify the provided collection.
</summary>
</member>
<member name="M:Confluent.Kafka.Config.#ctor(System.Collections.Generic.IDictionary{System.String,System.String})">
<summary>
Initialize a new <see cref="T:Confluent.Kafka.Config" /> wrapping
an existing key/value dictionary.
This will change the values "in place", i.e. operations on this class WILL modify the provided collection.
</summary>
</member>
<member name="M:Confluent.Kafka.Config.Set(System.String,System.String)">
<summary>
Set a configuration property using a string key / value pair.
</summary>
<remarks>
Two scenarios where this is useful: 1. Setting librdkafka
plugin config properties. 2. Using a different version of
librdkafka from the one provided as a dependency of the
Confluent.Kafka package, where the configuration properties have evolved.
</remarks>
<param name="key">
The configuration property name.
</param>
<param name="val">
The property value.
</param>
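<example>
For example, setting a property that has no strongly typed equivalent
(the plugin name is illustrative):
<code>
var config = new ClientConfig();
config.Set("plugin.library.paths", "monitoring-interceptor");
Console.WriteLine(config.Get("plugin.library.paths"));
</code>
</example>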
</member>
<member name="M:Confluent.Kafka.Config.Get(System.String)">
<summary>
Gets a configuration property value given a key. Returns null if
the property has not been set.
</summary>
<param name="key">
The configuration property to get.
</param>
<returns>
The configuration property value.
</returns>
</member>
<member name="M:Confluent.Kafka.Config.GetInt(System.String)">
<summary>
Gets a configuration property int? value given a key.
</summary>
<param name="key">
The configuration property to get.
</param>
<returns>
The configuration property value.
</returns>
</member>
<member name="M:Confluent.Kafka.Config.GetBool(System.String)">
<summary>
Gets a configuration property bool? value given a key.
</summary>
<param name="key">
The configuration property to get.
</param>
<returns>
The configuration property value.
</returns>
</member>
<member name="M:Confluent.Kafka.Config.GetDouble(System.String)">
<summary>
Gets a configuration property double? value given a key.
</summary>
<param name="key">
The configuration property to get.
</param>
<returns>
The configuration property value.
</returns>
</member>
<member name="M:Confluent.Kafka.Config.GetEnum(System.Type,System.String)">
<summary>
Gets a configuration property enum value given a key.
</summary>
<param name="key">
The configuration property to get.
</param>
<param name="type">
The enum type of the configuration property.
</param>
<returns>
The configuration property value.
</returns>
</member>
<member name="M:Confluent.Kafka.Config.SetObject(System.String,System.Object)">
<summary>
Set a configuration property using a key / value pair (null checked).
</summary>
</member>
<member name="F:Confluent.Kafka.Config.properties">
<summary>
The configuration properties.
</summary>
</member>
<member name="M:Confluent.Kafka.Config.GetEnumerator">
<summary>
Returns an enumerator that iterates through the property collection.
</summary>
<returns>
An enumerator that iterates through the property collection.
</returns>
</member>
<member name="M:Confluent.Kafka.Config.System#Collections#IEnumerable#GetEnumerator">
<summary>
Returns an enumerator that iterates through the property collection.
</summary>
<returns>
An enumerator that iterates through the property collection.
</returns>
</member>
<member name="P:Confluent.Kafka.Config.CancellationDelayMaxMs">
<summary>
The maximum length of time (in milliseconds) before a cancellation request
is acted on. Low values may result in measurably higher CPU usage.
default: 100
range: 1 &lt;= dotnet.cancellation.delay.max.ms &lt;= 10000
importance: low
</summary>
</member>
<member name="T:Confluent.Kafka.ConfigPropertyNames">
<summary>
Names of all configuration properties specific to the
.NET Client.
</summary>
</member>
<member name="T:Confluent.Kafka.ConfigPropertyNames.Producer">
<summary>
Producer specific configuration properties.
</summary>
</member>
<member name="F:Confluent.Kafka.ConfigPropertyNames.Producer.EnableBackgroundPoll">
<summary>
Specifies whether or not the producer should start a background poll
thread to receive delivery reports and event notifications. Generally,
this should be set to true. If set to false, you will need to call
the Poll function manually.
default: true
</summary>
</member>
<member name="F:Confluent.Kafka.ConfigPropertyNames.Producer.EnableDeliveryReports">
<summary>
Specifies whether to enable notification of delivery reports. Typically
you should set this parameter to true. Set it to false for "fire and
forget" semantics and a small boost in performance.
default: true
</summary>
</member>
<member name="F:Confluent.Kafka.ConfigPropertyNames.Producer.DeliveryReportFields">
<summary>
A comma separated list of fields that may be optionally set in delivery
reports. Disabling delivery report fields that you do not require will
improve maximum throughput and reduce memory usage. Allowed values:
key, value, timestamp, headers, status, all, none.
default: all
</summary>
</member>
<member name="T:Confluent.Kafka.ConfigPropertyNames.Consumer">
<summary>
Consumer specific configuration properties.
</summary>
</member>
<member name="F:Confluent.Kafka.ConfigPropertyNames.Consumer.ConsumeResultFields">
<summary>
A comma separated list of fields that may be optionally set
in <see cref="T:Confluent.Kafka.ConsumeResult`2" />
objects returned by the
<see cref="M:Confluent.Kafka.Consumer`2.Consume(System.TimeSpan)" />
method. Disabling fields that you do not require will improve
throughput and reduce memory consumption. Allowed values:
headers, timestamp, topic, all, none
default: all
</summary>
</member>
<member name="F:Confluent.Kafka.ConfigPropertyNames.CancellationDelayMaxMs">
<summary>
The maximum length of time (in milliseconds) before a cancellation request
is acted on. Low values may result in measurably higher CPU usage.
default: 100
range: 1 &lt;= dotnet.cancellation.delay.max.ms &lt;= 10000
</summary>
</member>
<member name="T:Confluent.Kafka.Partitioner">
<summary>
Partitioner enum values
</summary>
</member>
<member name="F:Confluent.Kafka.Partitioner.Random">
<summary>
Random
</summary>
</member>
<member name="F:Confluent.Kafka.Partitioner.Consistent">
<summary>
Consistent
</summary>
</member>
<member name="F:Confluent.Kafka.Partitioner.ConsistentRandom">
<summary>
ConsistentRandom
</summary>
</member>
<member name="F:Confluent.Kafka.Partitioner.Murmur2">
<summary>
Murmur2
</summary>
</member>
<member name="F:Confluent.Kafka.Partitioner.Murmur2Random">
<summary>
Murmur2Random
</summary>
</member>
<member name="T:Confluent.Kafka.AutoOffsetReset">
<summary>
AutoOffsetReset enum values
</summary>
</member>
<member name="F:Confluent.Kafka.AutoOffsetReset.Latest">
<summary>
Latest
</summary>
</member>
<member name="F:Confluent.Kafka.AutoOffsetReset.Earliest">
<summary>
Earliest
</summary>
</member>
<member name="F:Confluent.Kafka.AutoOffsetReset.Error">
<summary>
Error
</summary>
</member>
<member name="T:Confluent.Kafka.BrokerAddressFamily">
<summary>
BrokerAddressFamily enum values
</summary>
</member>
<member name="F:Confluent.Kafka.BrokerAddressFamily.Any">
<summary>
Any
</summary>
</member>
<member name="F:Confluent.Kafka.BrokerAddressFamily.V4">
<summary>
V4
</summary>
</member>
<member name="F:Confluent.Kafka.BrokerAddressFamily.V6">
<summary>
V6
</summary>
</member>
<member name="T:Confluent.Kafka.SecurityProtocol">
<summary>
SecurityProtocol enum values
</summary>
</member>
<member name="F:Confluent.Kafka.SecurityProtocol.Plaintext">
<summary>
Plaintext
</summary>
</member>
<member name="F:Confluent.Kafka.SecurityProtocol.Ssl">
<summary>
Ssl
</summary>
</member>
<member name="F:Confluent.Kafka.SecurityProtocol.SaslPlaintext">
<summary>
SaslPlaintext
</summary>
</member>
<member name="F:Confluent.Kafka.SecurityProtocol.SaslSsl">
<summary>
SaslSsl
</summary>
</member>
<member name="T:Confluent.Kafka.SslEndpointIdentificationAlgorithm">
<summary>
SslEndpointIdentificationAlgorithm enum values
</summary>
</member>
<member name="F:Confluent.Kafka.SslEndpointIdentificationAlgorithm.None">
<summary>
None
</summary>
</member>
<member name="F:Confluent.Kafka.SslEndpointIdentificationAlgorithm.Https">
<summary>
Https
</summary>
</member>
<member name="T:Confluent.Kafka.PartitionAssignmentStrategy">
<summary>
PartitionAssignmentStrategy enum values
</summary>
</member>
<member name="F:Confluent.Kafka.PartitionAssignmentStrategy.Range">
<summary>
Range
</summary>
</member>
<member name="F:Confluent.Kafka.PartitionAssignmentStrategy.RoundRobin">
<summary>
RoundRobin
</summary>
</member>
<member name="F:Confluent.Kafka.PartitionAssignmentStrategy.CooperativeSticky">
<summary>
CooperativeSticky
</summary>
</member>
<member name="T:Confluent.Kafka.IsolationLevel">
<summary>
IsolationLevel enum values
</summary>
</member>
<member name="F:Confluent.Kafka.IsolationLevel.ReadUncommitted">
<summary>
ReadUncommitted
</summary>
</member>
<member name="F:Confluent.Kafka.IsolationLevel.ReadCommitted">
<summary>
ReadCommitted
</summary>
</member>
<member name="T:Confluent.Kafka.CompressionType">
<summary>
CompressionType enum values
</summary>
</member>
<member name="F:Confluent.Kafka.CompressionType.None">
<summary>
None
</summary>
</member>
<member name="F:Confluent.Kafka.CompressionType.Gzip">
<summary>
Gzip
</summary>
</member>
<member name="F:Confluent.Kafka.CompressionType.Snappy">
<summary>
Snappy
</summary>
</member>
<member name="F:Confluent.Kafka.CompressionType.Lz4">
<summary>
Lz4
</summary>
</member>
<member name="F:Confluent.Kafka.CompressionType.Zstd">
<summary>
Zstd
</summary>
</member>
<member name="T:Confluent.Kafka.SaslMechanism">
<summary>
SaslMechanism enum values
</summary>
</member>
<member name="F:Confluent.Kafka.SaslMechanism.Gssapi">
<summary>
GSSAPI
</summary>
</member>
<member name="F:Confluent.Kafka.SaslMechanism.Plain">
<summary>
PLAIN
</summary>
</member>
<member name="F:Confluent.Kafka.SaslMechanism.ScramSha256">
<summary>
SCRAM-SHA-256
</summary>
</member>
<member name="F:Confluent.Kafka.SaslMechanism.ScramSha512">
<summary>
SCRAM-SHA-512
</summary>
</member>
<member name="F:Confluent.Kafka.SaslMechanism.OAuthBearer">
<summary>
OAUTHBEARER
</summary>
</member>
<member name="T:Confluent.Kafka.Acks">
<summary>
Acks enum values
</summary>
</member>
<member name="F:Confluent.Kafka.Acks.None">
<summary>
None
</summary>
</member>
<member name="F:Confluent.Kafka.Acks.Leader">
<summary>
Leader
</summary>
</member>
<member name="F:Confluent.Kafka.Acks.All">
<summary>
All
</summary>
</member>
<member name="T:Confluent.Kafka.ClientConfig">
<summary>
Configuration common to all clients
</summary>
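<example>
A minimal sketch; in practice you would typically instantiate one of
the derived classes such as ProducerConfig, ConsumerConfig or
AdminClientConfig (values shown are assumptions):
<code>
var config = new ProducerConfig
{
    BootstrapServers = "localhost:9092",
    ClientId = "example-client",
    Acks = Acks.All
};
</code>
</example>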
</member>
<member name="M:Confluent.Kafka.ClientConfig.#ctor">
<summary>
Initialize a new empty <see cref="T:Confluent.Kafka.ClientConfig" /> instance.
</summary>
</member>
<member name="M:Confluent.Kafka.ClientConfig.#ctor(Confluent.Kafka.ClientConfig)">
<summary>
Initialize a new <see cref="T:Confluent.Kafka.ClientConfig" /> instance wrapping
an existing <see cref="T:Confluent.Kafka.ClientConfig" /> instance.
This will change the values "in place", i.e. operations on this class WILL modify the provided collection.
</summary>
</member>
<member name="M:Confluent.Kafka.ClientConfig.#ctor(System.Collections.Generic.IDictionary{System.String,System.String})">
<summary>
Initialize a new <see cref="T:Confluent.Kafka.ClientConfig" /> instance wrapping
an existing key/value pair collection.
This will change the values "in place", i.e. operations on this class WILL modify the provided collection.
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SaslMechanism">
<summary>
SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512. **NOTE**: Despite the name, you may not configure more than one mechanism.
</summary>
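<example>
For example, configuring SCRAM authentication over TLS (broker
address and credentials are placeholders):
<code>
var config = new ClientConfig
{
    BootstrapServers = "broker:9093",
    SecurityProtocol = SecurityProtocol.SaslSsl,
    SaslMechanism = SaslMechanism.ScramSha256,
    SaslUsername = "user",
    SaslPassword = "password"
};
</code>
</example>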
</member>
<member name="P:Confluent.Kafka.ClientConfig.Acks">
<summary>
This field indicates the number of acknowledgements the leader broker must receive from ISR brokers
before responding to the request: Zero=Broker does not send any response/ack to client, One=The
leader will write the record to its local log but will respond without awaiting full acknowledgement
from all followers. All=Broker will block until message is committed by all in sync replicas (ISRs).
If there are fewer than min.insync.replicas (broker configuration) in the ISR set, the produce request
will fail.
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.ClientId">
<summary>
Client identifier.
default: rdkafka
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.BootstrapServers">
<summary>
Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime.
default: ''
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.MessageMaxBytes">
<summary>
Maximum Kafka protocol request message size. Due to differing framing overhead between protocol versions the producer is unable to reliably enforce a strict max message limit at produce time and may exceed the maximum size by one message in protocol ProduceRequests; the broker will enforce the topic's `max.message.bytes` limit (see Apache Kafka documentation).
default: 1000000
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.MessageCopyMaxBytes">
<summary>
Maximum size for message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs.
default: 65535
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.ReceiveMessageMaxBytes">
<summary>
Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hiccups. This value must be at least `fetch.max.bytes` + 512 to allow for protocol overhead; the value is adjusted automatically unless the configuration property is explicitly set.
default: 100000000
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.MaxInFlight">
<summary>
Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch requests per broker to one.
default: 1000000
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.TopicMetadataRefreshIntervalMs">
<summary>
Period of time in milliseconds at which topic and broker metadata is refreshed in order to proactively discover any new brokers, topics, partitions or partition leader changes. Use -1 to disable the periodic refresh (not recommended). If there are no locally referenced topics (no topic objects created, no messages produced, no subscription or no assignment) then only the broker list will be refreshed every interval but no more often than every 10s.
default: 300000
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.MetadataMaxAgeMs">
<summary>
Metadata cache max age. Defaults to topic.metadata.refresh.interval.ms * 3
default: 900000
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.TopicMetadataRefreshFastIntervalMs">
<summary>
When a topic loses its leader a new metadata request will be enqueued with this initial interval, exponentially increasing until the topic metadata has been refreshed. This is used to recover quickly from transitioning leader brokers.
default: 250
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.TopicMetadataRefreshSparse">
<summary>
Sparse metadata requests (consumes less network bandwidth)
default: true
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.TopicMetadataPropagationMaxMs">
<summary>
Apache Kafka topic creation is asynchronous and it takes some time for a new topic to propagate throughout the cluster to all brokers. If a client requests topic metadata after manual topic creation but before the topic has been fully propagated to the broker the client is requesting metadata from, the topic will seem to be non-existent and the client will mark the topic as such, failing queued produced messages with `ERR__UNKNOWN_TOPIC`. This setting delays marking a topic as non-existent until the configured propagation max time has passed. The maximum propagation time is calculated from the time the topic is first referenced in the client, e.g., on produce().
default: 30000
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.TopicBlacklist">
<summary>
Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist.
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.Debug">
<summary>
A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch
default: ''
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SocketTimeoutMs">
<summary>
Default timeout for network requests. Producer: ProduceRequests will use the lesser value of `socket.timeout.ms` and remaining `message.timeout.ms` for the first message in the batch. Consumer: FetchRequests will use `fetch.wait.max.ms` + `socket.timeout.ms`. Admin: Admin requests will use `socket.timeout.ms` or explicitly set `rd_kafka_AdminOptions_set_operation_timeout()` value.
default: 60000
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SocketSendBufferBytes">
<summary>
Broker socket send buffer size. System default is used if 0.
default: 0
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SocketReceiveBufferBytes">
<summary>
Broker socket receive buffer size. System default is used if 0.
default: 0
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SocketKeepaliveEnable">
<summary>
Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets
default: false
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SocketNagleDisable">
<summary>
Disable the Nagle algorithm (TCP_NODELAY) on broker sockets.
default: false
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SocketMaxFails">
<summary>
Disconnect from broker when this number of send failures (e.g., timed out requests) is reached. Disable with 0. WARNING: It is highly recommended to leave this setting at its default value of 1 to avoid the client and broker becoming desynchronized in case of request timeouts. NOTE: The connection is automatically re-established.
default: 1
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.BrokerAddressTtl">
<summary>
How long to cache the broker address resolving results (milliseconds).
default: 1000
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.BrokerAddressFamily">
<summary>
Allowed broker IP address families: any, v4, v6
default: any
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.ConnectionsMaxIdleMs">
<summary>
Close broker connections after the specified time of inactivity. Disable with 0. If this property is left at its default value some heuristics are performed to determine a suitable default value, this is currently limited to identifying brokers on Azure (see librdkafka issue #3109 for more info).
default: 0
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.ReconnectBackoffMs">
<summary>
The initial time to wait before reconnecting to a broker after the connection has been closed. The time is increased exponentially until `reconnect.backoff.max.ms` is reached. -25% to +50% jitter is applied to each reconnect backoff. A value of 0 disables the backoff and reconnects immediately.
default: 100
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.ReconnectBackoffMaxMs">
<summary>
The maximum time to wait before reconnecting to a broker after the connection has been closed.
default: 10000
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.StatisticsIntervalMs">
<summary>
librdkafka statistics emit interval. The application also needs to register a stats callback using `rd_kafka_conf_set_stats_cb()`. The granularity is 1000ms. A value of 0 disables statistics.
default: 0
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.LogQueue">
<summary>
Disable spontaneous log_cb from internal librdkafka threads, instead enqueue log messages on queue set with `rd_kafka_set_log_queue()` and serve log callbacks or events through the standard poll APIs. **NOTE**: Log messages will linger in a temporary queue until the log queue has been set.
default: false
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.LogThreadName">
<summary>
Print internal thread name in log messages (useful for debugging librdkafka internals)
default: true
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.EnableRandomSeed">
<summary>
If enabled, librdkafka will initialize the PRNG with srand(current_time.milliseconds) on the first invocation of rd_kafka_new() (required only if rand_r() is not available on your platform). If disabled, the application must call srand() prior to calling rd_kafka_new().
default: true
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.LogConnectionClose">
<summary>
Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connection.max.idle.ms` value.
default: true
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.InternalTerminationSignal">
<summary>
Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed.
default: 0
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.ApiVersionRequest">
<summary>
Request broker's supported API versions to adjust functionality to available protocol features. If set to false, or the ApiVersionRequest fails, the fallback version `broker.version.fallback` will be used. **NOTE**: Depends on broker version >=0.10.0. If the request is not supported by (an older) broker the `broker.version.fallback` fallback is used.
default: true
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.ApiVersionRequestTimeoutMs">
<summary>
Timeout for broker API version requests.
default: 10000
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.ApiVersionFallbackMs">
<summary>
Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. **NOTE**: The ApiVersionRequest is only issued when a new connection to the broker is made (such as after an upgrade).
default: 0
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.BrokerVersionFallback">
<summary>
Older broker versions (before 0.10.0) provide no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`) making it impossible for the client to know what features it may use. As a workaround a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disabled). The fallback broker version will be used for `api.version.fallback.ms`. Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value >= 0.10, such as 0.10.2.1, enables ApiVersionRequests.
default: 0.10.0
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SecurityProtocol">
<summary>
Protocol used to communicate with brokers.
default: plaintext
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslCipherSuites">
<summary>
A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithms used to negotiate the security settings for a network connection using the TLS or SSL network protocol. See the manual pages for `ciphers(1)` and `SSL_CTX_set_cipher_list(3)`.
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslCurvesList">
<summary>
The supported-curves extension in the TLS ClientHello message specifies the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client is willing to have the server use. See manual page for `SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required.
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslSigalgsList">
<summary>
The client uses the TLS ClientHello signature_algorithms extension to indicate to the server which signature/hash algorithm pairs may be used in digital signatures. See manual page for `SSL_CTX_set1_sigalgs_list(3)`. OpenSSL >= 1.0.2 required.
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslKeyLocation">
<summary>
Path to client's private key (PEM) used for authentication.
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslKeyPassword">
<summary>
Private key passphrase (for use with `ssl.key.location` and `set_ssl_cert()`)
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslKeyPem">
<summary>
Client's private key string (PEM format) used for authentication.
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslCertificateLocation">
<summary>
Path to client's public key (PEM) used for authentication.
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslCertificatePem">
<summary>
Client's public key string (PEM format) used for authentication.
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslCaLocation">
<summary>
File or directory path to CA certificate(s) for verifying the broker's key. Defaults: On Windows the system's CA certificates are automatically looked up in the Windows Root certificate store. On Mac OSX this configuration defaults to `probe`. It is recommended to install openssl using Homebrew, to provide CA certificates. On Linux install the distribution's ca-certificates package. If OpenSSL is statically linked or `ssl.ca.location` is set to `probe` a list of standard paths will be probed and the first one found will be used as the default CA certificate location path. If OpenSSL is dynamically linked the OpenSSL library's default path will be used (see `OPENSSLDIR` in `openssl version -a`).
default: ''
importance: low
</summary>
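<example>
A minimal sketch (broker address and file path are placeholders) of an SSL connection that verifies the broker's certificate against a specific CA file:
<code><![CDATA[
using Confluent.Kafka;

var config = new ClientConfig
{
    BootstrapServers = "broker1:9093",       // placeholder broker
    SecurityProtocol = SecurityProtocol.Ssl,
    SslCaLocation = "/etc/ssl/certs/ca.pem"  // hypothetical CA bundle path
};
]]></code>
</example>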
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslCaPem">
<summary>
CA certificate string (PEM format) for verifying the broker's key.
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslCaCertificateStores">
<summary>
Comma-separated list of Windows Certificate stores to load CA certificates from. Certificates will be loaded in the same order as stores are specified. If no certificates can be loaded from any of the specified stores an error is logged and the OpenSSL library's default CA location is used instead. Store names are typically one or more of: MY, Root, Trust, CA.
default: Root
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslCrlLocation">
<summary>
Path to CRL for verifying broker's certificate validity.
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslKeystoreLocation">
<summary>
Path to client's keystore (PKCS#12) used for authentication.
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslKeystorePassword">
<summary>
Client's keystore (PKCS#12) password.
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslEngineLocation">
<summary>
Path to OpenSSL engine library. OpenSSL >= 1.1.0 required.
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslEngineId">
<summary>
OpenSSL engine id: the name used when loading the engine.
default: dynamic
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.EnableSslCertificateVerification">
<summary>
Enable OpenSSL's builtin broker (server) certificate verification. This verification can be extended by the application by implementing a certificate_verify_cb.
default: true
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SslEndpointIdentificationAlgorithm">
<summary>
Endpoint identification algorithm to validate broker hostname using broker certificate. https - Server (broker) hostname verification as specified in RFC2818. none - No endpoint verification. OpenSSL >= 1.0.2 required.
default: none
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SaslKerberosServiceName">
<summary>
Kerberos principal name that Kafka runs as, not including /hostname@REALM
default: kafka
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SaslKerberosPrincipal">
<summary>
This client's Kerberos principal name. (Not supported on Windows, will use the logon user's principal).
default: kafkaclient
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SaslKerberosKinitCmd">
<summary>
Shell command to refresh or acquire the client's Kerberos ticket. This command is executed on client creation and every sasl.kerberos.min.time.before.relogin (0=disable). %{config.prop.name} is replaced by corresponding config object value.
default: kinit -R -t "%{sasl.kerberos.keytab}" -k %{sasl.kerberos.principal} || kinit -t "%{sasl.kerberos.keytab}" -k %{sasl.kerberos.principal}
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SaslKerberosKeytab">
<summary>
Path to Kerberos keytab file. This configuration property is only used as a variable in `sasl.kerberos.kinit.cmd` as ` ... -t "%{sasl.kerberos.keytab}"`.
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SaslKerberosMinTimeBeforeRelogin">
<summary>
Minimum time in milliseconds between key refresh attempts. Disable automatic key refresh by setting this property to 0.
default: 60000
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SaslUsername">
<summary>
SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms
default: ''
importance: high
</summary>
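<example>
A minimal sketch (broker address and credentials are placeholders) combining SASL/PLAIN authentication with SSL transport encryption:
<code><![CDATA[
using Confluent.Kafka;

var config = new ClientConfig
{
    BootstrapServers = "broker1:9094",            // placeholder broker
    SecurityProtocol = SecurityProtocol.SaslSsl,
    SaslMechanism = SaslMechanism.Plain,
    SaslUsername = "svc-account",                 // hypothetical username
    SaslPassword = "s3cret"                       // hypothetical password
};
]]></code>
</example>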
</member>
<member name="P:Confluent.Kafka.ClientConfig.SaslPassword">
<summary>
SASL password for use with the PLAIN and SASL-SCRAM-.. mechanisms
default: ''
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.SaslOauthbearerConfig">
<summary>
SASL/OAUTHBEARER configuration. The format is implementation-dependent and must be parsed accordingly. The default unsecured token implementation (see https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes space-separated name=value pairs with valid names including principalClaimName, principal, scopeClaimName, scope, and lifeSeconds. The default value for principalClaimName is "sub", the default value for scopeClaimName is "scope", and the default value for lifeSeconds is 3600. The scope value is CSV format with the default value being no/empty scope. For example: `principalClaimName=azp principal=admin scopeClaimName=roles scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions can be communicated to the broker via `extension_NAME=value`. For example: `principal=admin extension_traceId=123`
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.EnableSaslOauthbearerUnsecureJwt">
<summary>
Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. This builtin handler should only be used for development or testing, and not in production.
default: false
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.PluginLibraryPaths">
<summary>
List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically.
default: ''
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ClientConfig.ClientRack">
<summary>
A rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds with the broker config `broker.rack`.
default: ''
importance: low
</summary>
</member>
<member name="T:Confluent.Kafka.AdminClientConfig">
<summary>
AdminClient configuration properties
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClientConfig.#ctor">
<summary>
Initialize a new empty <see cref="T:Confluent.Kafka.AdminClientConfig" /> instance.
</summary>
</member>
<member name="M:Confluent.Kafka.AdminClientConfig.#ctor(Confluent.Kafka.ClientConfig)">
<summary>
Initialize a new <see cref="T:Confluent.Kafka.AdminClientConfig" /> instance wrapping
an existing <see cref="T:Confluent.Kafka.ClientConfig" /> instance.
This will change the values "in-place", i.e. operations on this class WILL modify the provided collection.
</summary>
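<example>
A sketch of the wrapping pattern (broker address is a placeholder). Because the wrap is "in-place", setting a property through the AdminClientConfig also mutates the wrapped ClientConfig:
<code><![CDATA[
using Confluent.Kafka;

var shared = new ClientConfig { BootstrapServers = "localhost:9092" };
var adminConfig = new AdminClientConfig(shared);
adminConfig.SocketTimeoutMs = 30000;
// 'shared' now also contains socket.timeout.ms=30000.
]]></code>
</example>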
</member>
<member name="M:Confluent.Kafka.AdminClientConfig.#ctor(System.Collections.Generic.IDictionary{System.String,System.String})">
<summary>
Initialize a new <see cref="T:Confluent.Kafka.AdminClientConfig" /> instance wrapping
an existing key/value pair collection.
This will change the values "in-place", i.e. operations on this class WILL modify the provided collection.
</summary>
</member>
<member name="T:Confluent.Kafka.ProducerConfig">
<summary>
Producer configuration properties
</summary>
</member>
<member name="M:Confluent.Kafka.ProducerConfig.#ctor">
<summary>
Initialize a new empty <see cref="T:Confluent.Kafka.ProducerConfig" /> instance.
</summary>
</member>
<member name="M:Confluent.Kafka.ProducerConfig.#ctor(Confluent.Kafka.ClientConfig)">
<summary>
Initialize a new <see cref="T:Confluent.Kafka.ProducerConfig" /> instance wrapping
an existing <see cref="T:Confluent.Kafka.ClientConfig" /> instance.
This will change the values "in-place", i.e. operations on this class WILL modify the provided collection.
</summary>
</member>
<member name="M:Confluent.Kafka.ProducerConfig.#ctor(System.Collections.Generic.IDictionary{System.String,System.String})">
<summary>
Initialize a new <see cref="T:Confluent.Kafka.ProducerConfig" /> instance wrapping
an existing key/value pair collection.
This will change the values "in-place", i.e. operations on this class WILL modify the provided collection.
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.EnableBackgroundPoll">
<summary>
Specifies whether or not the producer should start a background poll
thread to receive delivery reports and event notifications. Generally,
this should be set to true. If set to false, you will need to call
the Poll function manually.
default: true
importance: low
</summary>
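<example>
A sketch (broker address and topic are placeholders) of serving delivery reports manually when the background poll thread is disabled:
<code><![CDATA[
using System;
using Confluent.Kafka;

var config = new ProducerConfig
{
    BootstrapServers = "localhost:9092",  // placeholder broker
    EnableBackgroundPoll = false
};
using var producer = new ProducerBuilder<Null, string>(config).Build();

producer.Produce("my-topic", new Message<Null, string> { Value = "hello" },
    dr => Console.WriteLine($"Delivered to: {dr.TopicPartitionOffset}"));

// With no background thread, delivery report callbacks fire only
// when Poll is called (blocks up to the timeout waiting for events):
producer.Poll(TimeSpan.FromSeconds(10));
]]></code>
</example>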
</member>
<member name="P:Confluent.Kafka.ProducerConfig.EnableDeliveryReports">
<summary>
Specifies whether to enable notification of delivery reports. Typically
you should set this parameter to true. Set it to false for "fire and
forget" semantics and a small boost in performance.
default: true
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.DeliveryReportFields">
<summary>
A comma separated list of fields that may be optionally set in delivery
reports. Disabling delivery report fields that you do not require will
improve maximum throughput and reduce memory usage. Allowed values:
key, value, timestamp, headers, status, all, none.
default: all
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.RequestTimeoutMs">
<summary>
The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0.
default: 30000
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.MessageTimeoutMs">
<summary>
Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded. The message timeout is automatically adjusted to `transaction.timeout.ms` if `transactional.id` is configured.
default: 300000
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.Partitioner">
<summary>
Partitioner: `random` - random distribution, `consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), `consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), `murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), `murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. This is functionally equivalent to the default partitioner in the Java Producer.), `fnv1a` - FNV-1a hash of key (NULL keys are mapped to single partition), `fnv1a_random` - FNV-1a hash of key (NULL keys are randomly partitioned).
default: consistent_random
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.CompressionLevel">
<summary>
Compression level parameter for algorithm selected by configuration property `compression.codec`. Higher values will result in better compression at the cost of more CPU usage. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level.
default: -1
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.TransactionalId">
<summary>
Enables the transactional producer. The transactional.id is used to identify the same transactional producer instance across process restarts. It allows the producer to guarantee that transactions corresponding to earlier instances of the same producer have been finalized prior to starting any new transactions, and that any zombie instances are fenced off. If no transactional.id is provided, then the producer is limited to idempotent delivery (if enable.idempotence is set). Requires broker version >= 0.11.0.
default: ''
importance: high
</summary>
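<example>
A condensed sketch (broker address, topic and transactional id are placeholders) of the transactional produce flow this property enables:
<code><![CDATA[
using System;
using Confluent.Kafka;

var config = new ProducerConfig
{
    BootstrapServers = "localhost:9092",   // placeholder broker
    TransactionalId = "order-processor-1"  // stable across process restarts
};
using var producer = new ProducerBuilder<Null, string>(config).Build();

producer.InitTransactions(TimeSpan.FromSeconds(30));
producer.BeginTransaction();
try
{
    producer.Produce("orders", new Message<Null, string> { Value = "order-42" });
    producer.CommitTransaction(TimeSpan.FromSeconds(30));
}
catch (Exception)
{
    producer.AbortTransaction(TimeSpan.FromSeconds(30));
    throw;
}
]]></code>
</example>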
</member>
<member name="P:Confluent.Kafka.ProducerConfig.TransactionTimeoutMs">
<summary>
The maximum amount of time in milliseconds that the transaction coordinator will wait for a transaction status update from the producer before proactively aborting the ongoing transaction. If this value is larger than the `transaction.max.timeout.ms` setting in the broker, the init_transactions() call will fail with ERR_INVALID_TRANSACTION_TIMEOUT. The transaction timeout automatically adjusts `message.timeout.ms` and `socket.timeout.ms`, unless explicitly configured in which case they must not exceed the transaction timeout (`socket.timeout.ms` must be at least 100ms lower than `transaction.timeout.ms`). This is also the default timeout value if no timeout (-1) is supplied to the transactional API methods.
default: 60000
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.EnableIdempotence">
<summary>
When set to `true`, the producer will ensure that messages are successfully produced exactly once and in the original produce order. The following configuration properties are adjusted automatically (if not modified by the user) when idempotence is enabled: `max.in.flight.requests.per.connection=5` (must be less than or equal to 5), `retries=INT32_MAX` (must be greater than 0), `acks=all`, `queuing.strategy=fifo`. Producer instantiation will fail if user-supplied configuration is incompatible.
default: false
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.EnableGaplessGuarantee">
<summary>
**EXPERIMENTAL**: subject to change or removal. When set to `true`, any error that could result in a gap in the produced message series when a batch of messages fails, will raise a fatal error (ERR__GAPLESS_GUARANTEE) and stop the producer. Messages failing due to `message.timeout.ms` are not covered by this guarantee. Requires `enable.idempotence=true`.
default: false
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.QueueBufferingMaxMessages">
<summary>
Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions.
default: 100000
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.QueueBufferingMaxKbytes">
<summary>
Maximum total message size sum allowed on the producer queue. This queue is shared by all topics and partitions. This property has higher priority than queue.buffering.max.messages.
default: 1048576
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.LingerMs">
<summary>
Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency.
default: 5
importance: high
</summary>
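<example>
A sketch of a throughput-oriented configuration (values are illustrative, not recommendations): a larger linger permits bigger, better-compressed batches at the cost of delivery latency:
<code><![CDATA[
using Confluent.Kafka;

var config = new ProducerConfig
{
    BootstrapServers = "localhost:9092",    // placeholder broker
    LingerMs = 50,                          // wait up to 50 ms to fill batches
    CompressionType = CompressionType.Lz4,  // compress the larger batches
    BatchNumMessages = 10000
};
]]></code>
</example>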
</member>
<member name="P:Confluent.Kafka.ProducerConfig.MessageSendMaxRetries">
<summary>
How many times to retry sending a failing Message. **Note:** retrying may cause reordering unless `enable.idempotence` is set to true.
default: 2147483647
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.RetryBackoffMs">
<summary>
The backoff time in milliseconds before retrying a protocol request.
default: 100
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.QueueBufferingBackpressureThreshold">
<summary>
The threshold of outstanding not yet transmitted broker requests needed to backpressure the producer's message accumulator. If the number of not yet transmitted requests equals or exceeds this number, produce request creation that would have otherwise been triggered (for example, in accordance with linger.ms) will be delayed. A lower number yields larger and more effective batches. A higher value can improve latency when using compression on slow machines.
default: 1
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.CompressionType">
<summary>
Compression codec to use for compressing message sets. This is the default value for all topics; it may be overridden by the topic configuration property `compression.codec`.
default: none
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.BatchNumMessages">
<summary>
Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by batch.size and message.max.bytes.
default: 10000
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.BatchSize">
<summary>
Maximum size (in bytes) of all messages batched in one MessageSet, including protocol framing overhead. This limit is applied after the first message has been added to the batch, regardless of the first message's size; this is to ensure that messages that exceed batch.size are produced. The total MessageSet size is also limited by batch.num.messages and message.max.bytes.
default: 1000000
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerConfig.StickyPartitioningLingerMs">
<summary>
Delay in milliseconds to wait to assign new sticky partitions for each topic. By default, set to double the time of linger.ms. To disable sticky behavior, set to 0. This behavior affects messages with the key NULL in all cases, and messages with key lengths of zero when the consistent_random partitioner is in use. These messages would otherwise be assigned randomly. A higher value allows for more effective batching of these messages.
default: 10
importance: low
</summary>
</member>
<member name="T:Confluent.Kafka.ConsumerConfig">
<summary>
Consumer configuration properties
</summary>
</member>
<member name="M:Confluent.Kafka.ConsumerConfig.#ctor">
<summary>
Initialize a new empty <see cref="T:Confluent.Kafka.ConsumerConfig" /> instance.
</summary>
</member>
<member name="M:Confluent.Kafka.ConsumerConfig.#ctor(Confluent.Kafka.ClientConfig)">
<summary>
Initialize a new <see cref="T:Confluent.Kafka.ConsumerConfig" /> instance wrapping
an existing <see cref="T:Confluent.Kafka.ClientConfig" /> instance.
This will change the values "in-place", i.e. operations on this class WILL modify the provided collection.
</summary>
</member>
<member name="M:Confluent.Kafka.ConsumerConfig.#ctor(System.Collections.Generic.IDictionary{System.String,System.String})">
<summary>
Initialize a new <see cref="T:Confluent.Kafka.ConsumerConfig" /> instance wrapping
an existing key/value pair collection.
This will change the values "in-place", i.e. operations on this class WILL modify the provided collection.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.ConsumeResultFields">
<summary>
A comma separated list of fields that may be optionally set
in <see cref="T:Confluent.Kafka.ConsumeResult`2" />
objects returned by the
<see cref="M:Confluent.Kafka.Consumer`2.Consume(System.TimeSpan)" />
method. Disabling fields that you do not require will improve
throughput and reduce memory consumption. Allowed values:
headers, timestamp, topic, all, none
default: all
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.AutoOffsetReset">
<summary>
Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'.
default: largest
importance: high
</summary>
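<example>
A minimal sketch (broker address, group id and topic are placeholders) of a consumer that falls back to the earliest offset when no committed offset exists:
<code><![CDATA[
using Confluent.Kafka;

var config = new ConsumerConfig
{
    BootstrapServers = "localhost:9092",        // placeholder broker
    GroupId = "my-group",                       // hypothetical group id
    AutoOffsetReset = AutoOffsetReset.Earliest  // 'earliest'/'smallest'
};
using var consumer = new ConsumerBuilder<Ignore, string>(config).Build();
consumer.Subscribe("my-topic");
]]></code>
</example>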
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.GroupId">
<summary>
Client group id string. All clients sharing the same group.id belong to the same group.
default: ''
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.GroupInstanceId">
<summary>
Enable static group membership. Static group members are able to leave and rejoin a group within the configured `session.timeout.ms` without prompting a group rebalance. This should be used in combination with a larger `session.timeout.ms` to avoid group rebalances caused by transient unavailability (e.g. process restarts). Requires broker version >= 2.3.0.
default: ''
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.PartitionAssignmentStrategy">
<summary>
The name of one or more partition assignment strategies. The elected group leader will use a strategy supported by all members of the group to assign partitions to group members. If there is more than one eligible strategy, preference is determined by the order of this list (strategies earlier in the list have higher priority). Cooperative and non-cooperative (eager) strategies must not be mixed. Available strategies: range, roundrobin, cooperative-sticky.
default: range,roundrobin
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.SessionTimeoutMs">
<summary>
Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no heartbeats are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance. The allowed range is configured with the **broker** configuration properties `group.min.session.timeout.ms` and `group.max.session.timeout.ms`. Also see `max.poll.interval.ms`.
default: 45000
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.HeartbeatIntervalMs">
<summary>
Group session keepalive heartbeat interval.
default: 3000
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.GroupProtocolType">
<summary>
Group protocol type. NOTE: Currently, the only supported group protocol type is `consumer`.
default: consumer
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.CoordinatorQueryIntervalMs">
<summary>
How often to query for the current client group coordinator. If the currently assigned coordinator is down the configured query interval will be divided by ten to more quickly recover in case of coordinator reassignment.
default: 600000
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.MaxPollIntervalMs">
<summary>
Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member. Warning: Offset commits may not be possible at this point. Note: It is recommended to set `enable.auto.offset.store=false` for long-time processing applications and then explicitly store offsets (using offsets_store()) *after* message processing, to make sure offsets are not auto-committed before processing has finished. The interval is checked two times per second. See KIP-62 for more information.
default: 300000
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.EnableAutoCommit">
<summary>
Automatically and periodically commit offsets in the background. Note: setting this to false does not prevent the consumer from fetching previously committed start offsets. To circumvent this behaviour set specific start offsets per partition in the call to assign().
default: true
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.AutoCommitIntervalMs">
<summary>
The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. (0 = disable). This setting is used by the high-level consumer.
default: 5000
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.EnableAutoOffsetStore">
<summary>
Automatically store offset of last message provided to application. The offset store is an in-memory store of the next offset to (auto-)commit for each partition.
default: true
importance: high
</summary>
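<example>
A sketch (broker address, group id and topic are placeholders) of the pattern recommended under `max.poll.interval.ms`: keep auto-commit enabled but store offsets explicitly, only after processing has succeeded:
<code><![CDATA[
using System;
using System.Threading;
using Confluent.Kafka;

var config = new ConsumerConfig
{
    BootstrapServers = "localhost:9092",  // placeholder broker
    GroupId = "my-group",
    EnableAutoCommit = true,              // commits happen in the background...
    EnableAutoOffsetStore = false         // ...but only of offsets stored explicitly
};
using var consumer = new ConsumerBuilder<Ignore, string>(config).Build();
consumer.Subscribe("my-topic");
while (true)
{
    var result = consumer.Consume(CancellationToken.None);
    Console.WriteLine(result.Message.Value);  // stand-in for real processing
    consumer.StoreOffset(result);             // now eligible for auto-commit
}
]]></code>
</example>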
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.QueuedMinMessages">
<summary>
Minimum number of messages per topic+partition librdkafka tries to maintain in the local consumer queue.
default: 100000
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.QueuedMaxMessagesKbytes">
<summary>
Maximum number of kilobytes of queued pre-fetched messages in the local consumer queue. If using the high-level consumer this setting applies to the single consumer queue, regardless of the number of partitions. When using the legacy simple consumer or when separate partition queues are used this setting applies per partition. This value may be overshot by fetch.message.max.bytes. This property has higher priority than queued.min.messages.
default: 65536
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.FetchWaitMaxMs">
<summary>
Maximum time the broker may wait to fill the Fetch response with fetch.min.bytes of messages.
default: 500
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.MaxPartitionFetchBytes">
<summary>
Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched.
default: 1048576
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.FetchMaxBytes">
<summary>
Maximum amount of data the broker shall return for a Fetch request. Messages are fetched in batches by the consumer and if the first message batch in the first non-empty partition of the Fetch request is larger than this value, then the message batch will still be returned to ensure the consumer can make progress. The maximum message batch size accepted by the broker is defined via `message.max.bytes` (broker config) or `max.message.bytes` (broker topic config). `fetch.max.bytes` is automatically adjusted upwards to be at least `message.max.bytes` (consumer config).
default: 52428800
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.FetchMinBytes">
<summary>
Minimum number of bytes the broker responds with. If fetch.wait.max.ms expires the accumulated data will be sent to the client regardless of this setting.
default: 1
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.FetchErrorBackoffMs">
<summary>
How long to postpone the next fetch request for a topic+partition in case of a fetch error.
default: 500
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.IsolationLevel">
<summary>
Controls how to read messages written transactionally: `read_committed` - only return transactional messages which have been committed. `read_uncommitted` - return all messages, even transactional messages which have been aborted.
default: read_committed
importance: high
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.EnablePartitionEof">
<summary>
Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the consumer reaches the end of a partition.
default: false
importance: low
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.CheckCrcs">
<summary>
Verify CRC32 of consumed messages, ensuring no on-the-wire or on-disk corruption to the messages occurred. This check comes at slightly increased CPU usage.
default: false
importance: medium
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerConfig.AllowAutoCreateTopics">
<summary>
Allow automatic topic creation on the broker when subscribing to or assigning non-existent topics. The broker must also be configured with `auto.create.topics.enable=true` for this configuration to take effect. Note: The default value (false) is different from the Java consumer (true). Requires broker version >= 0.11.0.0, for older broker versions only the broker configuration applies.
default: false
importance: low
</summary>
</member>
<member name="T:Confluent.Kafka.ConsumeException">
<summary>
Represents an error that occurred during message consumption.
</summary>
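<example>
A sketch of handling this exception in a consume loop; 'consumer' is assumed to be an IConsumer instance built elsewhere:
<code><![CDATA[
using System;
using Confluent.Kafka;

try
{
    var result = consumer.Consume(TimeSpan.FromSeconds(1));
    // ... process result ...
}
catch (ConsumeException e)
{
    Console.WriteLine($"Consume error: {e.Error.Reason} " +
                      $"at {e.ConsumerRecord?.TopicPartitionOffset}");
    if (e.Error.IsFatal) throw;  // unrecoverable; let it propagate
}
]]></code>
</example>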
</member>
<member name="M:Confluent.Kafka.ConsumeException.#ctor(Confluent.Kafka.ConsumeResult{System.Byte[],System.Byte[]},Confluent.Kafka.Error,System.Exception)">
<summary>
Initialize a new instance of ConsumeException
</summary>
<param name="consumerRecord">
An object that provides information known about the consumer
record for which the error occurred.
</param>
<param name="error">
The error that occurred.
</param>
<param name="innerException">
The exception instance that caused this exception.
</param>
</member>
<member name="M:Confluent.Kafka.ConsumeException.#ctor(Confluent.Kafka.ConsumeResult{System.Byte[],System.Byte[]},Confluent.Kafka.Error)">
<summary>
Initialize a new instance of ConsumeException
</summary>
<param name="consumerRecord">
An object that provides information known about the consumer
record for which the error occurred.
</param>
<param name="error">
The error that occurred.
</param>
</member>
<member name="P:Confluent.Kafka.ConsumeException.ConsumerRecord">
<summary>
An object that provides information known about the consumer
record for which the error occurred.
</summary>
</member>
<member name="T:Confluent.Kafka.Consumer`2">
<summary>
Implements a high-level Apache Kafka consumer with
deserialization capability.
</summary>
</member>
<member name="F:Confluent.Kafka.Consumer`2.assignCallCount">
<summary>
Keeps track of whether or not assign has been called during
invocation of a rebalance callback event.
</summary>
</member>
<member name="P:Confluent.Kafka.Consumer`2.Assignment">
<inheritdoc/>
</member>
<member name="P:Confluent.Kafka.Consumer`2.Subscription">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Subscribe(System.Collections.Generic.IEnumerable{System.String})">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Subscribe(System.String)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Unsubscribe">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Assign(Confluent.Kafka.TopicPartition)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Assign(Confluent.Kafka.TopicPartitionOffset)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Assign(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset})">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Assign(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartition})">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.IncrementalAssign(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset})">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.IncrementalAssign(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartition})">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.IncrementalUnassign(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartition})">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Unassign">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.StoreOffset(Confluent.Kafka.ConsumeResult{`0,`1})">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.StoreOffset(Confluent.Kafka.TopicPartitionOffset)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Commit">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Commit(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset})">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Commit(Confluent.Kafka.ConsumeResult{`0,`1})">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Seek(Confluent.Kafka.TopicPartitionOffset)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Pause(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartition})">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Resume(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartition})">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Committed(System.TimeSpan)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Committed(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartition},System.TimeSpan)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Position(Confluent.Kafka.TopicPartition)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.OffsetsForTimes(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionTimestamp},System.TimeSpan)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.GetWatermarkOffsets(Confluent.Kafka.TopicPartition)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.QueryWatermarkOffsets(Confluent.Kafka.TopicPartition,System.TimeSpan)">
<inheritdoc/>
</member>
<member name="P:Confluent.Kafka.Consumer`2.MemberId">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.AddBrokers(System.String)">
<inheritdoc/>
</member>
<member name="P:Confluent.Kafka.Consumer`2.Name">
<inheritdoc/>
</member>
<member name="P:Confluent.Kafka.Consumer`2.Handle">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Close">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Dispose">
<summary>
Releases all resources used by this Consumer without
committing offsets and without alerting the group coordinator
that the consumer is exiting the group. If you do not call
<see cref="M:Confluent.Kafka.Consumer`2.Close" /> or
<see cref="M:Confluent.Kafka.Consumer`2.Unsubscribe" />
prior to Dispose, the group will rebalance after a timeout
specified by the group's `session.timeout.ms`.
You should commit offsets / unsubscribe from the group before
calling this method (typically by calling
<see cref="M:Confluent.Kafka.Consumer`2.Close" />).
</summary>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Dispose(System.Boolean)">
<summary>
Releases the unmanaged resources used by the
<see cref="T:Confluent.Kafka.Consumer`2" />
and optionally disposes the managed resources.
</summary>
<param name="disposing">
true to release both managed and unmanaged resources;
false to release only unmanaged resources.
</param>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Consume(System.Int32)">
<summary>
Refer to <see cref="M:Confluent.Kafka.IConsumer`2.Consume(System.Int32)" />
</summary>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Consume(System.Threading.CancellationToken)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Consumer`2.Consume(System.TimeSpan)">
<inheritdoc/>
</member>
<member name="P:Confluent.Kafka.Consumer`2.ConsumerGroupMetadata">
<inheritdoc/>
</member>
<member name="T:Confluent.Kafka.ConsumerBuilder`2">
<summary>
A builder class for <see cref="T:Confluent.Kafka.IConsumer`2" />.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerBuilder`2.Config">
<summary>
The config dictionary.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerBuilder`2.ErrorHandler">
<summary>
The configured error handler.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerBuilder`2.LogHandler">
<summary>
The configured log handler.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerBuilder`2.StatisticsHandler">
<summary>
The configured statistics handler.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerBuilder`2.OAuthBearerTokenRefreshHandler">
<summary>
The configured OAuthBearer Token Refresh handler.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerBuilder`2.KeyDeserializer">
<summary>
The configured key deserializer.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerBuilder`2.ValueDeserializer">
<summary>
The configured value deserializer.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerBuilder`2.PartitionsAssignedHandler">
<summary>
The configured partitions assigned handler.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerBuilder`2.PartitionsRevokedHandler">
<summary>
The configured partitions revoked handler.
</summary>
</member>
<member name="F:Confluent.Kafka.ConsumerBuilder`2.RevokedOrLostHandlerIsFunc">
<summary>
Whether or not the user configured either PartitionsRevokedHandler or PartitionsLostHandler
as a Func (as opposed to an Action).
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerBuilder`2.PartitionsLostHandler">
<summary>
The configured partitions lost handler.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumerBuilder`2.OffsetsCommittedHandler">
<summary>
The configured offsets committed handler.
</summary>
</member>
<member name="M:Confluent.Kafka.ConsumerBuilder`2.#ctor(System.Collections.Generic.IEnumerable{System.Collections.Generic.KeyValuePair{System.String,System.String}})">
<summary>
Initialize a new ConsumerBuilder instance.
</summary>
<param name="config">
A collection of librdkafka configuration parameters
(refer to https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
and parameters specific to this client (refer to:
<see cref="T:Confluent.Kafka.ConfigPropertyNames" />).
At a minimum, 'bootstrap.servers' and 'group.id' must be
specified.
</param>
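<example>
A minimal sketch of using the builder (broker address and group id are placeholders; the handlers shown are optional):
<code><![CDATA[
using System;
using Confluent.Kafka;

var config = new ConsumerConfig
{
    BootstrapServers = "localhost:9092",  // placeholder broker
    GroupId = "my-group"                  // hypothetical group id
};
using var consumer = new ConsumerBuilder<Ignore, string>(config)
    .SetErrorHandler((c, e) => Console.WriteLine($"Error: {e.Reason}"))
    .SetLogHandler((c, m) => Console.WriteLine($"{m.Level}: {m.Message}"))
    .Build();
]]></code>
</example>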
</member>
<member name="M:Confluent.Kafka.ConsumerBuilder`2.SetStatisticsHandler(System.Action{Confluent.Kafka.IConsumer{`0,`1},System.String})">
<summary>
Set the handler to call on statistics events. Statistics
are provided as a JSON formatted string as defined here:
https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md
</summary>
<remarks>
You can enable statistics and set the statistics interval
using the StatisticsIntervalMs configuration property
(disabled by default).
Executes as a side-effect of the Consume method (on the same
thread).
Exceptions: Any exception thrown by your statistics handler
will be wrapped in a ConsumeException with ErrorCode
ErrorCode.Local_Application and thrown by the initiating call
to Consume.
</remarks>
</member>
<member name="M:Confluent.Kafka.ConsumerBuilder`2.SetErrorHandler(System.Action{Confluent.Kafka.IConsumer{`0,`1},Confluent.Kafka.Error})">
<summary>
Set the handler to call on error events e.g. connection failures or all
brokers down. Note that the client will try to automatically recover from
errors that are not marked as fatal. Non-fatal errors should be interpreted
as informational rather than catastrophic.
</summary>
<remarks>
Executes as a side-effect of the Consume method (on the same thread).
Exceptions: Any exception thrown by your error handler will be silently
ignored.
</remarks>
</member>
<member name="M:Confluent.Kafka.ConsumerBuilder`2.SetLogHandler(System.Action{Confluent.Kafka.IConsumer{`0,`1},Confluent.Kafka.LogMessage})">
<summary>
Set the handler to call when there is information available
to be logged. If not specified, a default callback that writes
to stderr will be used.
</summary>
<remarks>
By default not many log messages are generated.
For more verbose logging, specify one or more debug contexts
using the 'Debug' configuration property.
Warning: Log handlers are called spontaneously from internal
librdkafka threads and the application must not call any
Confluent.Kafka APIs from within a log handler or perform any
prolonged operations.
Exceptions: Any exception thrown by your log handler will be
silently ignored.
</remarks>
</member>
<member name="M:Confluent.Kafka.ConsumerBuilder`2.SetOAuthBearerTokenRefreshHandler(System.Action{Confluent.Kafka.IConsumer{`0,`1},System.String})">
<summary>
Set SASL/OAUTHBEARER token refresh callback in provided
conf object. The SASL/OAUTHBEARER token refresh callback
is triggered via <see cref="M:Confluent.Kafka.IConsumer`2.Consume(System.Int32)"/>
(or any of its overloads) whenever OAUTHBEARER is the SASL
mechanism and a token needs to be retrieved, typically
based on the configuration defined in
sasl.oauthbearer.config. The callback should invoke
<see cref="M:Confluent.Kafka.ClientExtensions.OAuthBearerSetToken(Confluent.Kafka.IClient,System.String,System.Int64,System.String,System.Collections.Generic.IDictionary{System.String,System.String})"/>
or <see cref="M:Confluent.Kafka.ClientExtensions.OAuthBearerSetTokenFailure(Confluent.Kafka.IClient,System.String)"/>
to indicate success or failure, respectively.
An unsecured JWT refresh handler is provided by librdkafka
for development and testing purposes; it is enabled by
setting the enable.sasl.oauthbearer.unsecure.jwt property
to true and is mutually exclusive with using a refresh callback.
</summary>
<param name="oAuthBearerTokenRefreshHandler">
the callback to set. Callback function arguments:
IConsumer - the consumer instance, which should be used to
set the token or the token failure;
string - the value of the sasl.oauthbearer.config
configuration property.
</param>
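<example>
A sketch of a refresh handler; 'builder' is assumed to be a ConsumerBuilder instance, and FetchTokenSomehow is a hypothetical helper standing in for whatever your identity provider requires:
<code><![CDATA[
using System;
using Confluent.Kafka;

builder.SetOAuthBearerTokenRefreshHandler((client, cfg) =>
{
    try
    {
        string token = FetchTokenSomehow(cfg);         // hypothetical helper
        long expiresAtMs = DateTimeOffset.UtcNow
            .AddMinutes(10).ToUnixTimeMilliseconds();  // illustrative lifetime
        client.OAuthBearerSetToken(token, expiresAtMs, "principal-name");
    }
    catch (Exception e)
    {
        client.OAuthBearerSetTokenFailure(e.Message);
    }
});
]]></code>
</example>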
</member>
<member name="M:Confluent.Kafka.ConsumerBuilder`2.SetKeyDeserializer(Confluent.Kafka.IDeserializer{`0})">
<summary>
Set the deserializer to use to deserialize keys.
</summary>
<remarks>
If your key deserializer throws an exception, this will be
wrapped in a ConsumeException with ErrorCode
Local_KeyDeserialization and thrown by the initiating call to
Consume.
</remarks>
</member>
<member name="M:Confluent.Kafka.ConsumerBuilder`2.SetValueDeserializer(Confluent.Kafka.IDeserializer{`1})">
<summary>
Set the deserializer to use to deserialize values.
</summary>
<remarks>
If your value deserializer throws an exception, this will be
wrapped in a ConsumeException with ErrorCode
Local_ValueDeserialization and thrown by the initiating call to
Consume.
</remarks>
</member>
<member name="M:Confluent.Kafka.ConsumerBuilder`2.SetPartitionsAssignedHandler(System.Func{Confluent.Kafka.IConsumer{`0,`1},System.Collections.Generic.List{Confluent.Kafka.TopicPartition},System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset}})">
<summary>
Specify a handler that will be called when a new consumer group partition assignment has
been received by this consumer.
The actual partitions to consume from and start offsets are specified by the return value
of the handler. Partition offsets may be a specific offset, or special value (Beginning, End
or Unset). If Unset, consumption will resume from the last committed offset for each
partition, or if there is no committed offset, in accordance with the `auto.offset.reset`
configuration property.
Kafka supports two rebalance protocols: EAGER (range and roundrobin assignors) and
COOPERATIVE (incremental) (cooperative-sticky assignor). Use the PartitionAssignmentStrategy
configuration property to specify which assignor to use.
## EAGER Rebalancing (range, roundrobin)
The set of partitions returned from your handler may differ from that provided by the
group (though they should typically be the same). These partitions are the
entire set of partitions to consume from. There will be exactly one call to the
partitions revoked or partitions lost handler (if they have been set using
SetPartitionsRevokedHandler / SetPartitionsLostHandler) corresponding to every call to
this handler.
## COOPERATIVE (Incremental) Rebalancing
The set of partitions returned from your handler must match that provided by the
group. These partitions are an incremental assignment - they are in addition to those
already being consumed from.
</summary>
<remarks>
Executes as a side-effect of the Consumer.Consume call (on the same thread).
(Incremental)Assign/Unassign must not be called in the handler.
Exceptions: Any exception thrown by your partitions assigned handler will be wrapped
in a ConsumeException with ErrorCode ErrorCode.Local_Application and thrown by the
initiating call to Consume.
</remarks>
</member>
<member name="M:Confluent.Kafka.ConsumerBuilder`2.SetPartitionsAssignedHandler(System.Action{Confluent.Kafka.IConsumer{`0,`1},System.Collections.Generic.List{Confluent.Kafka.TopicPartition}})">
<summary>
Specify a handler that will be called when a new consumer group partition assignment has
been received by this consumer.
Following execution of the handler, consumption will resume from the last committed offset
for each partition, or if there is no committed offset, in accordance with the
`auto.offset.reset` configuration property.
Kafka supports two rebalance protocols: EAGER (range and roundrobin assignors) and
COOPERATIVE (incremental) (cooperative-sticky assignor). Use the PartitionAssignmentStrategy
configuration property to specify which assignor to use.
## EAGER Rebalancing (range, roundrobin)
Partitions passed to the handler represent the entire set of partitions to consume from.
There will be exactly one call to the partitions revoked or partitions lost handler (if
they have been set using SetPartitionsRevokedHandler / SetPartitionsLostHandler)
corresponding to every call to this handler.
## COOPERATIVE (Incremental) Rebalancing
Partitions passed to the handler are an incremental assignment - they are in addition
to those already being consumed from.
</summary>
<remarks>
Executes as a side-effect of the Consumer.Consume call (on the same thread).
(Incremental)Assign/Unassign must not be called in the handler.
Exceptions: Any exception thrown by your partitions assigned handler will be wrapped
in a ConsumeException with ErrorCode ErrorCode.Local_Application and thrown by the
initiating call to Consume.
</remarks>
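<example>
A sketch of a purely observational assigned handler (logging only; no Assign/Unassign calls, per the remarks above); 'builder' is assumed to be a ConsumerBuilder instance:
<code><![CDATA[
using System;

builder.SetPartitionsAssignedHandler((consumer, partitions) =>
{
    Console.WriteLine($"Assigned: {string.Join(", ", partitions)}");
    // Consumption resumes from committed offsets (or per auto.offset.reset).
});
]]></code>
</example>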
</member>
<member name="M:Confluent.Kafka.ConsumerBuilder`2.SetPartitionsRevokedHandler(System.Func{Confluent.Kafka.IConsumer{`0,`1},System.Collections.Generic.List{Confluent.Kafka.TopicPartitionOffset},System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset}})">
<summary>
Specify a handler that will be called immediately prior to the consumer's current assignment
being revoked, allowing the application to take action (e.g. commit offsets to a custom
store) before the consumer gives up ownership of the partitions. The Func partitions revoked
handler variant is not supported in the incremental rebalancing (COOPERATIVE) case.
The value returned from your handler specifies the partitions/offsets the consumer should
be assigned to read from following completion of this method (most typically empty). This
partitions revoked handler variant may not be specified when incremental rebalancing is in use
- in that case, the set of partitions the consumer is reading from may never deviate from
the set that it has been assigned by the group.
The second parameter provided to the handler provides the set of partitions the consumer is
currently assigned to, and the current position of the consumer on each of these partitions.
</summary>
<remarks>
Executes as a side-effect of the Consumer.Consume/Close/Dispose call (on the same thread).
(Incremental)Assign/Unassign must not be called in the handler.
Exceptions: Any exception thrown by your partitions revoked handler will be wrapped in a
ConsumeException with ErrorCode ErrorCode.Local_Application and thrown by the initiating call
to Consume/Close.
</remarks>
</member>
<member name="M:Confluent.Kafka.ConsumerBuilder`2.SetPartitionsRevokedHandler(System.Action{Confluent.Kafka.IConsumer{`0,`1},System.Collections.Generic.List{Confluent.Kafka.TopicPartitionOffset}})">
<summary>
Specify a handler that will be called immediately prior to partitions being revoked
from the consumer's current assignment, allowing the application to take action
(e.g. commit offsets to a custom store) before the consumer gives up ownership of
the partitions.
Kafka supports two rebalance protocols: EAGER (range and roundrobin assignors) and
COOPERATIVE (incremental) (cooperative-sticky assignor). Use the PartitionAssignmentStrategy
configuration property to specify which assignor to use.
## EAGER Rebalancing (range, roundrobin)
The second parameter provides the entire set of partitions the consumer is currently
assigned to, and the current position of the consumer on each of these partitions.
The consumer will stop consuming from all partitions following execution of this
handler.
## COOPERATIVE (Incremental) Rebalancing
The second parameter provides the subset of the partitions assigned to the consumer
which are being revoked, and the current position of the consumer on each of these
partitions. The consumer will stop consuming from this set of partitions following
execution of this handler, and continue reading from any remaining partitions.
</summary>
<remarks>
May execute as a side-effect of the Consumer.Consume/Close/Dispose call (on the same
thread).
(Incremental)Assign/Unassign must not be called in the handler.
Exceptions: Any exception thrown by your partitions revoked handler will be wrapped
in a ConsumeException with ErrorCode ErrorCode.Local_Application and thrown by the
initiating call to Consume.
</remarks>
</member>
<member name="M:Confluent.Kafka.ConsumerBuilder`2.SetPartitionsLostHandler(System.Func{Confluent.Kafka.IConsumer{`0,`1},System.Collections.Generic.List{Confluent.Kafka.TopicPartitionOffset},System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset}})">
<summary>
Specify a handler that will be called when the consumer detects that it has lost ownership
of its partition assignment (fallen out of the group). The application should not commit
offsets in this case, since the partitions will likely be owned by other consumers in the
group (offset commits to Kafka will likely fail).
The value returned from your handler specifies the partitions/offsets the consumer should
be assigned to read from following completion of this method (most typically empty). This
partitions lost handler variant may not be specified when incremental rebalancing is in use
- in that case, the set of partitions the consumer is reading from may never deviate from
the set that it has been assigned by the group.
The second parameter provided to the handler provides the set of all partitions the consumer
is currently assigned to, and the current position of the consumer on each of these partitions.
Following completion of this handler, the consumer will stop consuming from all partitions.
If this handler is not specified, the partitions revoked handler (if specified) will be called
instead if partitions are lost.
</summary>
<remarks>
May execute as a side-effect of the Consumer.Consume/Close/Dispose call (on the same
thread).
(Incremental)Assign/Unassign must not be called in the handler.
Exceptions: Any exception thrown by your partitions lost handler will be wrapped
in a ConsumeException with ErrorCode ErrorCode.Local_Application and thrown by the
initiating call to Consume.
</remarks>
</member>
<member name="M:Confluent.Kafka.ConsumerBuilder`2.SetPartitionsLostHandler(System.Action{Confluent.Kafka.IConsumer{`0,`1},System.Collections.Generic.List{Confluent.Kafka.TopicPartitionOffset}})">
<summary>
Specify a handler that will be called when the consumer detects that it has lost ownership
of its partition assignment (fallen out of the group). The application should not commit
offsets in this case, since the partitions will likely be owned by other consumers in the
group (offset commits to Kafka will likely fail).
The second parameter provided to the handler is the set of all partitions the consumer
is currently assigned to, along with the current position of the consumer on each of these partitions.
If this handler is not specified, the partitions revoked handler (if specified) will be called
instead if partitions are lost.
</summary>
<remarks>
May execute as a side-effect of the Consumer.Consume/Close/Dispose call (on the same
thread).
(Incremental)Assign/Unassign must not be called in the handler.
Exceptions: Any exception thrown by your partitions lost handler will be wrapped
in a ConsumeException with ErrorCode ErrorCode.Local_Application and thrown by the
initiating call to Consume.
</remarks>
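<example>
A minimal sketch (builder is an assumed ConsumerBuilder instance);
the handler only logs, since committing offsets for lost partitions
would likely fail:
<code>
builder.SetPartitionsLostHandler((consumer, lostPartitions) =>
{
    // Do not commit here: ownership of these partitions is gone.
    foreach (var tpo in lostPartitions)
    {
        Console.WriteLine($"lost: {tpo}");
    }
});
</code>
</example>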
</member>
<member name="M:Confluent.Kafka.ConsumerBuilder`2.SetOffsetsCommittedHandler(System.Action{Confluent.Kafka.IConsumer{`0,`1},Confluent.Kafka.CommittedOffsets})">
<summary>
A handler that is called to report the result of (automatic) offset
commits. It is not called as a result of the use of the Commit method.
</summary>
<remarks>
Executes as a side-effect of the Consumer.Consume call (on the same thread).
Exceptions: Any exception thrown by your offsets committed handler
will be wrapped in a ConsumeException with ErrorCode
ErrorCode.Local_Application and thrown by the initiating call to Consume/Close.
</remarks>
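<example>
A minimal sketch that logs the outcome of each automatic commit
(builder is an assumed ConsumerBuilder instance):
<code>
builder.SetOffsetsCommittedHandler((consumer, committed) =>
{
    if (committed.Error.IsError)
    {
        Console.WriteLine($"commit failed: {committed.Error}");
        return;
    }
    foreach (var tpoe in committed.Offsets)
    {
        Console.WriteLine($"committed: {tpoe}");
    }
});
</code>
</example>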
</member>
<member name="M:Confluent.Kafka.ConsumerBuilder`2.Build">
<summary>
Build a new IConsumer implementation instance.
</summary>
</member>
<member name="T:Confluent.Kafka.ConsumeResult`2">
<summary>
Represents a message consumed from a Kafka cluster.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumeResult`2.Topic">
<summary>
The topic associated with the message.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumeResult`2.Partition">
<summary>
The partition associated with the message.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumeResult`2.Offset">
<summary>
The partition offset associated with the message.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumeResult`2.TopicPartition">
<summary>
The TopicPartition associated with the message.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumeResult`2.TopicPartitionOffset">
<summary>
The TopicPartitionOffset associated with the message.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumeResult`2.Message">
<summary>
The Kafka message, or null if this ConsumeResult
instance represents an end of partition event.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumeResult`2.Key">
<summary>
The Kafka message Key.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumeResult`2.Value">
<summary>
The Kafka message Value.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumeResult`2.Timestamp">
<summary>
The Kafka message timestamp.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumeResult`2.Headers">
<summary>
The Kafka message headers.
</summary>
</member>
<member name="P:Confluent.Kafka.ConsumeResult`2.IsPartitionEOF">
<summary>
True if this instance represents an end of partition
event, false if it represents a message in Kafka.
</summary>
</member>
<member name="T:Confluent.Kafka.IConsumerGroupMetadata">
<summary>
The consumer group metadata associated with a consumer.
</summary>
</member>
<member name="T:Confluent.Kafka.DeliveryReport`2">
<summary>
The result of a produce request.
</summary>
</member>
<member name="P:Confluent.Kafka.DeliveryReport`2.Error">
<summary>
An error (or NoError) associated with the message.
</summary>
</member>
<member name="P:Confluent.Kafka.DeliveryReport`2.TopicPartitionOffsetError">
<summary>
The TopicPartitionOffsetError associated with the message.
</summary>
</member>
<member name="T:Confluent.Kafka.DeliveryResult`2">
<summary>
Encapsulates the result of a successful produce request.
</summary>
</member>
<member name="P:Confluent.Kafka.DeliveryResult`2.Topic">
<summary>
The topic associated with the message.
</summary>
</member>
<member name="P:Confluent.Kafka.DeliveryResult`2.Partition">
<summary>
The partition associated with the message.
</summary>
</member>
<member name="P:Confluent.Kafka.DeliveryResult`2.Offset">
<summary>
The partition offset associated with the message.
</summary>
</member>
<member name="P:Confluent.Kafka.DeliveryResult`2.TopicPartition">
<summary>
The TopicPartition associated with the message.
</summary>
</member>
<member name="P:Confluent.Kafka.DeliveryResult`2.TopicPartitionOffset">
<summary>
The TopicPartitionOffset associated with the message.
</summary>
</member>
<member name="P:Confluent.Kafka.DeliveryResult`2.Status">
<summary>
The persistence status of the message
</summary>
</member>
<member name="P:Confluent.Kafka.DeliveryResult`2.Message">
<summary>
The Kafka message.
</summary>
</member>
<member name="P:Confluent.Kafka.DeliveryResult`2.Key">
<summary>
The Kafka message Key.
</summary>
</member>
<member name="P:Confluent.Kafka.DeliveryResult`2.Value">
<summary>
The Kafka message Value.
</summary>
</member>
<member name="P:Confluent.Kafka.DeliveryResult`2.Timestamp">
<summary>
The Kafka message timestamp.
</summary>
</member>
<member name="P:Confluent.Kafka.DeliveryResult`2.Headers">
<summary>
The Kafka message headers.
</summary>
</member>
<member name="T:Confluent.Kafka.DependentAdminClientBuilder">
<summary>
A builder class for <see cref="T:Confluent.Kafka.IAdminClient" /> instance
implementations that leverage an existing client handle.
</summary>
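<example>
A minimal sketch of creating an admin client that shares the
connections of an existing producer (producer is an assumed,
already-built IProducer instance):
<code>
using var adminClient = new DependentAdminClientBuilder(producer.Handle).Build();
var metadata = adminClient.GetMetadata(TimeSpan.FromSeconds(10));
</code>
</example>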
</member>
<member name="P:Confluent.Kafka.DependentAdminClientBuilder.Handle">
<summary>
The configured client handle.
</summary>
</member>
<member name="M:Confluent.Kafka.DependentAdminClientBuilder.#ctor(Confluent.Kafka.Handle)">
<summary>
An underlying librdkafka client handle that the AdminClient will use to
make broker requests.
</summary>
</member>
<member name="M:Confluent.Kafka.DependentAdminClientBuilder.Build">
<summary>
Build a new IAdminClient implementation instance.
</summary>
</member>
<member name="T:Confluent.Kafka.DependentProducerBuilder`2">
<summary>
A builder class for <see cref="T:Confluent.Kafka.IProducer`2" /> instance
implementations that leverage an existing client handle.
[API-SUBJECT-TO-CHANGE] - This class may be removed in the future
in favor of an improved API for this functionality.
</summary>
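<example>
A minimal sketch of producing messages of a second key/value type
combination over an existing client handle (stringProducer is an
assumed, already-built IProducer&lt;string, string> instance):
<code>
var intProducer = new DependentProducerBuilder&lt;Null, int>(stringProducer.Handle).Build();
await intProducer.ProduceAsync("counts", new Message&lt;Null, int> { Value = 42 });
</code>
</example>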
</member>
<member name="P:Confluent.Kafka.DependentProducerBuilder`2.Handle">
<summary>
The configured client handle.
</summary>
</member>
<member name="P:Confluent.Kafka.DependentProducerBuilder`2.KeySerializer">
<summary>
The configured key serializer.
</summary>
</member>
<member name="P:Confluent.Kafka.DependentProducerBuilder`2.ValueSerializer">
<summary>
The configured value serializer.
</summary>
</member>
<member name="P:Confluent.Kafka.DependentProducerBuilder`2.AsyncKeySerializer">
<summary>
The configured async key serializer.
</summary>
</member>
<member name="P:Confluent.Kafka.DependentProducerBuilder`2.AsyncValueSerializer">
<summary>
The configured async value serializer.
</summary>
</member>
<member name="M:Confluent.Kafka.DependentProducerBuilder`2.#ctor(Confluent.Kafka.Handle)">
<summary>
An underlying librdkafka client handle that the Producer will use to
make broker requests. The handle must be from another Producer
instance (not Consumer or AdminClient).
</summary>
</member>
<member name="M:Confluent.Kafka.DependentProducerBuilder`2.SetKeySerializer(Confluent.Kafka.ISerializer{`0})">
<summary>
The serializer to use to serialize keys.
</summary>
</member>
<member name="M:Confluent.Kafka.DependentProducerBuilder`2.SetValueSerializer(Confluent.Kafka.ISerializer{`1})">
<summary>
The serializer to use to serialize values.
</summary>
</member>
<member name="M:Confluent.Kafka.DependentProducerBuilder`2.SetKeySerializer(Confluent.Kafka.IAsyncSerializer{`0})">
<summary>
The async serializer to use to serialize keys.
</summary>
</member>
<member name="M:Confluent.Kafka.DependentProducerBuilder`2.SetValueSerializer(Confluent.Kafka.IAsyncSerializer{`1})">
<summary>
The async serializer to use to serialize values.
</summary>
</member>
<member name="M:Confluent.Kafka.DependentProducerBuilder`2.Build">
<summary>
Build a new IProducer implementation instance.
</summary>
</member>
<member name="T:Confluent.Kafka.Deserializers">
<summary>
Deserializers for use with <see cref="T:Confluent.Kafka.Consumer`2" />.
</summary>
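<example>
A minimal sketch (config is an assumed ConsumerConfig instance).
Explicitly setting a provided deserializer is shown for illustration;
for common types such as string it is applied by default:
<code>
using var consumer = new ConsumerBuilder&lt;Ignore, string>(config)
    .SetValueDeserializer(Deserializers.Utf8)
    .Build();
</code>
</example>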
</member>
<member name="F:Confluent.Kafka.Deserializers.Utf8">
<summary>
String (UTF8 encoded) deserializer.
</summary>
</member>
<member name="F:Confluent.Kafka.Deserializers.Null">
<summary>
Null value deserializer.
</summary>
</member>
<member name="F:Confluent.Kafka.Deserializers.Ignore">
<summary>
Deserializer that deserializes any value to null.
</summary>
</member>
<member name="F:Confluent.Kafka.Deserializers.Int64">
<summary>
System.Int64 (big endian encoded, network byte ordered) deserializer.
</summary>
</member>
<member name="F:Confluent.Kafka.Deserializers.Int32">
<summary>
System.Int32 (big endian encoded, network byte ordered) deserializer.
</summary>
</member>
<member name="F:Confluent.Kafka.Deserializers.Single">
<summary>
System.Single (big endian encoded, network byte ordered) deserializer.
</summary>
</member>
<member name="F:Confluent.Kafka.Deserializers.Double">
<summary>
System.Double (big endian encoded, network byte ordered) deserializer.
</summary>
</member>
<member name="F:Confluent.Kafka.Deserializers.ByteArray">
<summary>
System.Byte[] (nullable) deserializer.
</summary>
<remarks>
Byte ordering is the original order (the bytes are passed through unchanged).
</remarks>
</member>
<member name="T:Confluent.Kafka.Error">
<summary>
Represents an error that occurred when interacting with a
Kafka broker or the librdkafka library.
</summary>
</member>
<member name="M:Confluent.Kafka.Error.#ctor(Confluent.Kafka.Error)">
<summary>
Initialize a new Error instance that is a copy of another.
</summary>
<param name="error">
The error object to initialize from.
</param>
</member>
<member name="M:Confluent.Kafka.Error.#ctor(System.IntPtr)">
<summary>
Initialize a new Error instance from a native pointer to
a rd_kafka_error_t object, then destroy the native object.
</summary>
</member>
<member name="M:Confluent.Kafka.Error.#ctor(Confluent.Kafka.ErrorCode)">
<summary>
Initialize a new Error instance from a particular
<see cref="T:Confluent.Kafka.ErrorCode"/> value.
</summary>
<param name="code">
The <see cref="T:Confluent.Kafka.ErrorCode"/> value associated with this Error.
</param>
<remarks>
The reason string associated with this Error will
be a static value associated with the <see cref="T:Confluent.Kafka.ErrorCode"/>.
</remarks>
</member>
<member name="M:Confluent.Kafka.Error.#ctor(Confluent.Kafka.ErrorCode,System.String,System.Boolean)">
<summary>
Initialize a new Error instance.
</summary>
<param name="code">
The error code.
</param>
<param name="reason">
The error reason. If null, this will be a static value
associated with the error.
</param>
<param name="isFatal">
Whether or not the error is fatal.
</param>
<exception cref="T:System.ArgumentException">
</exception>
</member>
<member name="M:Confluent.Kafka.Error.#ctor(Confluent.Kafka.ErrorCode,System.String)">
<summary>
Initialize a new Error instance from a particular
<see cref="T:Confluent.Kafka.ErrorCode"/> value and custom <paramref name="reason"/>
string.
</summary>
<param name="code">
The <see cref="T:Confluent.Kafka.ErrorCode"/> value associated with this Error.
</param>
<param name="reason">
A custom reason string associated with the error
(overriding the static string associated with
<paramref name="code"/>).
</param>
</member>
<member name="P:Confluent.Kafka.Error.Code">
<summary>
Gets the <see cref="T:Confluent.Kafka.ErrorCode"/> associated with this Error.
</summary>
</member>
<member name="P:Confluent.Kafka.Error.IsFatal">
<summary>
Whether or not the error is fatal.
</summary>
</member>
<member name="P:Confluent.Kafka.Error.IsRetriable">
<summary>
Whether or not the operation that caused the error is retriable.
</summary>
</member>
<member name="P:Confluent.Kafka.Error.TxnRequiresAbort">
<summary>
Whether or not the current transaction is abortable
following the error.
</summary>
<remarks>
This is only relevant for the transactional producer
API.
</remarks>
</member>
<member name="P:Confluent.Kafka.Error.Reason">
<summary>
Gets a human readable reason string associated with this error.
</summary>
</member>
<member name="P:Confluent.Kafka.Error.IsError">
<summary>
true if Code != ErrorCode.NoError.
</summary>
</member>
<member name="P:Confluent.Kafka.Error.IsLocalError">
<summary>
true if this error originated locally (within librdkafka), false otherwise.
</summary>
</member>
<member name="P:Confluent.Kafka.Error.IsBrokerError">
<summary>
true if this error originated on a broker, false otherwise.
</summary>
</member>
<member name="M:Confluent.Kafka.Error.op_Implicit(Confluent.Kafka.Error)~Confluent.Kafka.ErrorCode">
<summary>
Converts the specified Error value to the value of its Code property.
</summary>
<param name="e">
The Error value to convert.
</param>
</member>
<member name="M:Confluent.Kafka.Error.op_Implicit(Confluent.Kafka.ErrorCode)~Confluent.Kafka.Error">
<summary>
Converts the specified <see cref="T:Confluent.Kafka.ErrorCode"/> value to its corresponding rich Error value.
</summary>
<param name="c">
The <see cref="T:Confluent.Kafka.ErrorCode"/> value to convert.
</param>
</member>
<member name="M:Confluent.Kafka.Error.Equals(System.Object)">
<summary>
Tests whether this Error instance is equal to the specified object.
</summary>
<param name="obj">
The object to test.
</param>
<returns>
true if obj is an Error and the Code property values are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Error.GetHashCode">
<summary>
Returns a hash code for this Error value.
</summary>
<returns>
An integer that specifies a hash value for this Error value.
</returns>
</member>
<member name="M:Confluent.Kafka.Error.op_Equality(Confluent.Kafka.Error,Confluent.Kafka.Error)">
<summary>
Tests whether Error value a is equal to Error value b.
</summary>
<param name="a">
The first Error value to compare.
</param>
<param name="b">
The second Error value to compare.
</param>
<returns>
true if Error values a and b are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Error.op_Inequality(Confluent.Kafka.Error,Confluent.Kafka.Error)">
<summary>
Tests whether Error value a is not equal to Error value b.
</summary>
<param name="a">
The first Error value to compare.
</param>
<param name="b">
The second Error value to compare.
</param>
<returns>
true if Error values a and b are not equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Error.ToString">
<summary>
Returns the string representation of the error.
Depending on the error source, this might be a rich
contextual error message, or a simple static
string representation of the error Code.
</summary>
<returns>
A string representation of the Error object.
</returns>
</member>
<member name="T:Confluent.Kafka.ErrorCode">
<summary>
Enumeration of local and broker generated error codes.
</summary>
<remarks>
Error codes that relate to locally produced errors in
librdkafka are prefixed with Local_
</remarks>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_BadMsg">
<summary>
Received message is incorrect
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_BadCompression">
<summary>
Bad/unknown compression
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Destroy">
<summary>
Broker is going away
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Fail">
<summary>
Generic failure
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Transport">
<summary>
Broker transport failure
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_CritSysResource">
<summary>
Critical system resource
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Resolve">
<summary>
Failed to resolve broker
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_MsgTimedOut">
<summary>
Produced message timed out
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_PartitionEOF">
<summary>
Reached the end of the topic+partition queue on the broker. Not really an error.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_UnknownPartition">
<summary>
Permanent: Partition does not exist in cluster.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_FS">
<summary>
File or filesystem error
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_UnknownTopic">
<summary>
Permanent: Topic does not exist in cluster.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_AllBrokersDown">
<summary>
All broker connections are down.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_InvalidArg">
<summary>
Invalid argument, or invalid configuration
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_TimedOut">
<summary>
Operation timed out
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_QueueFull">
<summary>
Queue is full
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_IsrInsuff">
<summary>
ISR count &lt; required.acks
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_NodeUpdate">
<summary>
Broker node update
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Ssl">
<summary>
SSL error
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_WaitCoord">
<summary>
Waiting for coordinator to become available.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_UnknownGroup">
<summary>
Unknown client group
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_InProgress">
<summary>
Operation in progress
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_PrevInProgress">
<summary>
Previous operation in progress, wait for it to finish.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_ExistingSubscription">
<summary>
This operation would interfere with an existing subscription
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_AssignPartitions">
<summary>
Assigned partitions (rebalance_cb)
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_RevokePartitions">
<summary>
Revoked partitions (rebalance_cb)
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Conflict">
<summary>
Conflicting use
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_State">
<summary>
Wrong state
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_UnknownProtocol">
<summary>
Unknown protocol
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_NotImplemented">
<summary>
Not implemented
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Authentication">
<summary>
Authentication failure
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_NoOffset">
<summary>
No stored offset
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Outdated">
<summary>
Outdated
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_TimedOutQueue">
<summary>
Timed out in queue
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_UnsupportedFeature">
<summary>
Feature not supported by broker
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_WaitCache">
<summary>
Awaiting cache update
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Intr">
<summary>
Operation interrupted
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_KeySerialization">
<summary>
Key serialization error
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_ValueSerialization">
<summary>
Value serialization error
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_KeyDeserialization">
<summary>
Key deserialization error
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_ValueDeserialization">
<summary>
Value deserialization error
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Partial">
<summary>
Partial response
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_ReadOnly">
<summary>
Modification attempted on read-only object
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_NoEnt">
<summary>
No such entry / item not found
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Underflow">
<summary>
Read underflow
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_InvalidType">
<summary>
Invalid type
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Retry">
<summary>
Retry operation.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_PurgeQueue">
<summary>
Purged in queue
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_PurgeInflight">
<summary>
Purged in flight
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Fatal">
<summary>
Fatal error: see rd_kafka_fatal_error()
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Inconsistent">
<summary>
Inconsistent state
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_GaplessGuarantee">
<summary>
Gap-less ordering would not be guaranteed if proceeding
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_MaxPollExceeded">
<summary>
Maximum poll interval exceeded
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_UnknownBroker">
<summary>
Unknown broker
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_NotConfigured">
<summary>
Functionality not configured
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Fenced">
<summary>
Instance has been fenced
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Local_Application">
<summary>
Application generated exception.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.Unknown">
<summary>
Unknown broker error
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.NoError">
<summary>
Success
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.OffsetOutOfRange">
<summary>
Offset out of range
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidMsg">
<summary>
Invalid message
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.UnknownTopicOrPart">
<summary>
Unknown topic or partition
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidMsgSize">
<summary>
Invalid message size
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.LeaderNotAvailable">
<summary>
Leader not available
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.NotLeaderForPartition">
<summary>
Not leader for partition
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.RequestTimedOut">
<summary>
Request timed out
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.BrokerNotAvailable">
<summary>
Broker not available
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.ReplicaNotAvailable">
<summary>
Replica not available
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.MsgSizeTooLarge">
<summary>
Message size too large
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.StaleCtrlEpoch">
<summary>
StaleControllerEpochCode
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.OffsetMetadataTooLarge">
<summary>
Offset metadata string too large
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.NetworkException">
<summary>
Broker disconnected before response received
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.GroupLoadInProress">
<summary>
Group coordinator load in progress
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.GroupCoordinatorNotAvailable">
<summary>
Group coordinator not available
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.NotCoordinatorForGroup">
<summary>
Not coordinator for group
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.TopicException">
<summary>
Invalid topic
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.RecordListTooLarge">
<summary>
Message batch larger than configured server segment size
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.NotEnoughReplicas">
<summary>
Not enough in-sync replicas
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.NotEnoughReplicasAfterAppend">
<summary>
Message(s) written to insufficient number of in-sync replicas
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidRequiredAcks">
<summary>
Invalid required acks value
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.IllegalGeneration">
<summary>
Specified group generation id is not valid
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InconsistentGroupProtocol">
<summary>
Inconsistent group protocol
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidGroupId">
<summary>
Invalid group.id
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.UnknownMemberId">
<summary>
Unknown member
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidSessionTimeout">
<summary>
Invalid session timeout
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.RebalanceInProgress">
<summary>
Group rebalance in progress
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidCommitOffsetSize">
<summary>
Commit offset data size is not valid
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.TopicAuthorizationFailed">
<summary>
Topic authorization failed
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.GroupAuthorizationFailed">
<summary>
Group authorization failed
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.ClusterAuthorizationFailed">
<summary>
Cluster authorization failed
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidTimestamp">
<summary>
Invalid timestamp
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.UnsupportedSaslMechanism">
<summary>
Unsupported SASL mechanism
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.IllegalSaslState">
<summary>
Illegal SASL state
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.UnsupportedVersion">
<summary>
Unsupported version
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.TopicAlreadyExists">
<summary>
Topic already exists
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidPartitions">
<summary>
Invalid number of partitions
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidReplicationFactor">
<summary>
Invalid replication factor
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidReplicaAssignment">
<summary>
Invalid replica assignment
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidConfig">
<summary>
Invalid config
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.NotController">
<summary>
Not controller for cluster
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidRequest">
<summary>
Invalid request
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.UnsupportedForMessageFormat">
<summary>
Message format on broker does not support request
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.PolicyViolation">
<summary>
Isolation policy violation
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.OutOfOrderSequenceNumber">
<summary>
Broker received an out of order sequence number
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.DuplicateSequenceNumber">
<summary>
Broker received a duplicate sequence number
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidProducerEpoch">
<summary>
Producer attempted an operation with an old epoch
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidTxnState">
<summary>
Producer attempted a transactional operation in an invalid state
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidProducerIdMapping">
<summary>
Producer attempted to use a producer id which is not currently assigned to its transactional id
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidTransactionTimeout">
<summary>
Transaction timeout is larger than the maximum value allowed by the broker's max.transaction.timeout.ms
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.ConcurrentTransactions">
<summary>
Producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.TransactionCoordinatorFenced">
<summary>
Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.TransactionalIdAuthorizationFailed">
<summary>
Transactional Id authorization failed
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.SecurityDisabled">
<summary>
Security features are disabled
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.OperationNotAttempted">
<summary>
Operation not attempted
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.KafkaStorageError">
<summary>
Disk error when trying to access a log file on disk.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.LogDirNotFound">
<summary>
The user-specified log directory is not found in the broker config.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.SaslAuthenticationFailed">
<summary>
SASL Authentication failed.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.UnknownProducerId">
<summary>
Unknown Producer Id.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.ReassignmentInProgress">
<summary>
Partition reassignment is in progress.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.DelegationTokenAuthDisabled">
<summary>
Delegation Token feature is not enabled.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.DelegationTokenNotFound">
<summary>
Delegation Token is not found on server.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.DelegationTokenOwnerMismatch">
<summary>
Specified Principal is not valid Owner/Renewer.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.DelegationTokenRequestNotAllowed">
<summary>
Delegation Token requests are not allowed on this connection.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.DelegationTokenAuthorizationFailed">
<summary>
Delegation Token authorization failed.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.DelegationTokenExpired">
<summary>
Delegation Token is expired.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidPrincipalType">
<summary>
Supplied principalType is not supported.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.NonEmptyGroup">
<summary>
The group is not empty.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.GroupIdNotFound">
<summary>
The group id does not exist.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.FetchSessionIdNotFound">
<summary>
The fetch session ID was not found.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.InvalidFetchSessionEpoch">
<summary>
The fetch session epoch is invalid.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.ListenerNotFound">
<summary>
No matching listener.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.TopicDeletionDisabled">
<summary>
Topic deletion is disabled.
</summary>
</member>
<member name="F:Confluent.Kafka.ErrorCode.UnsupportedCompressionType">
<summary>
Unsupported compression type.
</summary>
</member>
<member name="T:Confluent.Kafka.ErrorCodeExtensions">
<summary>
Provides extension methods on the ErrorCode enumeration.
</summary>
</member>
<member name="M:Confluent.Kafka.ErrorCodeExtensions.GetReason(Confluent.Kafka.ErrorCode)">
<summary>
Returns the static error string associated with
the particular ErrorCode value.
</summary>
</member>
<member name="T:Confluent.Kafka.MessageNullException">
<summary>
Thrown when there is an attempt to dereference a null Message reference.
</summary>
</member>
<member name="M:Confluent.Kafka.MessageNullException.#ctor">
<summary>
Initializes a new instance of MessageNullException.
</summary>
</member>
<member name="T:Confluent.Kafka.TopicPartitionException">
<summary>
Represents an error that occurred during a Consumer.Position request.
</summary>
</member>
<member name="M:Confluent.Kafka.TopicPartitionException.#ctor(System.Collections.Generic.List{Confluent.Kafka.TopicPartitionError})">
<summary>
Initializes a new instance of TopicPartitionException.
</summary>
<param name="results">
The result corresponding to all topic partitions of the request
(whether or not they were in error). At least one of these
results will be in error.
</param>
</member>
<member name="P:Confluent.Kafka.TopicPartitionException.Results">
<summary>
The result corresponding to all topic partitions of the request
(whether or not they were in error). At least one of these
results will be in error.
</summary>
</member>
<member name="T:Confluent.Kafka.TopicPartitionOffsetException">
<summary>
Represents an error that occurred during a Consumer.Position request.
</summary>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffsetException.#ctor(System.Collections.Generic.List{Confluent.Kafka.TopicPartitionOffsetError})">
<summary>
Initializes a new instance of TopicPartitionOffsetException.
</summary>
<param name="results">
The result corresponding to all topic partitions of the request
(whether or not they were in error). At least one of these
results will be in error.
</param>
</member>
<member name="P:Confluent.Kafka.TopicPartitionOffsetException.Results">
<summary>
The result corresponding to all topic partitions of the request
(whether or not they were in error). At least one of these
results will be in error.
</summary>
</member>
<member name="T:Confluent.Kafka.GroupInfo">
<summary>
Encapsulates information describing a particular
Kafka group.
</summary>
</member>
<member name="M:Confluent.Kafka.GroupInfo.#ctor(Confluent.Kafka.BrokerMetadata,System.String,Confluent.Kafka.Error,System.String,System.String,System.String,System.Collections.Generic.List{Confluent.Kafka.GroupMemberInfo})">
<summary>
Initializes a new instance of the GroupInfo class.
</summary>
<param name="broker">
Originating broker info.
</param>
<param name="group">
The group name.
</param>
<param name="error">
A rich <see cref="P:Confluent.Kafka.GroupInfo.Error"/> value associated with the information encapsulated by this class.
</param>
<param name="state">
The group state.
</param>
<param name="protocolType">
The group protocol type.
</param>
<param name="protocol">
The group protocol.
</param>
<param name="members">
The group members.
</param>
</member>
<member name="P:Confluent.Kafka.GroupInfo.Broker">
<summary>
Gets the originating-broker info.
</summary>
</member>
<member name="P:Confluent.Kafka.GroupInfo.Group">
<summary>
Gets the group name
</summary>
</member>
<member name="P:Confluent.Kafka.GroupInfo.Error">
<summary>
Gets a rich <see cref="P:Confluent.Kafka.GroupInfo.Error"/> value associated with the information encapsulated by this class.
</summary>
</member>
<member name="P:Confluent.Kafka.GroupInfo.State">
<summary>
Gets the group state
</summary>
</member>
<member name="P:Confluent.Kafka.GroupInfo.ProtocolType">
<summary>
Gets the group protocol type
</summary>
</member>
<member name="P:Confluent.Kafka.GroupInfo.Protocol">
<summary>
Gets the group protocol
</summary>
</member>
<member name="P:Confluent.Kafka.GroupInfo.Members">
<summary>
Gets the group members
</summary>
</member>
<member name="T:Confluent.Kafka.GroupMemberInfo">
<summary>
Encapsulates information describing a particular
member of a Kafka group.
</summary>
</member>
<member name="M:Confluent.Kafka.GroupMemberInfo.#ctor(System.String,System.String,System.String,System.Byte[],System.Byte[])">
<summary>
Initializes a new GroupMemberInfo class instance.
</summary>
<param name="memberId">
The member id (generated by the broker).
</param>
<param name="clientId">
The client's client.id.
</param>
<param name="clientHost">
The client's hostname.
</param>
<param name="memberMetadata">
Gets the member metadata (binary). The format of this data depends on the protocol type.
</param>
<param name="memberAssignment">
Gets the member assignment (binary). The format of this data depends on the protocol type.
</param>
</member>
<member name="P:Confluent.Kafka.GroupMemberInfo.MemberId">
<summary>
Gets the member id (generated by broker).
</summary>
</member>
<member name="P:Confluent.Kafka.GroupMemberInfo.ClientId">
<summary>
Gets the client's client.id.
</summary>
</member>
<member name="P:Confluent.Kafka.GroupMemberInfo.ClientHost">
<summary>
Gets the client's hostname.
</summary>
</member>
<member name="P:Confluent.Kafka.GroupMemberInfo.MemberMetadata">
<summary>
Gets the member metadata (binary). The format of this data depends on the protocol type.
</summary>
</member>
<member name="P:Confluent.Kafka.GroupMemberInfo.MemberAssignment">
<summary>
Gets the member assignment (binary). The format of this data depends on the protocol type.
</summary>
</member>
<member name="T:Confluent.Kafka.Handle">
<summary>
A handle for a librdkafka client instance. Also encapsulates
a reference to the IClient instance that owns this handle.
</summary>
</member>
<member name="P:Confluent.Kafka.Handle.IsInvalid">
<summary>
Gets a value indicating whether the encapsulated librdkafka handle is invalid.
</summary>
<value>
<b>true</b> if the encapsulated librdkafka handle is invalid; otherwise, <b>false</b>.
</value>
</member>
<member name="T:Confluent.Kafka.Header">
<summary>
Represents a Kafka message header.
</summary>
<remarks>
Message headers are supported by v0.11 brokers and above.
</remarks>
</member>
<member name="P:Confluent.Kafka.Header.Key">
<summary>
The header key.
</summary>
</member>
<member name="M:Confluent.Kafka.Header.GetValueBytes">
<summary>
Get the serialized header value data.
</summary>
</member>
<member name="M:Confluent.Kafka.Header.#ctor(System.String,System.Byte[])">
<summary>
Create a new Header instance.
</summary>
<param name="key">
The header key.
</param>
<param name="value">
The header value (may be null).
</param>
</member>
<member name="T:Confluent.Kafka.Headers">
<summary>
A collection of Kafka message headers.
</summary>
<remarks>
Message headers are supported by v0.11 brokers and above.
</remarks>
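<example>
A minimal sketch of building a header collection and reading back the
most recent value for a key:
<code>
var headers = new Headers();
headers.Add("trace-id", System.Text.Encoding.UTF8.GetBytes("abc123"));
if (headers.TryGetLastBytes("trace-id", out var raw))
{
    Console.WriteLine(System.Text.Encoding.UTF8.GetString(raw));
}
</code>
</example>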
</member>
<member name="M:Confluent.Kafka.Headers.Add(System.String,System.Byte[])">
<summary>
Append a new header to the collection.
</summary>
<param name="key">
The header key.
</param>
<param name="val">
The header value (possibly null). Note: A null
header value is distinct from an empty header
value (array of length 0).
</param>
</member>
<member name="M:Confluent.Kafka.Headers.Add(Confluent.Kafka.Header)">
<summary>
Append a new header to the collection.
</summary>
<param name="header">
The header to add to the collection.
</param>
</member>
<member name="M:Confluent.Kafka.Headers.GetLastBytes(System.String)">
<summary>
Get the value of the latest header with the specified key.
</summary>
<param name="key">
The key to get the associated value of.
</param>
<returns>
The value of the latest element in the collection with the specified key.
</returns>
<exception cref="T:System.Collections.Generic.KeyNotFoundException">
The key <paramref name="key" /> was not present in the collection.
</exception>
</member>
<member name="M:Confluent.Kafka.Headers.TryGetLastBytes(System.String,System.Byte[]@)">
<summary>
Try to get the value of the latest header with the specified key.
</summary>
<param name="key">
The key to get the associated value of.
</param>
<param name="lastHeader">
The value of the latest element in the collection with the
specified key, if a header with that key was present in the
collection.
</param>
<returns>
true if a header with the specified key was present in
the collection, false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Headers.Remove(System.String)">
<summary>
Removes all headers for the given key.
</summary>
<param name="key">
The key to remove all headers for
</param>
</member>
<member name="M:Confluent.Kafka.Headers.GetEnumerator">
<summary>
Returns an enumerator that iterates through the headers collection.
</summary>
<returns>
An enumerator object that can be used to iterate through the headers collection.
</returns>
</member>
<member name="M:Confluent.Kafka.Headers.System#Collections#IEnumerable#GetEnumerator">
<summary>
Returns an enumerator that iterates through the headers collection.
</summary>
<returns>
An enumerator object that can be used to iterate through the headers collection.
</returns>
</member>
<member name="P:Confluent.Kafka.Headers.Item(System.Int32)">
<summary>
Gets the header at the specified index
</summary>
<param name="index">
The zero-based index of the element to get.
</param>
</member>
<member name="P:Confluent.Kafka.Headers.Count">
<summary>
The number of headers in the collection.
</summary>
</member>
<member name="T:Confluent.Kafka.IAdminClient">
<summary>
Defines an Apache Kafka admin client.
</summary>
</member>
<member name="M:Confluent.Kafka.IAdminClient.ListGroups(System.TimeSpan)">
<summary>
Get information pertaining to all groups in
the Kafka cluster (blocking)
[API-SUBJECT-TO-CHANGE] - The API associated
with this functionality is subject to change.
</summary>
<param name="timeout">
The maximum period of time the call may block.
</param>
</member>
<member name="M:Confluent.Kafka.IAdminClient.ListGroup(System.String,System.TimeSpan)">
<summary>
Get information pertaining to a particular
group in the Kafka cluster (blocking).
[API-SUBJECT-TO-CHANGE] - The API associated
with this functionality is subject to change.
</summary>
<param name="group">
The group of interest.
</param>
<param name="timeout">
The maximum period of time the call
may block.
</param>
<returns>
Returns information pertaining to the
specified group or null if this group does
not exist.
</returns>
</member>
<member name="M:Confluent.Kafka.IAdminClient.GetMetadata(System.String,System.TimeSpan)">
<summary>
Query the cluster for metadata for a
specific topic.
[API-SUBJECT-TO-CHANGE] - The API associated
with this functionality is subject to change.
</summary>
</member>
<member name="M:Confluent.Kafka.IAdminClient.GetMetadata(System.TimeSpan)">
<summary>
Query the cluster for metadata.
[API-SUBJECT-TO-CHANGE] - The API associated
with this functionality is subject to change.
</summary>
</member>
<member name="M:Confluent.Kafka.IAdminClient.CreatePartitionsAsync(System.Collections.Generic.IEnumerable{Confluent.Kafka.Admin.PartitionsSpecification},Confluent.Kafka.Admin.CreatePartitionsOptions)">
<summary>
Increase the number of partitions for one
or more topics as per the supplied
PartitionsSpecifications.
</summary>
<param name="partitionsSpecifications">
A collection of PartitionsSpecifications.
</param>
<param name="options">
The options to use when creating
the partitions.
</param>
<returns>
The results of the
PartitionsSpecification requests.
</returns>
</member>
<member name="M:Confluent.Kafka.IAdminClient.DeleteTopicsAsync(System.Collections.Generic.IEnumerable{System.String},Confluent.Kafka.Admin.DeleteTopicsOptions)">
<summary>
Delete a set of topics. This operation is not
transactional so it may succeed for some
topics while failing for others. It may take
several seconds after the DeleteTopicsResult
returns success for all the brokers to become
aware that the topics are gone. During this
time, topics may continue to be visible via
admin operations. If delete.topic.enable is
false on the brokers, DeleteTopicsAsync will
mark the topics for deletion, but not
actually delete them. The Task will return
successfully in this case.
</summary>
<param name="topics">
The topic names to delete.
</param>
<param name="options">
The options to use when deleting topics.
</param>
<returns>
The results of the delete topic requests.
</returns>
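<example>
A minimal sketch (adminClient is an assumed IAdminClient instance);
note the topics may remain visible to admin operations for a short
time after the call returns:
<code>
await adminClient.DeleteTopicsAsync(new[] { "topic-a", "topic-b" });
</code>
</example>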
</member>
<member name="M:Confluent.Kafka.IAdminClient.CreateTopicsAsync(System.Collections.Generic.IEnumerable{Confluent.Kafka.Admin.TopicSpecification},Confluent.Kafka.Admin.CreateTopicsOptions)">
<summary>
Create a set of new topics.
</summary>
<param name="topics">
A collection of specifications for
the new topics to create.
</param>
<param name="options">
The options to use when creating
the topics.
</param>
<returns>
The results of the create topic requests.
</returns>
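<example>
A minimal sketch (adminClient is an assumed IAdminClient instance;
the topic name and sizing are illustrative):
<code>
await adminClient.CreateTopicsAsync(new[]
{
    new TopicSpecification { Name = "my-topic", NumPartitions = 3, ReplicationFactor = 1 }
});
</code>
</example>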
</member>
<member name="M:Confluent.Kafka.IAdminClient.AlterConfigsAsync(System.Collections.Generic.Dictionary{Confluent.Kafka.Admin.ConfigResource,System.Collections.Generic.List{Confluent.Kafka.Admin.ConfigEntry}},Confluent.Kafka.Admin.AlterConfigsOptions)">
<summary>
Update the configuration for the specified
resources. Updates are not transactional so
they may succeed for some resources while failing
for others. The configs for a particular
resource are updated atomically. This operation
is supported by brokers with version 0.11.0
or higher. IMPORTANT NOTE: Unspecified
configuration properties will be reverted to
their default values. Furthermore, if you use
DescribeConfigsAsync to obtain the current set
of configuration values, modify them, then use
AlterConfigsAsync to set them, you will lose
any non-default values that are marked as
sensitive because they are not provided by
DescribeConfigsAsync.
</summary>
<param name="configs">
The resources with their configs
(topic is the only resource type with configs
that can be updated currently).
</param>
<param name="options">
The options to use when altering configs.
</param>
<returns>
The results of the alter configs requests.
</returns>
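<example>
A minimal sketch (adminClient is an assumed IAdminClient instance)
that sets one topic config value. Remember that unspecified
properties revert to their defaults:
<code>
var resource = new ConfigResource { Type = ResourceType.Topic, Name = "my-topic" };
await adminClient.AlterConfigsAsync(new Dictionary&lt;ConfigResource, List&lt;ConfigEntry>>
{
    [resource] = new List&lt;ConfigEntry>
    {
        new ConfigEntry { Name = "cleanup.policy", Value = "compact" }
    }
});
</code>
</example>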
</member>
<member name="M:Confluent.Kafka.IAdminClient.DescribeConfigsAsync(System.Collections.Generic.IEnumerable{Confluent.Kafka.Admin.ConfigResource},Confluent.Kafka.Admin.DescribeConfigsOptions)">
<summary>
Get the configuration for the specified
resources. The returned configuration includes
default values and the IsDefault property can be
used to distinguish them from user supplied values.
The value of config entries where IsSensitive is
true is always null so that sensitive information
is not disclosed. Config entries where IsReadOnly
is true cannot be updated. This operation is
supported by brokers with version 0.11.0.0 or higher.
</summary>
<param name="resources">
The resources (topic and broker resource
types are currently supported)
</param>
<param name="options">
The options to use when describing configs.
</param>
<returns>
Configs for the specified resources.
</returns>
</member>
<member name="M:Confluent.Kafka.IAdminClient.DeleteRecordsAsync(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset},Confluent.Kafka.Admin.DeleteRecordsOptions)">
<summary>
Delete records (messages) in topic partitions
older than the offsets provided.
</summary>
<param name="topicPartitionOffsets">
The offsets to delete up to.
</param>
<param name="options">
The options to use when deleting records.
</param>
<returns>
The result of the delete records request.
</returns>
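<example>
A minimal sketch (adminClient is an assumed IAdminClient instance)
that deletes all records before offset 100 on partition 0 of
my-topic:
<code>
await adminClient.DeleteRecordsAsync(new[]
{
    new TopicPartitionOffset("my-topic", 0, 100)
});
</code>
</example>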
</member>
<member name="T:Confluent.Kafka.IAsyncDeserializer`1">
<summary>
A deserializer for use with <see cref="T:Confluent.Kafka.Consumer`2" />.
</summary>
</member>
<member name="M:Confluent.Kafka.IAsyncDeserializer`1.DeserializeAsync(System.ReadOnlyMemory{System.Byte},System.Boolean,Confluent.Kafka.SerializationContext)">
<summary>
Deserialize a message key or value.
</summary>
<param name="data">
The raw byte data to deserialize.
</param>
<param name="isNull">
True if this is a null value.
</param>
<param name="context">
Context relevant to the deserialize operation.
</param>
<returns>
A <see cref="T:System.Threading.Tasks.Task" /> that completes
with the deserialized value.
</returns>
</member>
<member name="T:Confluent.Kafka.IAsyncSerializer`1">
<summary>
Defines a serializer for use with <see cref="T:Confluent.Kafka.Producer`2" />.
</summary>
</member>
<member name="M:Confluent.Kafka.IAsyncSerializer`1.SerializeAsync(`0,Confluent.Kafka.SerializationContext)">
<summary>
Serialize the key or value of a <see cref="T:Confluent.Kafka.Message`2" />
instance.
</summary>
<param name="data">
The value to serialize.
</param>
<param name="context">
Context relevant to the serialize operation.
</param>
<returns>
A <see cref="T:System.Threading.Tasks.Task" /> that
completes with the serialized data.
</returns>
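<example>
A minimal sketch of a custom implementation (the class name is
illustrative); a serializer that completes synchronously can simply
wrap its result in Task.FromResult:
<code>
using System.Text;
using System.Threading.Tasks;
using Confluent.Kafka;

public class Utf8AsyncSerializer : IAsyncSerializer&lt;string>
{
    public Task&lt;byte[]> SerializeAsync(string data, SerializationContext context)
        => Task.FromResult(Encoding.UTF8.GetBytes(data));
}
</code>
</example>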
</member>
<member name="T:Confluent.Kafka.IClient">
<summary>
Defines methods common to all client types.
</summary>
</member>
<member name="P:Confluent.Kafka.IClient.Handle">
<summary>
An opaque reference to the underlying
librdkafka client instance. This can be used
to construct an AdminClient that utilizes the
same underlying librdkafka client as this
instance.
</summary>
</member>
<member name="P:Confluent.Kafka.IClient.Name">
<summary>
Gets the name of this client instance.
Contains (but is not equal to) the client.id
configuration parameter.
</summary>
<remarks>
This name will be unique across all client
instances in a given application which allows
log messages to be associated with the
corresponding instance.
</remarks>
</member>
<member name="M:Confluent.Kafka.IClient.AddBrokers(System.String)">
<summary>
Adds one or more brokers to the Client's list
of initial bootstrap brokers.
Note: Additional brokers are discovered
automatically as soon as the Client connects
to any broker by querying the broker metadata.
Calling this method is only required in some
scenarios where the address of all brokers in
the cluster changes.
</summary>
<param name="brokers">
Comma-separated list of brokers in
the same format as the bootstrap.servers
configuration parameter.
</param>
<remarks>
There is currently no API to remove existing
configured, added or learnt brokers.
</remarks>
<returns>
The number of brokers added. This value
includes brokers that may have been specified
a second time.
</returns>
</member>
<member name="T:Confluent.Kafka.IConsumer`2">
<summary>
Defines a high-level Apache Kafka consumer
(with key and value deserialization).
</summary>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Consume(System.Int32)">
<summary>
Poll for new messages / events. Blocks
until a consume result is available or the
timeout period has elapsed.
</summary>
<param name="millisecondsTimeout">
The maximum period of time (in milliseconds)
the call may block.
</param>
<returns>
The consume result.
</returns>
<remarks>
The partitions assigned/revoked and offsets
committed handlers may be invoked as a
side-effect of calling this method (on the
same thread).
</remarks>
<exception cref="T:Confluent.Kafka.ConsumeException">
Thrown
when a call to this method is unsuccessful
for any reason. Inspect the Error property
of the exception for detailed information.
</exception>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Consume(System.Threading.CancellationToken)">
<summary>
Poll for new messages / events. Blocks
until a consume result is available or the
operation has been cancelled.
</summary>
<param name="cancellationToken">
A cancellation token
that can be used to cancel this operation.
</param>
<returns>
The consume result.
</returns>
<remarks>
The partitions assigned/revoked and
offsets committed handlers may be invoked
as a side-effect of calling this method
(on the same thread).
</remarks>
<exception cref="T:Confluent.Kafka.ConsumeException">
Thrown
when a call to this method is unsuccessful
for any reason (except cancellation by
user). Inspect the Error property of the
exception for detailed information.
</exception>
<exception cref="T:System.OperationCanceledException">
Thrown on cancellation.
</exception>
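<example>
A minimal sketch of a typical poll loop (consumer and cts are an
assumed IConsumer instance and CancellationTokenSource):
<code>
try
{
    while (true)
    {
        var result = consumer.Consume(cts.Token);
        Console.WriteLine($"{result.TopicPartitionOffset}: {result.Message.Value}");
    }
}
catch (OperationCanceledException)
{
    consumer.Close();
}
</code>
</example>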
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Consume(System.TimeSpan)">
<summary>
Poll for new messages / events. Blocks
until a consume result is available or the
timeout period has elapsed.
</summary>
<param name="timeout">
The maximum period of time
the call may block.
</param>
<returns>
The consume result.
</returns>
<remarks>
The partitions assigned/revoked and offsets
committed handlers may be invoked as a
side-effect of calling this method (on the
same thread).
</remarks>
<exception cref="T:Confluent.Kafka.ConsumeException">
Thrown
when a call to this method is unsuccessful
for any reason. Inspect the Error property
of the exception for detailed information.
</exception>
</member>
<member name="P:Confluent.Kafka.IConsumer`2.MemberId">
<summary>
Gets the (dynamic) group member id of
this consumer (as set by the broker).
</summary>
</member>
<member name="P:Confluent.Kafka.IConsumer`2.Assignment">
<summary>
Gets the current partition assignment as set by
<see cref="M:Confluent.Kafka.Consumer`2.Assign(Confluent.Kafka.TopicPartition)" />
or implicitly.
</summary>
</member>
<member name="P:Confluent.Kafka.IConsumer`2.Subscription">
<summary>
Gets the current topic subscription as set by
<see cref="M:Confluent.Kafka.Consumer`2.Subscribe(System.String)" />.
</summary>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Subscribe(System.Collections.Generic.IEnumerable{System.String})">
<summary>
Update the topic subscription.
Any previous subscription will be
unassigned and unsubscribed first.
</summary>
<param name="topics">
The topics to subscribe to.
A regex can be specified to subscribe to
the set of all matching topics (which is
updated as topics are added / removed from
the cluster). A regex must be front
anchored to be recognized as a regex.
e.g. ^myregex
</param>
<remarks>
The topic subscription set denotes the
desired set of topics to consume from.
This set is provided to the consumer
group leader (one of the group
members) which uses the configured
partition.assignment.strategy to
allocate partitions of topics in the
subscription set to the consumers in
the group.
</remarks>
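<example>
A minimal sketch mixing a literal topic name and a front-anchored
regex (topic names are illustrative; consumer is an assumed
IConsumer instance):
<code>
consumer.Subscribe(new[] { "orders", "^metrics\\..*" });
</code>
</example>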
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Subscribe(System.String)">
<summary>
Sets the subscription set to a single
topic.
Any previous subscription will be
unassigned and unsubscribed first.
</summary>
<param name="topic">
The topic to subscribe to.
A regex can be specified to subscribe to
the set of all matching topics (which is
updated as topics are added / removed from
the cluster). A regex must be front
anchored to be recognized as a regex.
e.g. ^myregex
</param>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Unsubscribe">
<summary>
Unsubscribe from the current subscription
set.
</summary>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Assign(Confluent.Kafka.TopicPartition)">
<summary>
Sets the current set of assigned partitions
(the set of partitions the consumer will consume
from) to a single <paramref name="partition" />.
Note: The newly specified set is the complete
set of partitions to consume from. If the
consumer is already assigned to a set of
partitions, the previous set will be replaced.
</summary>
<param name="partition">
The partition to consume from.
Consumption will resume from the last committed
offset, or according to the 'auto.offset.reset'
configuration parameter if no offsets have been
committed yet.
</param>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Assign(Confluent.Kafka.TopicPartitionOffset)">
<summary>
Sets the current set of assigned partitions
(the set of partitions the consumer will consume
from) to a single <paramref name="partition" />.
Note: The newly specified set is the complete
set of partitions to consume from. If the
consumer is already assigned to a set of
partitions, the previous set will be replaced.
</summary>
<param name="partition">
The partition to consume from.
If an offset value of Offset.Unset (-1001) is
specified, consumption will resume from the last
committed offset, or according to the
'auto.offset.reset' configuration parameter if
no offsets have been committed yet.
</param>
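      <example>
        A minimal sketch (the topic name and partition number are
        assumptions for illustration):
        <code>
        // Start reading partition 0 of "my-topic" from the beginning.
        consumer.Assign(new TopicPartitionOffset("my-topic", 0, Offset.Beginning));

        // Or resume from the last committed offset (falling back to
        // the 'auto.offset.reset' policy if none has been committed).
        consumer.Assign(new TopicPartitionOffset("my-topic", 0, Offset.Unset));
        </code>
      </example>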
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Assign(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset})">
<summary>
Sets the current set of assigned partitions
(the set of partitions the consumer will consume
from) to <paramref name="partitions" />.
Note: The newly specified set is the complete
set of partitions to consume from. If the
consumer is already assigned to a set of
partitions, the previous set will be replaced.
</summary>
<param name="partitions">
The set of partitions to consume from.
If an offset value of Offset.Unset (-1001) is
specified for a partition, consumption will
resume from the last committed offset on that
partition, or according to the
'auto.offset.reset' configuration parameter if
no offsets have been committed yet.
</param>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Assign(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartition})">
<summary>
Sets the current set of assigned partitions
(the set of partitions the consumer will consume
from) to <paramref name="partitions" />.
Note: The newly specified set is the complete
set of partitions to consume from. If the
consumer is already assigned to a set of
partitions, the previous set will be replaced.
</summary>
<param name="partitions">
The set of partitions to consume from.
Consumption will resume from the last committed
offset on each partition, or according to the
'auto.offset.reset' configuration parameter if
no offsets have been committed yet.
</param>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.IncrementalAssign(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset})">
<summary>
Incrementally add <paramref name="partitions" />
to the current assignment, starting consumption
from the specified offsets.
</summary>
<param name="partitions">
The set of additional partitions to consume from.
If an offset value of Offset.Unset (-1001) is
specified for a partition, consumption will
resume from the last committed offset on that
partition, or according to the
'auto.offset.reset' configuration parameter if
no offsets have been committed yet.
</param>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.IncrementalAssign(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartition})">
<summary>
Incrementally add <paramref name="partitions" />
to the current assignment.
</summary>
<param name="partitions">
The set of additional partitions to consume from.
Consumption will resume from the last committed
offset on each partition, or according to the
'auto.offset.reset' configuration parameter if
no offsets have been committed yet.
</param>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.IncrementalUnassign(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartition})">
<summary>
            Incrementally remove <paramref name="partitions" />
            from the current assignment.
</summary>
<param name="partitions">
The set of partitions to remove from the current
assignment.
</param>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Unassign">
<summary>
Remove the current set of assigned partitions
and stop consumption.
</summary>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.StoreOffset(Confluent.Kafka.ConsumeResult{`0,`1})">
<summary>
Store offsets for a single partition based on
the topic/partition/offset of a consume result.
The offset will be committed according to
`auto.commit.interval.ms` (and
`enable.auto.commit`) or manual offset-less
commit().
</summary>
<remarks>
`enable.auto.offset.store` must be set to
"false" when using this API.
</remarks>
<param name="result">
A consume result used to determine
the offset to store and topic/partition.
</param>
<returns>
Current stored offset or a partition
specific error.
</returns>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown if the request failed.
</exception>
<exception cref="T:Confluent.Kafka.TopicPartitionOffsetException">
Thrown if result is in error.
</exception>
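      <example>
        A sketch of at-least-once processing using manual offset
        storage; it assumes the consumer was configured with
        EnableAutoOffsetStore set to false (auto commit left enabled)
        and that Process is a hypothetical application method:
        <code>
        var cr = consumer.Consume(cancellationToken);
        Process(cr);               // hypothetical application logic
        consumer.StoreOffset(cr);  // the stored offset is committed later
                                   // by the auto commit mechanism
        </code>
      </example>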
</member>
<member name="M:Confluent.Kafka.IConsumer`2.StoreOffset(Confluent.Kafka.TopicPartitionOffset)">
<summary>
Store offsets for a single partition.
The offset will be committed (written) to the
offset store according to `auto.commit.interval.ms`
            or manual offset-less commit(). Calling
            this method does not itself commit offsets;
            it only stores them for a future commit.
</summary>
<remarks>
`enable.auto.offset.store` must be set to
"false" when using this API.
</remarks>
<param name="offset">
The offset to be committed.
</param>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown if the request failed.
</exception>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Commit">
<summary>
Commit all offsets for the current assignment.
</summary>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown if the request failed.
</exception>
<exception cref="T:Confluent.Kafka.TopicPartitionOffsetException">
Thrown if any of the constituent results is in
error. The entire result (which may contain
constituent results that are not in error) is
available via the <see cref="P:Confluent.Kafka.TopicPartitionOffsetException.Results" />
property of the exception.
</exception>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Commit(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset})">
<summary>
Commit an explicit list of offsets.
</summary>
<param name="offsets">
The topic/partition offsets to commit.
</param>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown if the request failed.
</exception>
<exception cref="T:Confluent.Kafka.TopicPartitionOffsetException">
Thrown if any of the constituent results is in
error. The entire result (which may contain
constituent results that are not in error) is
available via the <see cref="P:Confluent.Kafka.TopicPartitionOffsetException.Results" />
property of the exception.
</exception>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Commit(Confluent.Kafka.ConsumeResult{`0,`1})">
<summary>
Commits an offset based on the
topic/partition/offset of a ConsumeResult.
</summary>
<param name="result">
The ConsumeResult instance used
to determine the committed offset.
</param>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown if the request failed.
</exception>
<exception cref="T:Confluent.Kafka.TopicPartitionOffsetException">
Thrown if the result is in error.
</exception>
<remarks>
A consumer at position N has consumed
messages with offsets up to N-1 and will
next receive the message with offset N.
Hence, this method commits an offset of
<paramref name="result" />.Offset + 1.
</remarks>
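      <example>
        A sketch of a synchronous per-message commit (simple, but
        note that committing after every message reduces throughput):
        <code>
        var cr = consumer.Consume(TimeSpan.FromSeconds(1));
        if (cr != null)
        {
            // Commits cr.Offset + 1 for the result's partition.
            consumer.Commit(cr);
        }
        </code>
      </example>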
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Seek(Confluent.Kafka.TopicPartitionOffset)">
<summary>
            Seek to <paramref name="tpo" /> on the
specified topic partition which is either
an absolute or logical offset. This must
only be done for partitions that are
currently being consumed (i.e., have been
Assign()ed). To set the start offset for
not-yet-consumed partitions you should use the
Assign method instead.
</summary>
<param name="tpo">
The topic partition to seek
on and the offset to seek to.
</param>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown if the request failed.
</exception>
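      <example>
        A sketch that rewinds an actively consumed partition; cr is
        assumed to be a result from a prior Consume call:
        <code>
        // Re-deliver the message just consumed (and all that follow it).
        consumer.Seek(cr.TopicPartitionOffset);
        </code>
      </example>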
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Pause(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartition})">
<summary>
Pause consumption for the provided list
of partitions.
</summary>
<param name="partitions">
The partitions to pause consumption of.
</param>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown if the request failed.
</exception>
<exception cref="T:Confluent.Kafka.TopicPartitionException">
            Thrown if any of the per-partition results is in error.
</exception>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Resume(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartition})">
<summary>
Resume consumption for the provided list of partitions.
</summary>
<param name="partitions">
The partitions to resume consumption of.
</param>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown if the request failed.
</exception>
<exception cref="T:Confluent.Kafka.TopicPartitionException">
            Thrown if any of the per-partition results is in error.
</exception>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Committed(System.TimeSpan)">
<summary>
Retrieve current committed offsets for the
current assignment.
The offset field of each requested partition
will be set to the offset of the last consumed
            message, or Offset.Unset if there was no
            previous message, or, alternatively, a
            partition-specific error may be returned.
</summary>
<param name="timeout">
The maximum period of time the call
may block.
</param>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown if the request failed.
</exception>
<exception cref="T:Confluent.Kafka.TopicPartitionOffsetException">
Thrown if any of the constituent results is in
error. The entire result (which may contain
constituent results that are not in error) is
available via the
<see cref="P:Confluent.Kafka.TopicPartitionOffsetException.Results" />
property of the exception.
</exception>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Committed(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartition},System.TimeSpan)">
<summary>
Retrieve current committed offsets for the
specified topic partitions.
The offset field of each requested partition
will be set to the offset of the last consumed
            message, or Offset.Unset if there was no
            previous message, or, alternatively, a
            partition-specific error may be returned.
</summary>
<param name="partitions">
the partitions to get the committed
offsets for.
</param>
<param name="timeout">
The maximum period of time the call
may block.
</param>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown if the request failed.
</exception>
<exception cref="T:Confluent.Kafka.TopicPartitionOffsetException">
Thrown if any of the constituent results is in
error. The entire result (which may contain
constituent results that are not in error) is
available via the
<see cref="P:Confluent.Kafka.TopicPartitionOffsetException.Results" />
property of the exception.
</exception>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Position(Confluent.Kafka.TopicPartition)">
<summary>
Gets the current position (offset) for the
specified topic / partition.
The offset field of each requested partition
will be set to the offset of the last consumed
message + 1, or Offset.Unset in case there was
no previous message consumed by this consumer.
</summary>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown if the request failed.
</exception>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.OffsetsForTimes(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionTimestamp},System.TimeSpan)">
<summary>
Look up the offsets for the given partitions
by timestamp. The returned offset for each
partition is the earliest offset for which
the timestamp is greater than or equal to
the given timestamp. If the provided
timestamp exceeds that of the last message
in the partition, a value of Offset.End (-1)
will be returned.
</summary>
<remarks>
The consumer does not need to be assigned to
the requested partitions.
</remarks>
<param name="timestampsToSearch">
The mapping from partition
to the timestamp to look up.
</param>
<param name="timeout">
The maximum period of time the
call may block.
</param>
<returns>
A mapping from partition to the
timestamp and offset of the first message with
timestamp greater than or equal to the target
timestamp.
</returns>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown
if the operation fails.
</exception>
<exception cref="T:Confluent.Kafka.TopicPartitionOffsetException">
Thrown if any of the constituent results is
in error. The entire result (which may contain
constituent results that are not in error) is
available via the
<see cref="P:Confluent.Kafka.TopicPartitionOffsetException.Results" />
property of the exception.
</exception>
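      <example>
        A sketch that finds the first offset at or after a point in
        time (the topic, partition and one hour window are assumptions):
        <code>
        var ts = new Timestamp(DateTime.UtcNow.AddHours(-1));
        var offsets = consumer.OffsetsForTimes(
            new[] { new TopicPartitionTimestamp(new TopicPartition("my-topic", 0), ts) },
            TimeSpan.FromSeconds(10));
        foreach (var tpo in offsets)
        {
            Console.WriteLine($"{tpo.TopicPartition}: {tpo.Offset}");
        }
        </code>
      </example>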
</member>
<member name="M:Confluent.Kafka.IConsumer`2.GetWatermarkOffsets(Confluent.Kafka.TopicPartition)">
<summary>
Get the last cached low (oldest available /
beginning) and high (newest/end) offsets for
a topic/partition. Does not block.
</summary>
<remarks>
The low offset is updated periodically (if
statistics.interval.ms is set) while the
high offset is updated on each fetched
message set from the broker. If there is no
cached offset (either low or high, or both)
then Offset.Unset will be returned for the
respective offset.
</remarks>
<param name="topicPartition">
The topic partition of interest.
</param>
<returns>
The requested WatermarkOffsets
(see that class for additional documentation).
</returns>
</member>
<member name="M:Confluent.Kafka.IConsumer`2.QueryWatermarkOffsets(Confluent.Kafka.TopicPartition,System.TimeSpan)">
<summary>
Query the Kafka cluster for low (oldest
available/beginning) and high (newest/end)
offsets for the specified topic/partition.
This is a blocking call - always contacts
the cluster for the required information.
</summary>
<param name="topicPartition">
The topic/partition of interest.
</param>
<param name="timeout">
The maximum period of time
the call may block.
</param>
<returns>
The requested WatermarkOffsets (see
that class for additional documentation).
</returns>
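      <example>
        A sketch that estimates the number of messages currently in a
        partition (the topic name is an assumption):
        <code>
        var wm = consumer.QueryWatermarkOffsets(
            new TopicPartition("my-topic", 0), TimeSpan.FromSeconds(5));
        Console.WriteLine($"low: {wm.Low}, high: {wm.High}, " +
            $"approx. count: {wm.High.Value - wm.Low.Value}");
        </code>
      </example>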
</member>
<member name="M:Confluent.Kafka.IConsumer`2.Close">
<summary>
Commits offsets (if auto commit is enabled),
alerts the group coordinator
            that the consumer is exiting the group, and then
releases all resources used by this consumer.
You should call <see cref="M:Confluent.Kafka.Consumer`2.Close" />
instead of <see cref="M:Confluent.Kafka.Consumer`2.Dispose" />
(or just before) to ensure a timely consumer
group rebalance. If you do not call
<see cref="M:Confluent.Kafka.Consumer`2.Close" />
or <see cref="M:Confluent.Kafka.Consumer`2.Unsubscribe" />,
the group will rebalance after a timeout
specified by the group's `session.timeout.ms`.
Note: the partition assignment and partitions
revoked handlers may be called as a side-effect
of calling this method.
</summary>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown if the operation fails.
</exception>
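      <example>
        A typical shutdown sketch:
        <code>
        try
        {
            // ... the consume loop ...
        }
        finally
        {
            // Leave the group promptly and commit final offsets
            // (if auto commit is enabled).
            consumer.Close();
        }
        </code>
      </example>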
</member>
<member name="P:Confluent.Kafka.IConsumer`2.ConsumerGroupMetadata">
<summary>
The current consumer group metadata associated with this consumer,
or null if a GroupId has not been specified for the consumer.
This metadata object should be passed to the transactional producer's
<see cref="M:Confluent.Kafka.IProducer`2.SendOffsetsToTransaction(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset},Confluent.Kafka.IConsumerGroupMetadata,System.TimeSpan)"/>
method.
</summary>
</member>
<member name="T:Confluent.Kafka.IDeserializer`1">
<summary>
Defines a deserializer for use with <see cref="T:Confluent.Kafka.Consumer`2" />.
</summary>
</member>
<member name="M:Confluent.Kafka.IDeserializer`1.Deserialize(System.ReadOnlySpan{System.Byte},System.Boolean,Confluent.Kafka.SerializationContext)">
<summary>
Deserialize a message key or value.
</summary>
<param name="data">
The data to deserialize.
</param>
<param name="isNull">
Whether or not the value is null.
</param>
<param name="context">
Context relevant to the deserialize operation.
</param>
<returns>
The deserialized value.
</returns>
</member>
<member name="T:Confluent.Kafka.Ignore">
<summary>
A type for use in conjunction with IgnoreDeserializer that enables
message keys or values to be read as null, regardless of their value.
</summary>
</member>
<member name="T:Confluent.Kafka.IHeader">
<summary>
Defines a Kafka message header.
</summary>
</member>
<member name="P:Confluent.Kafka.IHeader.Key">
<summary>
The header key.
</summary>
</member>
<member name="M:Confluent.Kafka.IHeader.GetValueBytes">
<summary>
The serialized header value data.
</summary>
</member>
<member name="M:Confluent.Kafka.Impl.Librdkafka.Initialize(System.String)">
<summary>
Attempt to load librdkafka.
</summary>
<returns>
true if librdkafka was loaded as a result of this call, false if the
library has already been loaded.
throws DllNotFoundException if librdkafka could not be loaded.
throws FileLoadException if the loaded librdkafka version is too low.
throws InvalidOperationException on other error.
</returns>
</member>
<member name="T:Confluent.Kafka.Impl.Librdkafka.ProduceVarTag">
<summary>
Var-arg tag types, used in producev
</summary>
</member>
<member name="T:Confluent.Kafka.Impl.NativeMethods.NativeMethods">
<summary>
This class should be an exact replica of other NativeMethods classes, except
for the DllName const.
</summary>
<remarks>
This copy/pasting is required because DllName must be const.
TODO: generate the NativeMethods classes at runtime (compile C# code) rather
than copy/paste.
Alternatively, we could have used dlopen to load the native library, but to
do that we need to know the absolute path of the native libraries because the
dlopen call does not know .NET runtime library storage conventions. Unfortunately
these are relatively complex, so we prefer to go with the copy/paste solution
which is relatively simple.
</remarks>
</member>
<member name="T:Confluent.Kafka.Impl.NativeMethods.NativeMethods_Alpine">
<summary>
This class should be an exact replica of other NativeMethods classes, except
for the DllName const.
</summary>
<remarks>
This copy/pasting is required because DllName must be const.
TODO: generate the NativeMethods classes at runtime (compile C# code) rather
than copy/paste.
Alternatively, we could have used dlopen to load the native library, but to
do that we need to know the absolute path of the native libraries because the
dlopen call does not know .NET runtime library storage conventions. Unfortunately
these are relatively complex, so we prefer to go with the copy/paste solution
which is relatively simple.
</remarks>
</member>
<member name="T:Confluent.Kafka.Impl.NativeMethods.NativeMethods_Centos6">
<summary>
This class should be an exact replica of other NativeMethods classes, except
for the DllName const.
</summary>
<remarks>
This copy/pasting is required because DllName must be const.
TODO: generate the NativeMethods classes at runtime (compile C# code) rather
than copy/paste.
Alternatively, we could have used dlopen to load the native library, but to
do that we need to know the absolute path of the native libraries because the
dlopen call does not know .NET runtime library storage conventions. Unfortunately
these are relatively complex, so we prefer to go with the copy/paste solution
which is relatively simple.
</remarks>
</member>
<member name="T:Confluent.Kafka.Impl.NativeMethods.NativeMethods_Centos7">
<summary>
This class should be an exact replica of other NativeMethods classes, except
for the DllName const.
</summary>
<remarks>
This copy/pasting is required because DllName must be const.
TODO: generate the NativeMethods classes at runtime (compile C# code) rather
than copy/paste.
Alternatively, we could have used dlopen to load the native library, but to
do that we need to know the absolute path of the native libraries because the
dlopen call does not know .NET runtime library storage conventions. Unfortunately
these are relatively complex, so we prefer to go with the copy/paste solution
which is relatively simple.
</remarks>
</member>
<member name="F:Confluent.Kafka.Impl.ConfRes.Unknown">
<summary>
Unknown configuration name.
</summary>
</member>
<member name="F:Confluent.Kafka.Impl.ConfRes.Invalid">
<summary>
Invalid configuration value.
</summary>
</member>
<member name="F:Confluent.Kafka.Impl.ConfRes.Ok">
<summary>
Configuration okay
</summary>
</member>
<member name="M:Confluent.Kafka.Impl.SafeKafkaHandle.SetOwner(Confluent.Kafka.IClient)">
<summary>
This object is tightly coupled to the referencing Producer /
Consumer via callback objects passed into the librdkafka
config. These are not tracked by the CLR, so we need to
maintain an explicit reference to the containing object here
so the delegates - which may get called by librdkafka during
destroy - are guaranteed to exist during finalization.
Note: objects referenced by this handle (recursively) will
not be GC'd at the time of finalization as the freachable
list is a GC root. Also, the delegates are ok to use since they
don't have finalizers.
this is a useful reference:
https://stackoverflow.com/questions/6964270/which-objects-can-i-use-in-a-finalizer-method
</summary>
</member>
<member name="M:Confluent.Kafka.Impl.SafeKafkaHandle.ThrowIfHandleClosed">
<summary>
Prevent AccessViolationException when handle has already been closed.
Should be called at start of every function using the handle,
except in ReleaseHandle.
</summary>
</member>
<member name="M:Confluent.Kafka.Impl.SafeKafkaHandle.NewTopic(System.String,System.IntPtr)">
<summary>
Setting the config parameter to IntPtr.Zero returns the handle of an
existing topic, or an invalid handle if a topic with name <paramref name="topic" />
does not exist. Note: Only the first applied configuration for a specific
topic will be used.
</summary>
</member>
<member name="M:Confluent.Kafka.Impl.SafeKafkaHandle.GetMetadata(System.Boolean,Confluent.Kafka.Impl.SafeTopicHandle,System.Int32)">
<summary>
- allTopics=true - request all topics from cluster
            - allTopics=false, topic=null - request only locally known topics (topic_new():ed topics or otherwise locally referenced ones, such as consumed topics)
- allTopics=false, topic=valid - request specific topic
</summary>
</member>
<member name="M:Confluent.Kafka.Impl.SafeKafkaHandle.DummyOffsetCommitCb(System.IntPtr,Confluent.Kafka.ErrorCode,System.IntPtr,System.IntPtr)">
<summary>
Dummy commit callback that does nothing but prohibits
triggering the global offset_commit_cb.
Used by manual commits.
</summary>
</member>
<member name="M:Confluent.Kafka.Impl.SafeKafkaHandle.GetCTopicPartitionList(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset})">
<summary>
Creates and returns a C rd_kafka_topic_partition_list_t * populated by offsets.
</summary>
<returns>
            If offsets is null, a null IntPtr will be returned; otherwise an IntPtr
            which must be destroyed with LibRdKafka.topic_partition_list_destroy()
</returns>
</member>
<member name="T:Confluent.Kafka.DictionaryExtensions">
<summary>
Extension methods for the <see cref="T:System.Collections.Generic.IDictionary`2"/> class.
</summary>
</member>
<member name="T:Confluent.Kafka.StringExtensions">
<summary>
Extension methods for the <see cref="T:System.String"/> class.
</summary>
</member>
<member name="T:Confluent.Kafka.TimeSpanExtensions">
<summary>
Extension methods for the <see cref="T:System.TimeSpan"/> class.
</summary>
</member>
<member name="M:Confluent.Kafka.TimeSpanExtensions.TotalMillisecondsAsInt(System.TimeSpan)">
<summary>
Converts the TimeSpan value <paramref name="timespan" /> to an integer number of milliseconds.
An <see cref="T:System.OverflowException"/> is thrown if the number of milliseconds is greater than Int32.MaxValue.
</summary>
<param name="timespan">
The TimeSpan value to convert to milliseconds.
</param>
<returns>
The TimeSpan value <paramref name="timespan" /> in milliseconds.
</returns>
</member>
<member name="T:Confluent.Kafka.Internal.Util.Marshal.StringAsPinnedUTF8">
<summary>
Convenience class for generating and pinning the UTF8
representation of a string.
</summary>
</member>
<member name="M:Confluent.Kafka.Internal.Util.Marshal.PtrToStringUTF8(System.IntPtr)">
<summary>
Interpret a zero terminated c string as UTF-8.
</summary>
</member>
<member name="T:Confluent.Kafka.IProducer`2">
<summary>
Defines a high-level Apache Kafka producer client
that provides key and value serialization.
</summary>
</member>
<member name="M:Confluent.Kafka.IProducer`2.ProduceAsync(System.String,Confluent.Kafka.Message{`0,`1},System.Threading.CancellationToken)">
<summary>
Asynchronously send a single message to a
Kafka topic. The partition the message is
sent to is determined by the partitioner
defined using the 'partitioner' configuration
property.
</summary>
<param name="topic">
The topic to produce the message to.
</param>
<param name="message">
The message to produce.
</param>
<param name="cancellationToken">
            A cancellation token to observe while waiting
            for the returned task to complete.
</param>
<returns>
A Task which will complete with a delivery
report corresponding to the produce request,
            or an exception if an error occurred.
</returns>
<exception cref="T:Confluent.Kafka.ProduceException`2">
Thrown in response to any produce request
that was unsuccessful for any reason
(excluding user application logic errors).
The Error property of the exception provides
more detailed information.
</exception>
<exception cref="T:System.ArgumentException">
Thrown in response to invalid argument values.
</exception>
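      <example>
        A minimal await-based sketch, assumed to run inside an async
        method with a producer of string values (the topic name and
        payload are illustrative):
        <code>
        try
        {
            var dr = await producer.ProduceAsync(
                "my-topic", new Message&lt;Null, string&gt; { Value = "hello" });
            Console.WriteLine($"delivered to {dr.TopicPartitionOffset}");
        }
        catch (ProduceException&lt;Null, string&gt; e)
        {
            Console.WriteLine($"delivery failed: {e.Error.Reason}");
        }
        </code>
      </example>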
</member>
<member name="M:Confluent.Kafka.IProducer`2.ProduceAsync(Confluent.Kafka.TopicPartition,Confluent.Kafka.Message{`0,`1},System.Threading.CancellationToken)">
<summary>
Asynchronously send a single message to a
Kafka topic/partition.
</summary>
<param name="topicPartition">
The topic partition to produce the
message to.
</param>
<param name="message">
The message to produce.
</param>
<param name="cancellationToken">
            A cancellation token to observe while waiting
            for the returned task to complete.
</param>
<returns>
A Task which will complete with a delivery
report corresponding to the produce request,
            or an exception if an error occurred.
</returns>
<exception cref="T:Confluent.Kafka.ProduceException`2">
Thrown in response to any produce request
that was unsuccessful for any reason
(excluding user application logic errors).
The Error property of the exception provides
more detailed information.
</exception>
<exception cref="T:System.ArgumentException">
Thrown in response to invalid argument values.
</exception>
</member>
<member name="M:Confluent.Kafka.IProducer`2.Produce(System.String,Confluent.Kafka.Message{`0,`1},System.Action{Confluent.Kafka.DeliveryReport{`0,`1}})">
<summary>
Asynchronously send a single message to a
Kafka topic. The partition the message is sent
to is determined by the partitioner defined
using the 'partitioner' configuration property.
</summary>
<param name="topic">
The topic to produce the message to.
</param>
<param name="message">
The message to produce.
</param>
<param name="deliveryHandler">
A delegate that will be called
with a delivery report corresponding to the
produce request (if enabled).
</param>
<exception cref="T:Confluent.Kafka.ProduceException`2">
Thrown in response to any error that is known
immediately (excluding user application logic
errors), for example ErrorCode.Local_QueueFull.
Asynchronous notification of unsuccessful produce
requests is made available via the <paramref name="deliveryHandler" />
parameter (if specified). The Error property of
the exception / delivery report provides more
detailed information.
</exception>
<exception cref="T:System.ArgumentException">
Thrown in response to invalid argument values.
</exception>
<exception cref="T:System.InvalidOperationException">
Thrown in response to error conditions that
reflect an error in the application logic of
the calling application.
</exception>
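      <example>
        A higher-throughput sketch that uses the delivery handler
        callback instead of awaiting each produce call (names are
        illustrative):
        <code>
        producer.Produce("my-topic",
            new Message&lt;Null, string&gt; { Value = "hello" },
            dr =>
            {
                if (dr.Error.IsError)
                    Console.WriteLine($"delivery failed: {dr.Error.Reason}");
            });
        </code>
      </example>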
</member>
<member name="M:Confluent.Kafka.IProducer`2.Produce(Confluent.Kafka.TopicPartition,Confluent.Kafka.Message{`0,`1},System.Action{Confluent.Kafka.DeliveryReport{`0,`1}})">
<summary>
Asynchronously send a single message to a
Kafka topic partition.
</summary>
<param name="topicPartition">
The topic partition to produce
the message to.
</param>
<param name="message">
The message to produce.
</param>
<param name="deliveryHandler">
A delegate that will be called
with a delivery report corresponding to the
produce request (if enabled).
</param>
<exception cref="T:Confluent.Kafka.ProduceException`2">
Thrown in response to any error that is known
immediately (excluding user application logic errors),
for example ErrorCode.Local_QueueFull. Asynchronous
notification of unsuccessful produce requests is made
available via the <paramref name="deliveryHandler" />
parameter (if specified). The Error property of the
exception / delivery report provides more detailed
information.
</exception>
<exception cref="T:System.ArgumentException">
Thrown in response to invalid argument values.
</exception>
<exception cref="T:System.InvalidOperationException">
Thrown in response to error conditions that reflect
an error in the application logic of the calling
application.
</exception>
</member>
<member name="M:Confluent.Kafka.IProducer`2.Poll(System.TimeSpan)">
<summary>
Poll for callback events.
</summary>
<param name="timeout">
The maximum period of time to block if
no callback events are waiting. You should
typically use a relatively short timeout period
because this operation cannot be cancelled.
</param>
<returns>
            Returns the number of events served since
            the last call to this method, or, if this
            method has not yet been called, over the
            lifetime of the producer.
</returns>
</member>
<member name="M:Confluent.Kafka.IProducer`2.Flush(System.TimeSpan)">
<summary>
Wait until all outstanding produce requests and
delivery report callbacks are completed.
[API-SUBJECT-TO-CHANGE] - the semantics and/or
type of the return value is subject to change.
</summary>
<param name="timeout">
The maximum length of time to block.
You should typically use a relatively short
timeout period and loop until the return value
becomes zero because this operation cannot be
cancelled.
</param>
<returns>
The current librdkafka out queue length. This
should be interpreted as a rough indication of
the number of messages waiting to be sent to or
acknowledged by the broker. If zero, there are
no outstanding messages or callbacks.
Specifically, the value is equal to the sum of
the number of produced messages for which a
delivery report has not yet been handled and a
number which is less than or equal to the
number of pending delivery report callback
events (as determined by the number of
outstanding protocol requests).
</returns>
<remarks>
This method should typically be called prior to
destroying a producer instance to make sure all
queued and in-flight produce requests are
completed before terminating. The wait time is
bounded by the timeout parameter.
A related configuration parameter is
message.timeout.ms which determines the
maximum length of time librdkafka attempts
to deliver a message before giving up and
so also affects the maximum time a call to
Flush may block.
Where this Producer instance shares a Handle
with one or more other producer instances, the
Flush method will wait on messages produced by
the other producer instances as well.
</remarks>
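      <example>
        A shutdown sketch that loops until the out queue drains (the
        ten second timeout per attempt is an arbitrary choice):
        <code>
        while (producer.Flush(TimeSpan.FromSeconds(10)) > 0)
        {
            Console.WriteLine("waiting for outstanding deliveries...");
        }
        </code>
      </example>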
</member>
<member name="M:Confluent.Kafka.IProducer`2.Flush(System.Threading.CancellationToken)">
<summary>
Wait until all outstanding produce requests and
delivery report callbacks are completed.
</summary>
<param name="cancellationToken">
            A cancellation token to observe while waiting
            for the returned task to complete.
</param>
<remarks>
This method should typically be called prior to
destroying a producer instance to make sure all
queued and in-flight produce requests are
completed before terminating.
A related configuration parameter is
message.timeout.ms which determines the
maximum length of time librdkafka attempts
to deliver a message before giving up and
so also affects the maximum time a call to
Flush may block.
Where this Producer instance shares a Handle
with one or more other producer instances, the
Flush method will wait on messages produced by
the other producer instances as well.
</remarks>
<exception cref="T:System.OperationCanceledException">
Thrown if the operation is cancelled.
</exception>
</member>
<member name="M:Confluent.Kafka.IProducer`2.InitTransactions(System.TimeSpan)">
<summary>
Initialize transactions for the producer instance.
This function ensures any transactions initiated by previous instances
of the producer with the same TransactionalId are completed.
If the previous instance failed with a transaction in progress the
previous transaction will be aborted.
This function needs to be called before any other transactional or
produce functions are called when the TransactionalId is configured.
If the last transaction had begun completion (following transaction commit)
but not yet finished, this function will await the previous transaction's
completion.
When any previous transactions have been fenced this function
will acquire the internal producer id and epoch, used in all future
transactional messages issued by this producer instance.
Upon successful return from this function the application has to perform at
            least one of the following operations within TransactionTimeoutMs to
avoid timing out the transaction on the broker:
            * ProduceAsync (et al.)
* SendOffsetsToTransaction
* CommitTransaction
* AbortTransaction
</summary>
<param name="timeout">
The maximum length of time this method may block.
</param>
<exception cref="T:Confluent.Kafka.KafkaRetriableException">
            Thrown if an error occurred, and the operation may be retried.
</exception>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown on all other errors.
</exception>
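      <example>
        A sketch of the basic transactional lifecycle, assuming a
        producer configured with a TransactionalId (the topic name
        and payload are illustrative):
        <code>
        producer.InitTransactions(TimeSpan.FromSeconds(30));
        producer.BeginTransaction();
        try
        {
            producer.Produce("my-topic",
                new Message&lt;Null, string&gt; { Value = "in-transaction" });
            producer.CommitTransaction();
        }
        catch (KafkaTxnRequiresAbortException)
        {
            producer.AbortTransaction();
        }
        </code>
      </example>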
</member>
<member name="M:Confluent.Kafka.IProducer`2.BeginTransaction">
<summary>
Begin a new transaction.
InitTransactions must have been called successfully (once)
before this function is called.
            Any messages produced, offsets sent (SendOffsetsToTransaction),
            etc., after the successful return of this function will be part of
            the transaction and committed or aborted atomically.
Finish the transaction by calling CommitTransaction or
abort the transaction by calling AbortTransaction.
</summary>
<remark>
            With the transactional producer, ProduceAsync and
            Produce calls are only allowed during an ongoing
            transaction, as started with this function.
            Any produce call outside an ongoing transaction,
            or for a failed transaction, will fail.
</remark>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown on all errors.
</exception>
</member>
<member name="M:Confluent.Kafka.IProducer`2.CommitTransaction(System.TimeSpan)">
<summary>
Commit the current transaction (as started with
BeginTransaction).
Any outstanding messages will be flushed (delivered) before actually
committing the transaction.
If any of the outstanding messages fail permanently the current
transaction will enter the abortable error state, in this case
the application must call AbortTransaction before attempting a new
transaction with BeginTransaction.
IMPORTANT NOTE: It is currently strongly recommended that the application
call CommitTransaction without specifying a timeout (which will block up
to the remaining transaction timeout - ProducerConfig.TransactionTimeoutMs)
because the Transactional Producer's API timeout handling is inconsistent with
the underlying protocol requests (known issue).
</summary>
<remark>
This function will block until all outstanding messages are
delivered and the transaction commit request has been successfully
handled by the transaction coordinator, or until <paramref name="timeout" />
            expires, whichever comes first. On timeout the application may
call the function again.
</remark>
<remark>
Will automatically call Flush to ensure all queued
messages are delivered before attempting to commit the
transaction.
</remark>
<param name="timeout">
The maximum length of time this method may block.
</param>
<exception cref="T:Confluent.Kafka.KafkaTxnRequiresAbortException">
            Thrown if the current transaction has entered an
            abortable state; the application must call
            AbortTransaction and start a new transaction with
            BeginTransaction if it wishes to proceed with
            transactions.
</exception>
<exception cref="T:Confluent.Kafka.KafkaRetriableException">
            Thrown if an error occurred, and the operation may be retried.
</exception>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown on all other errors.
</exception>
</member>
<member name="M:Confluent.Kafka.IProducer`2.CommitTransaction">
<summary>
Commit the current transaction (as started with
BeginTransaction).
Any outstanding messages will be flushed (delivered) before actually
committing the transaction.
If any of the outstanding messages fail permanently the current
transaction will enter the abortable error state, in this case
the application must call AbortTransaction before attempting a new
transaction with BeginTransaction.
</summary>
<remark>
This function will block until all outstanding messages are
delivered and the transaction commit request has been successfully
handled by the transaction coordinator, or until the transaction
            times out (ProducerConfig.TransactionTimeoutMs), whichever comes
first. On timeout the application may call the function again.
</remark>
<remark>
Will automatically call Flush to ensure all queued
messages are delivered before attempting to commit the
transaction.
</remark>
<exception cref="T:Confluent.Kafka.KafkaTxnRequiresAbortException">
            Thrown if the current transaction has entered an
            abortable state; the application must call
            AbortTransaction and start a new transaction with
            BeginTransaction if it wishes to proceed with
            transactions.
</exception>
<exception cref="T:Confluent.Kafka.KafkaRetriableException">
            Thrown if an error occurred, and the operation may be retried.
</exception>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown on all other errors.
</exception>
</member>
<member name="M:Confluent.Kafka.IProducer`2.AbortTransaction(System.TimeSpan)">
<summary>
Aborts the ongoing transaction.
This function should also be used to recover from non-fatal abortable
transaction errors.
Any outstanding messages will be purged and fail.
IMPORTANT NOTE: It is currently strongly recommended that the application
call AbortTransaction without specifying a timeout (which will block up
to the remaining transaction timeout - ProducerConfig.TransactionTimeoutMs)
because the Transactional Producer's API timeout handling is inconsistent with
the underlying protocol requests (known issue).
</summary>
<remark>
This function will block until all outstanding messages are purged
and the transaction abort request has been successfully
handled by the transaction coordinator, or until <paramref name="timeout" />
            expires, whichever comes first. On timeout the application may
call the function again.
</remark>
<param name="timeout">
The maximum length of time this method may block.
</param>
<exception cref="T:Confluent.Kafka.KafkaRetriableException">
            Thrown if an error occurred, and the operation may be retried.
</exception>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown on all other errors.
</exception>
</member>
<member name="M:Confluent.Kafka.IProducer`2.AbortTransaction">
<summary>
Aborts the ongoing transaction.
This function should also be used to recover from non-fatal abortable
transaction errors.
Any outstanding messages will be purged and fail.
</summary>
<remark>
This function will block until all outstanding messages are purged
and the transaction abort request has been successfully
handled by the transaction coordinator, or until the transaction
            times out (ProducerConfig.TransactionTimeoutMs), whichever comes
first. On timeout the application may call the function again.
</remark>
<exception cref="T:Confluent.Kafka.KafkaRetriableException">
            Thrown if an error occurred, and the operation may be retried.
</exception>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown on all other errors.
</exception>
</member>
<member name="M:Confluent.Kafka.IProducer`2.SendOffsetsToTransaction(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset},Confluent.Kafka.IConsumerGroupMetadata,System.TimeSpan)">
<summary>
Sends a list of topic partition offsets to the consumer group
coordinator for <paramref name="groupMetadata" />, and marks
            the offsets as part of the current transaction.
These offsets will be considered committed only if the transaction is
committed successfully.
The offsets should be the next message your application will consume,
i.e., the last processed message's offset + 1 for each partition.
Either track the offsets manually during processing or use
Position property (on the consumer) to get the current offsets for
the partitions assigned to the consumer.
Use this method at the end of a consume-transform-produce loop prior
to committing the transaction with CommitTransaction.
</summary>
<remark>
The consumer must disable auto commits
(set EnableAutoCommit to false on the consumer).
</remark>
<remark>
            Logical and invalid offsets (such as Offset.Unset) in
            <paramref name="offsets" /> will be ignored. If there
            are no valid offsets in <paramref name="offsets" />,
            the function will not throw and no action will be taken.
</remark>
<param name="offsets">
List of offsets to commit to the consumer group upon
successful commit of the transaction. Offsets should be
the next message to consume, e.g., last processed message + 1.
</param>
<param name="groupMetadata">
The consumer group metadata acquired via
<see cref="P:Confluent.Kafka.IConsumer`2.ConsumerGroupMetadata" />
</param>
<param name="timeout">
The maximum length of time this method may block.
</param>
<exception cref="T:System.ArgumentException">
Thrown if group metadata is invalid.
</exception>
<exception cref="T:Confluent.Kafka.KafkaTxnRequiresAbortException">
            Thrown if the current transaction has entered an
            abortable state; the application must call
            AbortTransaction and start a new transaction with
            BeginTransaction if it wishes to proceed with
            transactions.
</exception>
<exception cref="T:Confluent.Kafka.KafkaRetriableException">
            Thrown if an error occurred, and the operation may be retried.
</exception>
<exception cref="T:Confluent.Kafka.KafkaException">
Thrown on all other errors.
</exception>
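      <example>
        A condensed consume-transform-produce sketch; InitTransactions
        is assumed to have been called, error handling is elided, and
        Transform is a hypothetical application method:
        <code>
        producer.BeginTransaction();
        var cr = consumer.Consume(cancellationToken);
        producer.Produce("output-topic", Transform(cr)); // hypothetical transform
        producer.SendOffsetsToTransaction(
            new[] { new TopicPartitionOffset(cr.TopicPartition, cr.Offset + 1) },
            consumer.ConsumerGroupMetadata,
            TimeSpan.FromSeconds(30));
        producer.CommitTransaction();
        </code>
      </example>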
</member>
<member name="T:Confluent.Kafka.ISerializer`1">
<summary>
Defines a serializer for use with <see cref="T:Confluent.Kafka.Producer`2" />.
</summary>
</member>
<member name="M:Confluent.Kafka.ISerializer`1.Serialize(`0,Confluent.Kafka.SerializationContext)">
<summary>
Serialize the key or value of a <see cref="T:Confluent.Kafka.Message`2" />
instance.
</summary>
<param name="data">
The value to serialize.
</param>
<param name="context">
Context relevant to the serialize operation.
</param>
<returns>
The serialized value.
</returns>
</member>
<member name="T:Confluent.Kafka.KafkaException">
<summary>
            Represents an error that occurred during an interaction with Kafka.
</summary>
</member>
<member name="M:Confluent.Kafka.KafkaException.#ctor(Confluent.Kafka.Error)">
<summary>
Initialize a new instance of KafkaException based on
an existing Error instance.
</summary>
<param name="error">
The Kafka Error.
</param>
</member>
<member name="M:Confluent.Kafka.KafkaException.#ctor(Confluent.Kafka.Error,System.Exception)">
<summary>
Initialize a new instance of KafkaException based on
an existing Error instance and inner exception.
</summary>
<param name="error">
The Kafka Error.
</param>
<param name="innerException">
The exception instance that caused this exception.
</param>
</member>
<member name="M:Confluent.Kafka.KafkaException.#ctor(Confluent.Kafka.ErrorCode)">
<summary>
Initialize a new instance of KafkaException based on
an existing ErrorCode value.
</summary>
<param name="code">
The Kafka ErrorCode.
</param>
</member>
<member name="P:Confluent.Kafka.KafkaException.Error">
<summary>
Gets the Error associated with this KafkaException.
</summary>
</member>
<member name="T:Confluent.Kafka.KafkaRetriableException">
<summary>
Represents an error where the operation that caused it
may be retried.
</summary>
</member>
<member name="M:Confluent.Kafka.KafkaRetriableException.#ctor(Confluent.Kafka.Error)">
<summary>
Initialize a new instance of KafkaRetriableException
based on an existing Error instance.
</summary>
<param name="error">
The Error instance.
</param>
</member>
<member name="T:Confluent.Kafka.KafkaTxnRequiresAbortException">
<summary>
Represents an error that caused the current transaction
to fail and enter the abortable state.
</summary>
</member>
<member name="M:Confluent.Kafka.KafkaTxnRequiresAbortException.#ctor(Confluent.Kafka.Error)">
<summary>
Initialize a new instance of KafkaTxnRequiresAbortException
based on an existing Error instance.
</summary>
<param name="error">
The Error instance.
</param>
</member>
<member name="T:Confluent.Kafka.Library">
<summary>
Methods that relate to the native librdkafka library itself
(do not require a Producer or Consumer broker connection).
</summary>
</member>
<member name="P:Confluent.Kafka.Library.Version">
<summary>
Gets the librdkafka version as an integer.
Interpreted as hex MM.mm.rr.xx:
- MM = Major
- mm = minor
- rr = revision
- xx = pre-release id (0xff is the final release)
E.g.: 0x000901ff = 0.9.1
</summary>
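      <example>
        A sketch decoding the packed value into a dotted version string:
        <code>
        int v = Library.Version;
        Console.WriteLine(
            $"{(v >> 24) &amp; 0xff}.{(v >> 16) &amp; 0xff}.{(v >> 8) &amp; 0xff}");
        </code>
      </example>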
</member>
<member name="P:Confluent.Kafka.Library.VersionString">
<summary>
Gets the librdkafka version as string.
</summary>
</member>
<member name="P:Confluent.Kafka.Library.DebugContexts">
<summary>
Gets a list of the supported debug contexts.
</summary>
</member>
<member name="P:Confluent.Kafka.Library.IsLoaded">
<summary>
true if librdkafka has been successfully loaded, false if not.
</summary>
</member>
<member name="M:Confluent.Kafka.Library.Load">
<summary>
Loads the native librdkafka library. Does nothing if the library is
already loaded.
</summary>
<returns>
true if librdkafka was loaded as a result of this call, false if the
library has already been loaded.
</returns>
<remarks>
You will not typically need to call this method - librdkafka is loaded
automatically on first use of a Producer or Consumer instance.
</remarks>
</member>
<member name="M:Confluent.Kafka.Library.Load(System.String)">
<summary>
Loads the native librdkafka library from the specified path (note: the
specified path needs to include the filename). Does nothing if the
library is already loaded.
</summary>
<returns>
true if librdkafka was loaded as a result of this call, false if the
library has already been loaded.
</returns>
<remarks>
You will not typically need to call this method - librdkafka is loaded
automatically on first use of a Producer or Consumer instance.
</remarks>
</member>
<member name="P:Confluent.Kafka.Library.HandleCount">
<summary>
            The total number of librdkafka client instances that have been
created and not yet disposed.
</summary>
</member>
<member name="T:Confluent.Kafka.Loggers">
<summary>
OnLog callback event handler implementations.
</summary>
<remarks>
Warning: Log handlers are called spontaneously from internal librdkafka
threads and the application must not call any Confluent.Kafka APIs from
within a log handler or perform any prolonged operations.
</remarks>
</member>
<member name="M:Confluent.Kafka.Loggers.ConsoleLogger(Confluent.Kafka.LogMessage)">
<summary>
The method used to log messages by default.
</summary>
</member>
<member name="T:Confluent.Kafka.LogLevelType">
<summary>
            Enumerates the supported log level enumeration types.
</summary>
</member>
<member name="F:Confluent.Kafka.LogLevelType.SysLogLevel">
<summary>
Confluent.Kafka.SysLogLevel (severity
levels correspond to syslog)
</summary>
</member>
<member name="F:Confluent.Kafka.LogLevelType.MicrosoftExtensionsLogging">
<summary>
Microsoft.Extensions.Logging.LogLevel
</summary>
</member>
<member name="F:Confluent.Kafka.LogLevelType.SystemDiagnostics">
<summary>
System.Diagnostics.TraceLevel
</summary>
</member>
<member name="T:Confluent.Kafka.LogMessage">
<summary>
Encapsulates information provided to the
Producer/Consumer OnLog event.
</summary>
</member>
<member name="M:Confluent.Kafka.LogMessage.#ctor(System.String,Confluent.Kafka.SyslogLevel,System.String,System.String)">
<summary>
Instantiates a new LogMessage class instance.
</summary>
<param name="name">
The librdkafka client instance name.
</param>
<param name="level">
The log level (levels correspond to syslog(3)), lower is worse.
</param>
<param name="facility">
The facility (section of librdkafka code) that produced the message.
</param>
<param name="message">
The log message.
</param>
</member>
<member name="P:Confluent.Kafka.LogMessage.Name">
<summary>
Gets the librdkafka client instance name.
</summary>
</member>
<member name="P:Confluent.Kafka.LogMessage.Level">
<summary>
Gets the log level (levels correspond to syslog(3)), lower is worse.
</summary>
</member>
<member name="P:Confluent.Kafka.LogMessage.Facility">
<summary>
Gets the facility (section of librdkafka code) that produced the message.
</summary>
</member>
<member name="P:Confluent.Kafka.LogMessage.Message">
<summary>
Gets the log message.
</summary>
</member>
<member name="M:Confluent.Kafka.LogMessage.LevelAs(Confluent.Kafka.LogLevelType)">
<summary>
Convert the syslog message severity
level to correspond to the values of
a different log level enumeration type.
</summary>
</member>
<member name="T:Confluent.Kafka.Message`2">
<summary>
Represents a (deserialized) Kafka message.
</summary>
</member>
<member name="P:Confluent.Kafka.Message`2.Key">
<summary>
Gets the message key value (possibly null).
</summary>
</member>
<member name="P:Confluent.Kafka.Message`2.Value">
<summary>
Gets the message value (possibly null).
</summary>
</member>
<member name="T:Confluent.Kafka.MessageComponentType">
<summary>
Enumerates different parts of a Kafka message
</summary>
</member>
<member name="F:Confluent.Kafka.MessageComponentType.Key">
<summary>
The message key.
</summary>
</member>
<member name="F:Confluent.Kafka.MessageComponentType.Value">
<summary>
The message value.
</summary>
</member>
<member name="T:Confluent.Kafka.MessageMetadata">
<summary>
All components of <see cref="T:Confluent.Kafka.Message`2" /> except Key and Value.
</summary>
</member>
<member name="P:Confluent.Kafka.MessageMetadata.Timestamp">
<summary>
The message timestamp. The timestamp type must be set to CreateTime.
Specify Timestamp.Default to set the message timestamp to the time
of this function call.
</summary>
</member>
<member name="P:Confluent.Kafka.MessageMetadata.Headers">
<summary>
The collection of message headers (or null). Specifying null or an
empty list are equivalent. The order of headers is maintained, and
duplicate header keys are allowed.
</summary>
</member>
<member name="T:Confluent.Kafka.Metadata">
<summary>
Kafka cluster metadata.
</summary>
</member>
<member name="M:Confluent.Kafka.Metadata.#ctor(System.Collections.Generic.List{Confluent.Kafka.BrokerMetadata},System.Collections.Generic.List{Confluent.Kafka.TopicMetadata},System.Int32,System.String)">
<summary>
Instantiates a new Metadata class instance.
</summary>
<param name="brokers">
Information about each constituent broker of the cluster.
</param>
<param name="topics">
Information about requested topics in the cluster.
</param>
<param name="originatingBrokerId">
The id of the broker that provided this metadata.
</param>
<param name="originatingBrokerName">
The name of the broker that provided this metadata.
</param>
</member>
<member name="P:Confluent.Kafka.Metadata.Brokers">
<summary>
Gets information about each constituent broker of the cluster.
</summary>
</member>
<member name="P:Confluent.Kafka.Metadata.Topics">
<summary>
Gets information about requested topics in the cluster.
</summary>
</member>
<member name="P:Confluent.Kafka.Metadata.OriginatingBrokerId">
<summary>
Gets the id of the broker that provided this metadata.
</summary>
</member>
<member name="P:Confluent.Kafka.Metadata.OriginatingBrokerName">
<summary>
Gets the name of the broker that provided this metadata.
</summary>
</member>
<member name="M:Confluent.Kafka.Metadata.ToString">
<summary>
Returns a JSON representation of the Metadata object.
</summary>
<returns>
A JSON representation of the Metadata object.
</returns>
</member>
<member name="T:Confluent.Kafka.Null">
<summary>
A type for use in conjunction with NullSerializer and NullDeserializer
that enables null key or values to be enforced when producing or
consuming messages.
</summary>
</member>
<member name="T:Confluent.Kafka.Offset">
<summary>
Represents a Kafka partition offset value.
</summary>
<remarks>
This structure is the same size as a long -
its purpose is to add some syntactical sugar
related to special values.
</remarks>
</member>
<member name="F:Confluent.Kafka.Offset.Beginning">
<summary>
A special value that refers to the beginning of a partition.
</summary>
</member>
<member name="F:Confluent.Kafka.Offset.End">
<summary>
A special value that refers to the end of a partition.
</summary>
</member>
<member name="F:Confluent.Kafka.Offset.Stored">
<summary>
A special value that refers to the stored offset for a partition.
</summary>
</member>
<member name="F:Confluent.Kafka.Offset.Unset">
<summary>
A special value that refers to an invalid, unassigned or default partition offset.
</summary>
</member>
<member name="M:Confluent.Kafka.Offset.#ctor(System.Int64)">
<summary>
Initializes a new instance of the Offset structure.
</summary>
<param name="offset">
The offset value
</param>
</member>
<member name="P:Confluent.Kafka.Offset.Value">
<summary>
Gets the long value corresponding to this offset.
</summary>
</member>
<member name="P:Confluent.Kafka.Offset.IsSpecial">
<summary>
Gets whether or not this is one of the special
offset values.
</summary>
</member>
<member name="M:Confluent.Kafka.Offset.Equals(System.Object)">
<summary>
Tests whether this Offset value is equal to the specified object.
</summary>
<param name="obj">
The object to test.
</param>
<returns>
true if obj is an Offset and has the same value. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Offset.Equals(Confluent.Kafka.Offset)">
<summary>
Tests whether this Offset value is equal to the specified Offset.
</summary>
<param name="other">
The offset to test.
</param>
<returns>
true if other has the same value. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Offset.op_Equality(Confluent.Kafka.Offset,Confluent.Kafka.Offset)">
<summary>
Tests whether Offset value a is equal to Offset value b.
</summary>
<param name="a">
The first Offset value to compare.
</param>
<param name="b">
The second Offset value to compare.
</param>
<returns>
true if Offset value a and b are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Offset.op_Inequality(Confluent.Kafka.Offset,Confluent.Kafka.Offset)">
<summary>
Tests whether Offset value a is not equal to Offset value b.
</summary>
<param name="a">
The first Offset value to compare.
</param>
<param name="b">
The second Offset value to compare.
</param>
<returns>
true if Offset value a and b are not equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Offset.op_GreaterThan(Confluent.Kafka.Offset,Confluent.Kafka.Offset)">
<summary>
Tests whether Offset value a is greater than Offset value b.
</summary>
<param name="a">
The first Offset value to compare.
</param>
<param name="b">
The second Offset value to compare.
</param>
<returns>
true if Offset value a is greater than Offset value b. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Offset.op_LessThan(Confluent.Kafka.Offset,Confluent.Kafka.Offset)">
<summary>
Tests whether Offset value a is less than Offset value b.
</summary>
<param name="a">
The first Offset value to compare.
</param>
<param name="b">
The second Offset value to compare.
</param>
<returns>
true if Offset value a is less than Offset value b. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Offset.op_GreaterThanOrEqual(Confluent.Kafka.Offset,Confluent.Kafka.Offset)">
<summary>
Tests whether Offset value a is greater than or equal to Offset value b.
</summary>
<param name="a">
The first Offset value to compare.
</param>
<param name="b">
The second Offset value to compare.
</param>
<returns>
true if Offset value a is greater than or equal to Offset value b. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Offset.op_LessThanOrEqual(Confluent.Kafka.Offset,Confluent.Kafka.Offset)">
<summary>
Tests whether Offset value a is less than or equal to Offset value b.
</summary>
<param name="a">
The first Offset value to compare.
</param>
<param name="b">
The second Offset value to compare.
</param>
<returns>
true if Offset value a is less than or equal to Offset value b. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Offset.op_Addition(Confluent.Kafka.Offset,System.Int32)">
<summary>
Add an integer value to an Offset value.
</summary>
<param name="a">
The Offset value to add the integer value to.
</param>
<param name="b">
The integer value to add to the Offset value.
</param>
<returns>
The Offset value incremented by the integer value b.
</returns>
</member>
<member name="M:Confluent.Kafka.Offset.op_Addition(Confluent.Kafka.Offset,System.Int64)">
<summary>
Add a long value to an Offset value.
</summary>
<param name="a">
The Offset value to add the long value to.
</param>
<param name="b">
The long value to add to the Offset value.
</param>
<returns>
The Offset value incremented by the long value b.
</returns>
</member>
<member name="M:Confluent.Kafka.Offset.GetHashCode">
<summary>
Returns a hash code for this Offset.
</summary>
<returns>
An integer that specifies a hash value for this Offset.
</returns>
</member>
<member name="M:Confluent.Kafka.Offset.op_Implicit(System.Int64)~Confluent.Kafka.Offset">
<summary>
Converts the specified long value to an Offset value.
</summary>
<param name="v">
The long value to convert.
</param>
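            <example>
            A minimal sketch of the conversions (values are illustrative); Offset.End
            is one of the special values defined on this type:
            <code><![CDATA[
            Offset o = 42L;                    // long -> Offset (implicit)
            long v = o;                        // Offset -> long (implicit)
            bool special = o.IsSpecial;        // false for a concrete offset
            bool end = Offset.End.IsSpecial;   // true for special values
            ]]></code>
            </example>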
</member>
<member name="M:Confluent.Kafka.Offset.op_Implicit(Confluent.Kafka.Offset)~System.Int64">
<summary>
Converts the specified Offset value to a long value.
</summary>
<param name="o">
The Offset value to convert.
</param>
</member>
<member name="M:Confluent.Kafka.Offset.ToString">
<summary>
Returns a string representation of the Offset object.
</summary>
<returns>
A string that represents the Offset object.
</returns>
</member>
<member name="T:Confluent.Kafka.Partition">
<summary>
Represents a Kafka partition.
</summary>
<remarks>
This structure is the same size as an int;
its purpose is to add some syntactic sugar
related to special values.
</remarks>
</member>
<member name="F:Confluent.Kafka.Partition.Any">
<summary>
A special value that refers to an unspecified / unknown partition.
</summary>
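            <example>
            A minimal sketch ("my-topic" is a placeholder): producing to
            Partition.Any delegates the choice of partition to the configured
            partitioner, just as producing to a bare topic name does.
            <code><![CDATA[
            var tp = new TopicPartition("my-topic", Partition.Any);
            ]]></code>
            </example>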
</member>
<member name="M:Confluent.Kafka.Partition.#ctor(System.Int32)">
<summary>
Initializes a new instance of the Partition structure.
</summary>
<param name="partition">
The partition value.
</param>
</member>
<member name="P:Confluent.Kafka.Partition.Value">
<summary>
Gets the int value corresponding to this partition.
</summary>
</member>
<member name="P:Confluent.Kafka.Partition.IsSpecial">
<summary>
Gets whether or not this is one of the special
partition values.
</summary>
</member>
<member name="M:Confluent.Kafka.Partition.Equals(System.Object)">
<summary>
Tests whether this Partition value is equal to the specified object.
</summary>
<param name="obj">
The object to test.
</param>
<returns>
true if obj is a Partition instance and has the same value. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Partition.Equals(Confluent.Kafka.Partition)">
<summary>
Tests whether this Partition value is equal to the specified Partition.
</summary>
<param name="other">
The partition to test.
</param>
<returns>
true if other has the same value. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Partition.op_Equality(Confluent.Kafka.Partition,Confluent.Kafka.Partition)">
<summary>
Tests whether Partition value a is equal to Partition value b.
</summary>
<param name="a">
The first Partition value to compare.
</param>
<param name="b">
The second Partition value to compare.
</param>
<returns>
true if Partition value a and b are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Partition.op_Inequality(Confluent.Kafka.Partition,Confluent.Kafka.Partition)">
<summary>
Tests whether Partition value a is not equal to Partition value b.
</summary>
<param name="a">
The first Partition value to compare.
</param>
<param name="b">
The second Partition value to compare.
</param>
<returns>
true if Partition value a and b are not equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Partition.op_GreaterThan(Confluent.Kafka.Partition,Confluent.Kafka.Partition)">
<summary>
Tests whether Partition value a is greater than Partition value b.
</summary>
<param name="a">
The first Partition value to compare.
</param>
<param name="b">
The second Partition value to compare.
</param>
<returns>
true if Partition value a is greater than Partition value b. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Partition.op_LessThan(Confluent.Kafka.Partition,Confluent.Kafka.Partition)">
<summary>
Tests whether Partition value a is less than Partition value b.
</summary>
<param name="a">
The first Partition value to compare.
</param>
<param name="b">
The second Partition value to compare.
</param>
<returns>
true if Partition value a is less than Partition value b. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Partition.op_GreaterThanOrEqual(Confluent.Kafka.Partition,Confluent.Kafka.Partition)">
<summary>
Tests whether Partition value a is greater than or equal to Partition value b.
</summary>
<param name="a">
The first Partition value to compare.
</param>
<param name="b">
The second Partition value to compare.
</param>
<returns>
true if Partition value a is greater than or equal to Partition value b. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Partition.op_LessThanOrEqual(Confluent.Kafka.Partition,Confluent.Kafka.Partition)">
<summary>
Tests whether Partition value a is less than or equal to Partition value b.
</summary>
<param name="a">
The first Partition value to compare.
</param>
<param name="b">
The second Partition value to compare.
</param>
<returns>
true if Partition value a is less than or equal to Partition value b. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Partition.GetHashCode">
<summary>
Returns a hash code for this Partition.
</summary>
<returns>
An integer that specifies a hash value for this Partition.
</returns>
</member>
<member name="M:Confluent.Kafka.Partition.op_Implicit(System.Int32)~Confluent.Kafka.Partition">
<summary>
Converts the specified int value to a Partition value.
</summary>
<param name="v">
The int value to convert.
</param>
</member>
<member name="M:Confluent.Kafka.Partition.op_Implicit(Confluent.Kafka.Partition)~System.Int32">
<summary>
Converts the specified Partition value to an int value.
</summary>
<param name="o">
The Partition value to convert.
</param>
</member>
<member name="M:Confluent.Kafka.Partition.ToString">
<summary>
Returns a string representation of the Partition object.
</summary>
<returns>
A string that represents the Partition object.
</returns>
</member>
<member name="T:Confluent.Kafka.PartitionMetadata">
<summary>
Metadata pertaining to a single Kafka topic partition.
</summary>
</member>
<member name="M:Confluent.Kafka.PartitionMetadata.#ctor(System.Int32,System.Int32,System.Int32[],System.Int32[],Confluent.Kafka.Error)">
<summary>
Initializes a new PartitionMetadata instance.
</summary>
<param name="partitionId">
The id of the partition this metadata relates to.
</param>
<param name="leader">
The id of the broker that is the leader for the partition.
</param>
<param name="replicas">
The ids of all brokers that contain replicas of the partition.
</param>
<param name="inSyncReplicas">
The ids of all brokers that contain in-sync replicas of the partition.
Note: this value is cached by the broker and is consequently not guaranteed to be up-to-date.
</param>
<param name="error">
A rich <see cref="P:Confluent.Kafka.PartitionMetadata.Error"/> object associated with the request for this partition metadata.
</param>
</member>
<member name="P:Confluent.Kafka.PartitionMetadata.PartitionId">
<summary>
Gets the id of the partition this metadata relates to.
</summary>
</member>
<member name="P:Confluent.Kafka.PartitionMetadata.Leader">
<summary>
Gets the id of the broker that is the leader for the partition.
</summary>
</member>
<member name="P:Confluent.Kafka.PartitionMetadata.Replicas">
<summary>
Gets the ids of all brokers that contain replicas of the partition.
</summary>
</member>
<member name="P:Confluent.Kafka.PartitionMetadata.InSyncReplicas">
<summary>
Gets the ids of all brokers that contain in-sync replicas of the partition.
Note: this value is cached by the broker and is consequently not guaranteed to be up-to-date.
</summary>
</member>
<member name="P:Confluent.Kafka.PartitionMetadata.Error">
<summary>
Gets a rich <see cref="P:Confluent.Kafka.PartitionMetadata.Error"/> object associated with the request for this partition metadata.
</summary>
</member>
<member name="M:Confluent.Kafka.PartitionMetadata.ToString">
<summary>
Returns a JSON representation of the PartitionMetadata object.
</summary>
<returns>
A JSON representation of the PartitionMetadata object.
</returns>
</member>
<member name="T:Confluent.Kafka.PersistenceStatus">
<summary>
Enumeration of possible message persistence states.
</summary>
</member>
<member name="F:Confluent.Kafka.PersistenceStatus.NotPersisted">
<summary>
Message was never transmitted to the broker, or failed with
an error indicating it was not written to the log.
Application retry risks ordering, but not duplication.
</summary>
</member>
<member name="F:Confluent.Kafka.PersistenceStatus.PossiblyPersisted">
<summary>
Message was transmitted to broker, but no acknowledgement was
received. Application retry risks ordering and duplication.
</summary>
</member>
<member name="F:Confluent.Kafka.PersistenceStatus.Persisted">
<summary>
Message was written to the log and acknowledged by the broker.
Note: acks='all' should be used for this to be fully trusted
in case of a broker failover.
</summary>
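            <example>
            A sketch of using the status in a delivery handler to judge retry
            safety; the producer instance and topic name are placeholders:
            <code><![CDATA[
            producer.Produce("my-topic", new Message<Null, string> { Value = "v" },
                report =>
                {
                    if (report.Error.IsError)
                    {
                        // NotPersisted: retrying cannot duplicate (may reorder).
                        // PossiblyPersisted: the broker may already have the
                        // message, so an application-level retry risks a duplicate.
                        bool retryRisksDuplicate =
                            report.Status == PersistenceStatus.PossiblyPersisted;
                    }
                });
            ]]></code>
            </example>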
</member>
<member name="T:Confluent.Kafka.ProduceException`2">
<summary>
Represents an error that occurred whilst producing a message.
</summary>
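            <example>
            A minimal handling sketch (inside an async method; the producer
            instance and topic name are placeholders):
            <code><![CDATA[
            try
            {
                var dr = await producer.ProduceAsync(
                    "my-topic", new Message<Null, string> { Value = "hello" });
            }
            catch (ProduceException<Null, string> e)
            {
                // The partial delivery result, including its PersistenceStatus,
                // is attached to the exception.
                Console.WriteLine($"failed: {e.Error.Reason} (status: {e.DeliveryResult.Status})");
            }
            ]]></code>
            </example>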
</member>
<member name="M:Confluent.Kafka.ProduceException`2.#ctor(Confluent.Kafka.Error,Confluent.Kafka.DeliveryResult{`0,`1},System.Exception)">
<summary>
Initialize a new instance of ProduceException based on
an existing Error value.
</summary>
<param name="error">
The error associated with the delivery result.
</param>
<param name="deliveryResult">
The delivery result associated with the produce request.
</param>
<param name="innerException">
The exception instance that caused this exception.
</param>
</member>
<member name="M:Confluent.Kafka.ProduceException`2.#ctor(Confluent.Kafka.Error,Confluent.Kafka.DeliveryResult{`0,`1})">
<summary>
Initialize a new instance of ProduceException based on
an existing Error value.
</summary>
<param name="error">
The error associated with the delivery report.
</param>
<param name="deliveryResult">
The delivery result associated with the produce request.
</param>
</member>
<member name="P:Confluent.Kafka.ProduceException`2.DeliveryResult">
<summary>
The delivery result associated with the produce request.
</summary>
</member>
<member name="T:Confluent.Kafka.Producer`2">
<summary>
A high level producer with serialization capability.
</summary>
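            <example>
            A minimal end-to-end sketch (broker address and topic name are
            placeholders):
            <code><![CDATA[
            var config = new ProducerConfig { BootstrapServers = "localhost:9092" };
            using (var producer = new ProducerBuilder<Null, string>(config).Build())
            {
                var dr = await producer.ProduceAsync(
                    "my-topic", new Message<Null, string> { Value = "hello world" });
                Console.WriteLine($"delivered to {dr.TopicPartitionOffset}");
            }
            ]]></code>
            </example>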
</member>
<member name="M:Confluent.Kafka.Producer`2.Poll(System.TimeSpan)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.Flush(System.TimeSpan)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.Flush(System.Threading.CancellationToken)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.Dispose">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.Dispose(System.Boolean)">
<summary>
Releases the unmanaged resources used by the
<see cref="T:Confluent.Kafka.Producer`2" />
and optionally disposes the managed resources.
</summary>
<param name="disposing">
true to release both managed and unmanaged resources;
false to release only unmanaged resources.
</param>
</member>
<member name="P:Confluent.Kafka.Producer`2.Name">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.AddBrokers(System.String)">
<inheritdoc/>
</member>
<member name="P:Confluent.Kafka.Producer`2.Handle">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.ProduceAsync(Confluent.Kafka.TopicPartition,Confluent.Kafka.Message{`0,`1},System.Threading.CancellationToken)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.ProduceAsync(System.String,Confluent.Kafka.Message{`0,`1},System.Threading.CancellationToken)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.Produce(System.String,Confluent.Kafka.Message{`0,`1},System.Action{Confluent.Kafka.DeliveryReport{`0,`1}})">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.Produce(Confluent.Kafka.TopicPartition,Confluent.Kafka.Message{`0,`1},System.Action{Confluent.Kafka.DeliveryReport{`0,`1}})">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.InitTransactions(System.TimeSpan)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.BeginTransaction">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.CommitTransaction(System.TimeSpan)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.CommitTransaction">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.AbortTransaction(System.TimeSpan)">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.AbortTransaction">
<inheritdoc/>
</member>
<member name="M:Confluent.Kafka.Producer`2.SendOffsetsToTransaction(System.Collections.Generic.IEnumerable{Confluent.Kafka.TopicPartitionOffset},Confluent.Kafka.IConsumerGroupMetadata,System.TimeSpan)">
<inheritdoc/>
</member>
<member name="T:Confluent.Kafka.PartitionerDelegate">
<summary>
Calculate a partition number given a <paramref name="partitionCount" />
and serialized <paramref name="keyData" />. The <paramref name="topic" />
is also provided, but is typically not used.
</summary>
<remarks>
A partitioner instance may be called in any thread at any time and
may be called multiple times for the same message/key.
A partitioner:
- MUST NOT call any method on the producer instance.
- MUST NOT block or execute for prolonged periods of time.
- MUST return a value between 0 and partitionCount-1.
- MUST NOT throw any exception.
</remarks>
<param name="topic">
The topic.
</param>
<param name="partitionCount">
The number of partitions in <paramref name="topic" />.
</param>
<param name="keyData">
The serialized key data.
</param>
<param name="keyIsNull">
Whether or not the key is null (distinguishes the null and empty case).
</param>
<returns>
The calculated <seealso cref="T:Confluent.Kafka.Partition"/>, possibly
<seealso cref="F:Confluent.Kafka.Partition.Any"/>.
</returns>
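            <example>
            A sketch of a delegate honoring the above contract (no blocking, no
            exceptions, result within [0, partitionCount-1]); the hashing scheme
            is illustrative only and config is a ProducerConfig instance:
            <code><![CDATA[
            PartitionerDelegate partitioner = (topic, partitionCount, keyData, keyIsNull) =>
            {
                if (keyIsNull) return Partition.Any; // defer to the default behavior
                int hash = 0;
                foreach (byte b in keyData) hash = hash * 31 + b;
                return new Partition((hash & 0x7fffffff) % partitionCount);
            };

            using var producer = new ProducerBuilder<string, string>(config)
                .SetDefaultPartitioner(partitioner)
                .Build();
            ]]></code>
            </example>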
</member>
<member name="T:Confluent.Kafka.ProducerBuilder`2">
<summary>
A builder class for <see cref="T:Confluent.Kafka.IProducer`2" />.
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerBuilder`2.Config">
<summary>
The config dictionary.
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerBuilder`2.ErrorHandler">
<summary>
The configured error handler.
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerBuilder`2.LogHandler">
<summary>
The configured log handler.
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerBuilder`2.StatisticsHandler">
<summary>
The configured statistics handler.
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerBuilder`2.OAuthBearerTokenRefreshHandler">
<summary>
The configured OAuthBearer Token Refresh handler.
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerBuilder`2.Partitioners">
<summary>
The per-topic custom partitioners.
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerBuilder`2.DefaultPartitioner">
<summary>
The default custom partitioner.
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerBuilder`2.KeySerializer">
<summary>
The configured key serializer.
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerBuilder`2.ValueSerializer">
<summary>
The configured value serializer.
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerBuilder`2.AsyncKeySerializer">
<summary>
The configured async key serializer.
</summary>
</member>
<member name="P:Confluent.Kafka.ProducerBuilder`2.AsyncValueSerializer">
<summary>
The configured async value serializer.
</summary>
</member>
<member name="M:Confluent.Kafka.ProducerBuilder`2.#ctor(System.Collections.Generic.IEnumerable{System.Collections.Generic.KeyValuePair{System.String,System.String}})">
<summary>
A collection of librdkafka configuration parameters
(refer to https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
and parameters specific to this client (refer to:
<see cref="T:Confluent.Kafka.ConfigPropertyNames" />).
At a minimum, 'bootstrap.servers' must be specified.
</summary>
</member>
<member name="M:Confluent.Kafka.ProducerBuilder`2.SetStatisticsHandler(System.Action{Confluent.Kafka.IProducer{`0,`1},System.String})">
<summary>
Set the handler to call on statistics events. Statistics are provided as
a JSON formatted string as defined here:
https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md
</summary>
<remarks>
You can enable statistics and set the statistics interval
using the StatisticsIntervalMs configuration property
(disabled by default).
Executes on the poll thread (by default, a background thread managed by
the producer).
Exceptions: Any exception thrown by your statistics handler
will be delivered to your error handler, if set, else they will be
silently ignored.
</remarks>
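            <example>
            A sketch enabling statistics every 10 seconds (interval and broker
            address are illustrative):
            <code><![CDATA[
            var config = new ProducerConfig
            {
                BootstrapServers = "localhost:9092",
                StatisticsIntervalMs = 10000
            };
            using var producer = new ProducerBuilder<Null, string>(config)
                .SetStatisticsHandler((_, json) => Console.WriteLine(json))
                .Build();
            ]]></code>
            </example>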
</member>
<member name="M:Confluent.Kafka.ProducerBuilder`2.SetPartitioner(System.String,Confluent.Kafka.PartitionerDelegate)">
<summary>
Set a custom partitioner to use when producing messages to
<paramref name="topic" />.
</summary>
</member>
<member name="M:Confluent.Kafka.ProducerBuilder`2.SetDefaultPartitioner(Confluent.Kafka.PartitionerDelegate)">
<summary>
Set a custom partitioner that will be used for all topics
except those for which a partitioner has been explicitly configured.
</summary>
</member>
<member name="M:Confluent.Kafka.ProducerBuilder`2.SetErrorHandler(System.Action{Confluent.Kafka.IProducer{`0,`1},Confluent.Kafka.Error})">
<summary>
Set the handler to call on error events e.g. connection failures or all
brokers down. Note that the client will try to automatically recover from
errors that are not marked as fatal. Non-fatal errors should be interpreted
as informational rather than catastrophic.
</summary>
<remarks>
Executes on the poll thread (by default, a background thread managed by
the producer).
Exceptions: Any exception thrown by your error handler will be silently
ignored.
</remarks>
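            <example>
            A sketch distinguishing fatal from recoverable errors (config is a
            ProducerConfig instance):
            <code><![CDATA[
            using var producer = new ProducerBuilder<Null, string>(config)
                .SetErrorHandler((_, e) =>
                {
                    if (e.IsFatal)
                        Console.Error.WriteLine($"fatal: {e.Reason}");  // instance unusable
                    else
                        Console.WriteLine($"recoverable: {e.Reason}");  // informational
                })
                .Build();
            ]]></code>
            </example>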
</member>
<member name="M:Confluent.Kafka.ProducerBuilder`2.SetLogHandler(System.Action{Confluent.Kafka.IProducer{`0,`1},Confluent.Kafka.LogMessage})">
<summary>
Set the handler to call when there is information available
to be logged. If not specified, a default callback that writes
to stderr will be used.
</summary>
<remarks>
By default not many log messages are generated.
For more verbose logging, specify one or more debug contexts
using the Debug configuration property.
Warning: Log handlers are called spontaneously from internal
librdkafka threads and the application must not call any
Confluent.Kafka APIs from within a log handler or perform any
prolonged operations.
Exceptions: Any exception thrown by your log handler will be
silently ignored.
</remarks>
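            <example>
            A sketch with verbose debug contexts enabled (the Debug value is
            illustrative; see the librdkafka documentation for valid contexts):
            <code><![CDATA[
            var config = new ProducerConfig
            {
                BootstrapServers = "localhost:9092",
                Debug = "broker,topic,msg"
            };
            using var producer = new ProducerBuilder<Null, string>(config)
                .SetLogHandler((_, m) => Console.WriteLine($"{m.Level} {m.Name}: {m.Message}"))
                .Build();
            ]]></code>
            </example>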
</member>
<member name="M:Confluent.Kafka.ProducerBuilder`2.SetOAuthBearerTokenRefreshHandler(System.Action{Confluent.Kafka.IProducer{`0,`1},System.String})">
<summary>
Set SASL/OAUTHBEARER token refresh callback in provided
conf object. The SASL/OAUTHBEARER token refresh callback
is triggered via <see cref="M:Confluent.Kafka.IProducer`2.Poll(System.TimeSpan)"/>
whenever OAUTHBEARER is the SASL mechanism and a token
needs to be retrieved, typically based on the configuration
defined in sasl.oauthbearer.config. The callback should
invoke <see cref="M:Confluent.Kafka.ClientExtensions.OAuthBearerSetToken(Confluent.Kafka.IClient,System.String,System.Int64,System.String,System.Collections.Generic.IDictionary{System.String,System.String})"/>
or <see cref="M:Confluent.Kafka.ClientExtensions.OAuthBearerSetTokenFailure(Confluent.Kafka.IClient,System.String)"/>
to indicate success or failure, respectively.
An unsecured JWT refresh handler is provided by librdkafka
for development and testing purposes; it is enabled by
setting the enable.sasl.oauthbearer.unsecure.jwt property
to true and is mutually exclusive with using a refresh callback.
</summary>
<param name="oAuthBearerTokenRefreshHandler">
The callback to set; callback function arguments:
IProducer - the producer instance, which should be used to
set the token or token failure; string - the value of the
sasl.oauthbearer.config configuration property.
</param>
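            <example>
            A sketch of a refresh handler; GetTokenSomehow is a hypothetical
            application-specific helper, config is a ProducerConfig instance,
            and the expiry is assumed to be unix epoch milliseconds:
            <code><![CDATA[
            using var producer = new ProducerBuilder<Null, string>(config)
                .SetOAuthBearerTokenRefreshHandler((client, cfg) =>
                {
                    try
                    {
                        // Hypothetical helper: obtain a token however your
                        // identity provider requires, driven by cfg
                        // (the sasl.oauthbearer.config value).
                        (string token, long expiryUnixMs, string principal) =
                            GetTokenSomehow(cfg);
                        client.OAuthBearerSetToken(token, expiryUnixMs, principal);
                    }
                    catch (Exception e)
                    {
                        client.OAuthBearerSetTokenFailure(e.Message);
                    }
                })
                .Build();
            ]]></code>
            </example>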
</member>
<member name="M:Confluent.Kafka.ProducerBuilder`2.SetKeySerializer(Confluent.Kafka.ISerializer{`0})">
<summary>
The serializer to use to serialize keys.
</summary>
<remarks>
If your key serializer throws an exception, this will be
wrapped in a ProduceException with ErrorCode
Local_KeySerialization and thrown by the initiating call to
Produce or ProduceAsync.
</remarks>
</member>
<member name="M:Confluent.Kafka.ProducerBuilder`2.SetValueSerializer(Confluent.Kafka.ISerializer{`1})">
<summary>
The serializer to use to serialize values.
</summary>
<remarks>
If your value serializer throws an exception, this will be
wrapped in a ProduceException with ErrorCode
Local_ValueSerialization and thrown by the initiating call to
Produce or ProduceAsync.
</remarks>
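            <example>
            A sketch of a custom sync serializer (a Guid as its 16-byte
            representation; illustrative only, with config a ProducerConfig
            instance); any exception it throws surfaces as described above:
            <code><![CDATA[
            class GuidSerializer : ISerializer<Guid>
            {
                public byte[] Serialize(Guid data, SerializationContext context)
                    => data.ToByteArray();
            }

            using var producer = new ProducerBuilder<Null, Guid>(config)
                .SetValueSerializer(new GuidSerializer())
                .Build();
            ]]></code>
            </example>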
</member>
<member name="M:Confluent.Kafka.ProducerBuilder`2.SetKeySerializer(Confluent.Kafka.IAsyncSerializer{`0})">
<summary>
The serializer to use to serialize keys.
</summary>
<remarks>
If your key serializer throws an exception, this will be
wrapped in a ProduceException with ErrorCode
Local_KeySerialization and thrown by the initiating call to
Produce or ProduceAsync.
</remarks>
</member>
<member name="M:Confluent.Kafka.ProducerBuilder`2.SetValueSerializer(Confluent.Kafka.IAsyncSerializer{`1})">
<summary>
The serializer to use to serialize values.
</summary>
<remarks>
If your value serializer throws an exception, this will be
wrapped in a ProduceException with ErrorCode
Local_ValueSerialization and thrown by the initiating call to
Produce or ProduceAsync.
</remarks>
</member>
<member name="M:Confluent.Kafka.ProducerBuilder`2.Build">
<summary>
Build a new IProducer implementation instance.
</summary>
</member>
<member name="T:Confluent.Kafka.SerializationContext">
<summary>
Context relevant to a serialization or deserialization operation.
</summary>
</member>
<member name="P:Confluent.Kafka.SerializationContext.Empty">
<summary>
The default SerializationContext value (representing no context defined).
</summary>
</member>
<member name="M:Confluent.Kafka.SerializationContext.#ctor(Confluent.Kafka.MessageComponentType,System.String,Confluent.Kafka.Headers)">
<summary>
Create a new SerializationContext object instance.
</summary>
<param name="component">
The component of the message the serialization operation relates to.
</param>
<param name="topic">
The topic the data is being written to or read from.
</param>
<param name="headers">
The collection of message headers (or null). Specifying null is
equivalent to specifying an empty list. The order of headers is
maintained, and duplicate header keys are allowed.
</param>
</member>
<member name="P:Confluent.Kafka.SerializationContext.Topic">
<summary>
The topic the data is being written to or read from.
</summary>
</member>
<member name="P:Confluent.Kafka.SerializationContext.Component">
<summary>
The component of the message the serialization operation relates to.
</summary>
</member>
<member name="P:Confluent.Kafka.SerializationContext.Headers">
<summary>
The collection of message headers (or null). Specifying null is
equivalent to specifying an empty list. The order of headers is
maintained, and duplicate header keys are allowed.
</summary>
</member>
<member name="T:Confluent.Kafka.Serializers">
<summary>
Serializers for use with <see cref="T:Confluent.Kafka.Producer`2" />.
</summary>
</member>
<member name="F:Confluent.Kafka.Serializers.Utf8">
<summary>
String (UTF8) serializer.
</summary>
</member>
<member name="F:Confluent.Kafka.Serializers.Null">
<summary>
Null serializer.
</summary>
</member>
<member name="F:Confluent.Kafka.Serializers.Int64">
<summary>
System.Int64 (big endian, network byte order) serializer.
</summary>
</member>
<member name="F:Confluent.Kafka.Serializers.Int32">
<summary>
System.Int32 (big endian, network byte order) serializer.
</summary>
</member>
<member name="F:Confluent.Kafka.Serializers.Single">
<summary>
System.Single (big endian, network byte order) serializer.
</summary>
</member>
<member name="F:Confluent.Kafka.Serializers.Double">
<summary>
System.Double (big endian, network byte order) serializer.
</summary>
</member>
<member name="F:Confluent.Kafka.Serializers.ByteArray">
<summary>
System.Byte[] (nullable) serializer.
</summary>
<remarks>
Byte order is original order.
</remarks>
</member>
<member name="T:Confluent.Kafka.SyncOverAsync.SyncOverAsyncDeserializer`1">
<summary>
An adapter that allows an async deserializer
to be used where a sync deserializer is required.
In using this adapter, there are two potential
issues you should be aware of:
1. If you are working in a single threaded
SynchronizationContext (for example, a
WindowsForms application), you must ensure
that all methods awaited by your deserializer
(at all levels) are configured to NOT
continue on the captured context, otherwise
your application will deadlock. You do this
by calling .ConfigureAwait(false) on every
method awaited in your deserializer
implementation. If your deserializer makes use
of a library that does not do this, you
can get around this by calling await
Task.Run(() => ...) to force the library
method to execute in a SynchronizationContext
that is not single threaded. Note: all
Confluent async deserializers comply with the
above.
2. In any application, there is potential
for a deadlock due to thread pool exhaustion.
This can happen because in order for an async
method to complete, a thread pool thread is
typically required. However, if all available
thread pool threads are in use waiting for the
async methods to complete, there will be
no threads available to complete the tasks
(deadlock). Due to (a) the large default
number of thread pool threads in the modern
runtime and (b) the infrequent need for a
typical async deserializer to wait on an async
result (i.e. most deserializers will only
infrequently need to execute asynchronously),
this scenario should not commonly occur in
practice.
</summary>
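            <example>
            A sketch of wrapping an async deserializer for use with a consumer;
            myAsyncDeserializer stands in for any IAsyncDeserializer and MyType
            for its value type:
            <code><![CDATA[
            using Confluent.Kafka.SyncOverAsync;

            var consumerConfig = new ConsumerConfig
            {
                BootstrapServers = "localhost:9092",
                GroupId = "example-group"
            };
            using var consumer = new ConsumerBuilder<Null, MyType>(consumerConfig)
                .SetValueDeserializer(myAsyncDeserializer.AsSyncOverAsync())
                .Build();
            ]]></code>
            </example>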
</member>
<member name="M:Confluent.Kafka.SyncOverAsync.SyncOverAsyncDeserializer`1.#ctor(Confluent.Kafka.IAsyncDeserializer{`0})">
<summary>
Initializes a new SyncOverAsyncDeserializer.
</summary>
</member>
<member name="M:Confluent.Kafka.SyncOverAsync.SyncOverAsyncDeserializer`1.Deserialize(System.ReadOnlySpan{System.Byte},System.Boolean,Confluent.Kafka.SerializationContext)">
<summary>
Deserialize a message key or value.
</summary>
<param name="data">
The data to deserialize.
</param>
<param name="isNull">
Whether or not the value is null.
</param>
<param name="context">
Context relevant to the deserialize
operation.
</param>
<returns>
The deserialized value.
</returns>
</member>
<member name="T:Confluent.Kafka.SyncOverAsync.SyncOverAsyncDeserializerExtensionMethods">
<summary>
Extension methods related to SyncOverAsyncDeserializer.
</summary>
</member>
<member name="M:Confluent.Kafka.SyncOverAsync.SyncOverAsyncDeserializerExtensionMethods.AsSyncOverAsync``1(Confluent.Kafka.IAsyncDeserializer{``0})">
<summary>
Create a sync deserializer by wrapping an async
one. For more information on the potential
pitfalls in doing this, refer to <see cref="T:Confluent.Kafka.SyncOverAsync.SyncOverAsyncDeserializer`1" />.
</summary>
</member>
<member name="T:Confluent.Kafka.SyncOverAsync.SyncOverAsyncSerializer`1">
<summary>
An adapter that allows an async serializer
to be used where a sync serializer is required.
In using this adapter, there are two potential
issues you should be aware of:
1. If you are working in a single threaded
SynchronizationContext (for example, a
WindowsForms application), you must ensure
that all methods awaited by your serializer
(at all levels) are configured to NOT
continue on the captured context, otherwise
your application will deadlock. You do this
by calling .ConfigureAwait(false) on every
method awaited in your serializer
implementation. If your serializer makes use
of a library that does not do this, you
can get around this by calling await
Task.Run(() => ...) to force the library
method to execute in a SynchronizationContext
that is not single threaded. Note: all
Confluent async serializers are safe to use
with this adapter.
2. In any application, there is potential
for a deadlock due to thread pool exhaustion.
This can happen because in order for an async
method to complete, a thread pool thread is
typically required. However, if all available
thread pool threads are in use waiting for the
async methods to complete, there will be
no threads available to complete the tasks
(deadlock). Due to (a) the large default
number of thread pool threads in the modern
runtime and (b) the infrequent need for a
typical async serializer to wait on an async
result (i.e. most serializers will only
infrequently need to execute asynchronously),
this scenario should not commonly occur in
practice.
</summary>
</member>
<member name="M:Confluent.Kafka.SyncOverAsync.SyncOverAsyncSerializer`1.#ctor(Confluent.Kafka.IAsyncSerializer{`0})">
<summary>
Initializes a new SyncOverAsyncSerializer
instance.
</summary>
</member>
<member name="M:Confluent.Kafka.SyncOverAsync.SyncOverAsyncSerializer`1.Serialize(`0,Confluent.Kafka.SerializationContext)">
<summary>
Serialize the key or value of a <see cref="T:Confluent.Kafka.Message`2" />
instance.
</summary>
<param name="data">
The value to serialize.
</param>
<param name="context">
Context relevant to the serialize operation.
</param>
<returns>
The serialized data.
</returns>
</member>
<member name="T:Confluent.Kafka.SyncOverAsync.SyncOverAsyncSerializerExtensionMethods">
<summary>
Extension methods related to SyncOverAsyncSerializer.
</summary>
</member>
<member name="M:Confluent.Kafka.SyncOverAsync.SyncOverAsyncSerializerExtensionMethods.AsSyncOverAsync``1(Confluent.Kafka.IAsyncSerializer{``0})">
<summary>
Create a sync serializer by wrapping an async
one. For more information on the potential
pitfalls in doing this, refer to <see cref="T:Confluent.Kafka.SyncOverAsync.SyncOverAsyncSerializer`1" />.
</summary>
</member>
<member name="T:Confluent.Kafka.SyslogLevel">
<summary>
Represents the log levels defined by syslog(3).
</summary>
</member>
<member name="F:Confluent.Kafka.SyslogLevel.Emergency">
<summary>
System is unusable.
</summary>
</member>
<member name="F:Confluent.Kafka.SyslogLevel.Alert">
<summary>
Action must be taken immediately.
</summary>
</member>
<member name="F:Confluent.Kafka.SyslogLevel.Critical">
<summary>
Critical condition.
</summary>
</member>
<member name="F:Confluent.Kafka.SyslogLevel.Error">
<summary>
Error condition.
</summary>
</member>
<member name="F:Confluent.Kafka.SyslogLevel.Warning">
<summary>
Warning condition.
</summary>
</member>
<member name="F:Confluent.Kafka.SyslogLevel.Notice">
<summary>
Normal, but significant condition.
</summary>
</member>
<member name="F:Confluent.Kafka.SyslogLevel.Info">
<summary>
Informational message.
</summary>
</member>
<member name="F:Confluent.Kafka.SyslogLevel.Debug">
<summary>
Debug-level message.
</summary>
</member>
<member name="T:Confluent.Kafka.Timestamp">
<summary>
Encapsulates a Kafka timestamp and its type.
</summary>
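            <example>
            A minimal construction sketch (the millisecond value shown is
            2021-01-01T00:00:00Z):
            <code><![CDATA[
            // From a DateTime: converted to UTC; type defaults to CreateTime.
            var t1 = new Timestamp(DateTime.UtcNow);

            // From a raw unix millisecond value with an explicit type.
            var t2 = new Timestamp(1609459200000, TimestampType.CreateTime);

            DateTime utc = t2.UtcDateTime;
            ]]></code>
            </example>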
</member>
<member name="P:Confluent.Kafka.Timestamp.Default">
<summary>
A read-only field representing an unspecified timestamp.
</summary>
</member>
<member name="F:Confluent.Kafka.Timestamp.UnixTimeEpoch">
<summary>
Unix epoch as a UTC DateTime. Unix time is defined as
the number of seconds that have elapsed since this UTC
time, excluding leap seconds.
</summary>
</member>
<member name="M:Confluent.Kafka.Timestamp.#ctor(System.Int64,Confluent.Kafka.TimestampType)">
<summary>
Initializes a new instance of the Timestamp structure.
</summary>
<param name="unixTimestampMs">
The unix millisecond timestamp.
</param>
<param name="type">
The type of the timestamp.
</param>
</member>
<member name="M:Confluent.Kafka.Timestamp.#ctor(System.DateTime,Confluent.Kafka.TimestampType)">
<summary>
Initializes a new instance of the Timestamp structure.
Note: <paramref name="dateTime"/> is first converted to UTC
if it is not already.
</summary>
<param name="dateTime">
The DateTime value corresponding to the timestamp.
</param>
<param name="type">
The type of the timestamp.
</param>
</member>
<member name="M:Confluent.Kafka.Timestamp.#ctor(System.DateTime)">
<summary>
Initializes a new instance of the Timestamp structure.
Note: <paramref name="dateTime" /> is first converted
to UTC if it is not already and TimestampType is set
to CreateTime.
</summary>
<param name="dateTime">
The DateTime value corresponding to the timestamp.
</param>
</member>
<member name="M:Confluent.Kafka.Timestamp.#ctor(System.DateTimeOffset)">
<summary>
Initializes a new instance of the Timestamp structure.
Note: TimestampType is set to CreateTime.
</summary>
<param name="dateTimeOffset">
The DateTimeOffset value corresponding to the timestamp.
</param>
</member>
<member name="P:Confluent.Kafka.Timestamp.Type">
<summary>
Gets the timestamp type.
</summary>
</member>
<member name="P:Confluent.Kafka.Timestamp.UnixTimestampMs">
<summary>
Get the Unix millisecond timestamp.
</summary>
</member>
<member name="P:Confluent.Kafka.Timestamp.UtcDateTime">
<summary>
Gets the UTC DateTime corresponding to the <see cref="P:Confluent.Kafka.Timestamp.UnixTimestampMs"/>.
</summary>
</member>
<member name="M:Confluent.Kafka.Timestamp.Equals(System.Object)">
<summary>
Determines whether two Timestamps have the same value.
</summary>
<param name="obj">
The object to test.
</param>
<returns>
true if obj is a Timestamp and its value is the same as
this instance; otherwise, false. If obj is null, the method
returns false.
</returns>
</member>
<member name="M:Confluent.Kafka.Timestamp.Equals(Confluent.Kafka.Timestamp)">
<summary>
Determines whether two Timestamps have the same value.
</summary>
<param name="other">
The timestamp to test.
</param>
<returns>
true if other has the same value. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.Timestamp.GetHashCode">
<summary>
Returns the hashcode for this Timestamp.
</summary>
<returns>
A 32-bit signed integer hash code.
</returns>
</member>
<member name="M:Confluent.Kafka.Timestamp.op_Equality(Confluent.Kafka.Timestamp,Confluent.Kafka.Timestamp)">
<summary>
Determines whether two specified Timestamps have the same value.
</summary>
<param name="a">
The first Timestamp to compare.
</param>
<param name="b">
The second Timestamp to compare.
</param>
<returns>
true if the value of a is the same as the value of b; otherwise, false.
</returns>
</member>
<member name="M:Confluent.Kafka.Timestamp.op_Inequality(Confluent.Kafka.Timestamp,Confluent.Kafka.Timestamp)">
<summary>
Determines whether two specified Timestamps have different values.
</summary>
<param name="a">
The first Timestamp to compare.
</param>
<param name="b">
The second Timestamp to compare.
</param>
<returns>
true if the value of a is different from the value of b; otherwise, false.
</returns>
</member>
<member name="M:Confluent.Kafka.Timestamp.DateTimeToUnixTimestampMs(System.DateTime)">
<summary>
Convert a DateTime instance to a milliseconds unix timestamp.
Note: <paramref name="dateTime"/> is first converted to UTC
if it is not already.
</summary>
<param name="dateTime">
The DateTime value to convert.
</param>
<returns>
The milliseconds unix timestamp corresponding to <paramref name="dateTime"/>
rounded down to the previous millisecond.
</returns>
</member>
<member name="M:Confluent.Kafka.Timestamp.UnixTimestampMsToDateTime(System.Int64)">
<summary>
Convert a milliseconds unix timestamp to a DateTime value.
</summary>
<param name="unixMillisecondsTimestamp">
The milliseconds unix timestamp to convert.
</param>
<returns>
The DateTime value associated with <paramref name="unixMillisecondsTimestamp"/> with Utc Kind.
</returns>
</member>
<member name="T:Confluent.Kafka.TimestampType">
<summary>
Enumerates the different meanings of a message timestamp value.
</summary>
</member>
<member name="F:Confluent.Kafka.TimestampType.NotAvailable">
<summary>
Timestamp type is unknown.
</summary>
</member>
<member name="F:Confluent.Kafka.TimestampType.CreateTime">
<summary>
Timestamp relates to message creation time as set by a Kafka client.
</summary>
</member>
<member name="F:Confluent.Kafka.TimestampType.LogAppendTime">
<summary>
Timestamp relates to the time a message was appended to a Kafka log.
</summary>
</member>
<member name="T:Confluent.Kafka.TopicMetadata">
<summary>
Metadata pertaining to a single Kafka topic.
</summary>
</member>
<member name="M:Confluent.Kafka.TopicMetadata.#ctor(System.String,System.Collections.Generic.List{Confluent.Kafka.PartitionMetadata},Confluent.Kafka.Error)">
<summary>
Initializes a new TopicMetadata class instance.
</summary>
<param name="topic">
The topic name.
</param>
<param name="partitions">
Metadata for each of the topic's partitions.
</param>
<param name="error">
A rich <see cref="P:Confluent.Kafka.TopicMetadata.Error"/> object associated with the request for this topic metadata.
</param>
</member>
<member name="P:Confluent.Kafka.TopicMetadata.Topic">
<summary>
Gets the topic name.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicMetadata.Partitions">
<summary>
Gets metadata for each of the topic's partitions.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicMetadata.Error">
<summary>
A rich <see cref="P:Confluent.Kafka.TopicMetadata.Error"/> object associated with the request for this topic metadata.
</summary>
</member>
<member name="M:Confluent.Kafka.TopicMetadata.ToString">
<summary>
Returns a JSON representation of the TopicMetadata object.
</summary>
<returns>
A JSON representation of the TopicMetadata object.
</returns>
</member>
<member name="T:Confluent.Kafka.TopicPartition">
<summary>
Represents a Kafka (topic, partition) tuple.
</summary>
</member>
<member name="M:Confluent.Kafka.TopicPartition.#ctor(System.String,Confluent.Kafka.Partition)">
<summary>
Initializes a new TopicPartition instance.
</summary>
<param name="topic">
A Kafka topic name.
</param>
<param name="partition">
A Kafka partition.
</param>
</member>
<member name="P:Confluent.Kafka.TopicPartition.Topic">
<summary>
Gets the Kafka topic name.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicPartition.Partition">
<summary>
Gets the Kafka partition.
</summary>
</member>
<member name="M:Confluent.Kafka.TopicPartition.Equals(System.Object)">
<summary>
Tests whether this TopicPartition instance is equal to the specified object.
</summary>
<param name="obj">
The object to test.
</param>
<returns>
true if obj is a TopicPartition and all properties are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartition.GetHashCode">
<summary>
Returns a hash code for this TopicPartition.
</summary>
<returns>
An integer that specifies a hash value for this TopicPartition.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartition.op_Equality(Confluent.Kafka.TopicPartition,Confluent.Kafka.TopicPartition)">
<summary>
Tests whether TopicPartition instance a is equal to TopicPartition instance b.
</summary>
<param name="a">
The first TopicPartition instance to compare.
</param>
<param name="b">
The second TopicPartition instance to compare.
</param>
<returns>
true if TopicPartition instances a and b are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartition.op_Inequality(Confluent.Kafka.TopicPartition,Confluent.Kafka.TopicPartition)">
<summary>
Tests whether TopicPartition instance a is not equal to TopicPartition instance b.
</summary>
<param name="a">
The first TopicPartition instance to compare.
</param>
<param name="b">
The second TopicPartition instance to compare.
</param>
<returns>
true if TopicPartition instances a and b are not equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartition.ToString">
<summary>
Returns a string representation of the TopicPartition object.
</summary>
<returns>
A string that represents the TopicPartition object.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartition.CompareTo(System.Object)">
<summary>
Compares the current instance with another object of the same type and returns
an integer that indicates whether the current instance precedes, follows, or
occurs in the same position in the sort order as the other object.
</summary>
<returns>
Less than zero: This instance precedes obj in the sort order.
Zero: This instance occurs in the same position in the sort order as obj.
Greater than zero: This instance follows obj in the sort order.
</returns>
</member>
<member name="T:Confluent.Kafka.TopicPartitionError">
<summary>
Represents a Kafka (topic, partition, error) tuple.
</summary>
</member>
<member name="M:Confluent.Kafka.TopicPartitionError.#ctor(Confluent.Kafka.TopicPartition,Confluent.Kafka.Error)">
<summary>
Initializes a new TopicPartitionError instance.
</summary>
<param name="tp">
Kafka topic name and partition values.
</param>
<param name="error">
A Kafka error.
</param>
</member>
<member name="M:Confluent.Kafka.TopicPartitionError.#ctor(System.String,Confluent.Kafka.Partition,Confluent.Kafka.Error)">
<summary>
Initializes a new TopicPartitionError instance.
</summary>
<param name="topic">
A Kafka topic name.
</param>
<param name="partition">
A Kafka partition value.
</param>
<param name="error">
A Kafka error.
</param>
</member>
<member name="P:Confluent.Kafka.TopicPartitionError.Topic">
<summary>
Gets the Kafka topic name.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicPartitionError.Partition">
<summary>
Gets the Kafka partition.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicPartitionError.Error">
<summary>
Gets the Kafka error.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicPartitionError.TopicPartition">
<summary>
Gets the TopicPartition component of this TopicPartitionError instance.
</summary>
</member>
<member name="M:Confluent.Kafka.TopicPartitionError.Equals(System.Object)">
<summary>
Tests whether this TopicPartitionError instance is equal to the specified object.
</summary>
<param name="obj">
The object to test.
</param>
<returns>
true if obj is a TopicPartitionError and all properties are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionError.GetHashCode">
<summary>
Returns a hash code for this TopicPartitionError.
</summary>
<returns>
An integer that specifies a hash value for this TopicPartitionError.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionError.op_Equality(Confluent.Kafka.TopicPartitionError,Confluent.Kafka.TopicPartitionError)">
<summary>
Tests whether TopicPartitionError instance a is equal to TopicPartitionError instance b.
</summary>
<param name="a">
The first TopicPartitionError instance to compare.
</param>
<param name="b">
The second TopicPartitionError instance to compare.
</param>
<returns>
true if TopicPartitionError instances a and b are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionError.op_Inequality(Confluent.Kafka.TopicPartitionError,Confluent.Kafka.TopicPartitionError)">
<summary>
Tests whether TopicPartitionError instance a is not equal to TopicPartitionError instance b.
</summary>
<param name="a">
The first TopicPartitionError instance to compare.
</param>
<param name="b">
The second TopicPartitionError instance to compare.
</param>
<returns>
true if TopicPartitionError instances a and b are not equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionError.ToString">
<summary>
Returns a string representation of the TopicPartitionError object.
</summary>
<returns>
A string representation of the TopicPartitionError object.
</returns>
</member>
<member name="T:Confluent.Kafka.TopicPartitionOffset">
<summary>
Represents a Kafka (topic, partition, offset) tuple.
</summary>
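            <example>
            A sketch of the usual commit convention: the committed offset is the
            offset of the next message to consume, i.e. last processed + 1
            (the consumer instance and cancellation token are placeholders):
            <code><![CDATA[
            var result = consumer.Consume(cancellationToken);
            // ... process result.Message ...
            consumer.Commit(new[]
            {
                new TopicPartitionOffset(result.TopicPartition, result.Offset + 1)
            });
            ]]></code>
            </example>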
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffset.#ctor(Confluent.Kafka.TopicPartition,Confluent.Kafka.Offset)">
<summary>
Initializes a new TopicPartitionOffset instance.
</summary>
<param name="tp">
Kafka topic name and partition.
</param>
<param name="offset">
A Kafka offset value.
</param>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffset.#ctor(System.String,Confluent.Kafka.Partition,Confluent.Kafka.Offset)">
<summary>
Initializes a new TopicPartitionOffset instance.
</summary>
<param name="topic">
A Kafka topic name.
</param>
<param name="partition">
A Kafka partition.
</param>
<param name="offset">
A Kafka offset value.
</param>
</member>
<member name="P:Confluent.Kafka.TopicPartitionOffset.Topic">
<summary>
Gets the Kafka topic name.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicPartitionOffset.Partition">
<summary>
Gets the Kafka partition.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicPartitionOffset.Offset">
<summary>
Gets the Kafka partition offset value.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicPartitionOffset.TopicPartition">
<summary>
Gets the TopicPartition component of this TopicPartitionOffset instance.
</summary>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffset.Equals(System.Object)">
<summary>
Tests whether this TopicPartitionOffset instance is equal to the specified object.
</summary>
<param name="obj">
The object to test.
</param>
<returns>
true if obj is a TopicPartitionOffset and all properties are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffset.GetHashCode">
<summary>
Returns a hash code for this TopicPartitionOffset.
</summary>
<returns>
An integer that specifies a hash value for this TopicPartitionOffset.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffset.op_Equality(Confluent.Kafka.TopicPartitionOffset,Confluent.Kafka.TopicPartitionOffset)">
<summary>
Tests whether TopicPartitionOffset instance a is equal to TopicPartitionOffset instance b.
</summary>
<param name="a">
The first TopicPartitionOffset instance to compare.
</param>
<param name="b">
The second TopicPartitionOffset instance to compare.
</param>
<returns>
true if TopicPartitionOffset instances a and b are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffset.op_Inequality(Confluent.Kafka.TopicPartitionOffset,Confluent.Kafka.TopicPartitionOffset)">
<summary>
Tests whether TopicPartitionOffset instance a is not equal to TopicPartitionOffset instance b.
</summary>
<param name="a">
The first TopicPartitionOffset instance to compare.
</param>
<param name="b">
The second TopicPartitionOffset instance to compare.
</param>
<returns>
true if TopicPartitionOffset instances a and b are not equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffset.ToString">
<summary>
Returns a string representation of the TopicPartitionOffset object.
</summary>
<returns>
A string that represents the TopicPartitionOffset object.
</returns>
</member>
<member name="T:Confluent.Kafka.TopicPartitionOffsetError">
<summary>
Represents a Kafka (topic, partition, offset, error) tuple.
</summary>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffsetError.#ctor(Confluent.Kafka.TopicPartition,Confluent.Kafka.Offset,Confluent.Kafka.Error)">
<summary>
Initializes a new TopicPartitionOffsetError instance.
</summary>
<param name="tp">
Kafka topic name and partition values.
</param>
<param name="offset">
A Kafka offset value.
</param>
<param name="error">
A Kafka error.
</param>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffsetError.#ctor(Confluent.Kafka.TopicPartitionOffset,Confluent.Kafka.Error)">
<summary>
Initializes a new TopicPartitionOffsetError instance.
</summary>
<param name="tpo">
Kafka topic name, partition and offset values.
</param>
<param name="error">
A Kafka error.
</param>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffsetError.#ctor(System.String,Confluent.Kafka.Partition,Confluent.Kafka.Offset,Confluent.Kafka.Error)">
<summary>
Initializes a new TopicPartitionOffsetError instance.
</summary>
<param name="topic">
A Kafka topic name.
</param>
<param name="partition">
A Kafka partition value.
</param>
<param name="offset">
A Kafka offset value.
</param>
<param name="error">
A Kafka error.
</param>
</member>
<member name="P:Confluent.Kafka.TopicPartitionOffsetError.Topic">
<summary>
Gets the Kafka topic name.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicPartitionOffsetError.Partition">
<summary>
Gets the Kafka partition.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicPartitionOffsetError.Offset">
<summary>
Gets the Kafka partition offset value.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicPartitionOffsetError.Error">
<summary>
Gets the Kafka error.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicPartitionOffsetError.TopicPartition">
<summary>
Gets the TopicPartition component of this TopicPartitionOffsetError instance.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicPartitionOffsetError.TopicPartitionOffset">
<summary>
Gets the TopicPartitionOffset component of this TopicPartitionOffsetError instance.
</summary>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffsetError.Equals(System.Object)">
<summary>
Tests whether this TopicPartitionOffsetError instance is equal to the specified object.
</summary>
<param name="obj">
The object to test.
</param>
<returns>
true if obj is a TopicPartitionOffsetError and all properties are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffsetError.GetHashCode">
<summary>
Returns a hash code for this TopicPartitionOffsetError.
</summary>
<returns>
An integer that specifies a hash value for this TopicPartitionOffsetError.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffsetError.op_Equality(Confluent.Kafka.TopicPartitionOffsetError,Confluent.Kafka.TopicPartitionOffsetError)">
<summary>
Tests whether TopicPartitionOffsetError instance a is equal to TopicPartitionOffsetError instance b.
</summary>
<param name="a">
The first TopicPartitionOffsetError instance to compare.
</param>
<param name="b">
The second TopicPartitionOffsetError instance to compare.
</param>
<returns>
true if TopicPartitionOffsetError instances a and b are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffsetError.op_Inequality(Confluent.Kafka.TopicPartitionOffsetError,Confluent.Kafka.TopicPartitionOffsetError)">
<summary>
Tests whether TopicPartitionOffsetError instance a is not equal to TopicPartitionOffsetError instance b.
</summary>
<param name="a">
The first TopicPartitionOffsetError instance to compare.
</param>
<param name="b">
The second TopicPartitionOffsetError instance to compare.
</param>
<returns>
true if TopicPartitionOffsetError instances a and b are not equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffsetError.op_Explicit(Confluent.Kafka.TopicPartitionOffsetError)~Confluent.Kafka.TopicPartitionOffset">
<summary>
Converts TopicPartitionOffsetError instance to TopicPartitionOffset instance.
NOTE: Throws KafkaException if Error.Code != ErrorCode.NoError
</summary>
<param name="tpoe">
The TopicPartitionOffsetError instance to convert.
</param>
<returns>
The TopicPartitionOffset instance converted from the TopicPartitionOffsetError instance.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionOffsetError.ToString">
<summary>
Returns a string representation of the TopicPartitionOffsetError object.
</summary>
<returns>
A string representation of the TopicPartitionOffsetError object.
</returns>
</member>
<member name="T:Confluent.Kafka.TopicPartitionTimestamp">
<summary>
Represents a Kafka (topic, partition, timestamp) tuple.
</summary>
</member>
<member name="M:Confluent.Kafka.TopicPartitionTimestamp.#ctor(Confluent.Kafka.TopicPartition,Confluent.Kafka.Timestamp)">
<summary>
Initializes a new TopicPartitionTimestamp instance.
</summary>
<param name="tp">
Kafka topic name and partition.
</param>
<param name="timestamp">
A Kafka timestamp value.
</param>
</member>
<member name="M:Confluent.Kafka.TopicPartitionTimestamp.#ctor(System.String,Confluent.Kafka.Partition,Confluent.Kafka.Timestamp)">
<summary>
Initializes a new TopicPartitionTimestamp instance.
</summary>
<param name="topic">
A Kafka topic name.
</param>
<param name="partition">
A Kafka partition.
</param>
<param name="timestamp">
A Kafka timestamp value.
</param>
</member>
<member name="P:Confluent.Kafka.TopicPartitionTimestamp.Topic">
<summary>
Gets the Kafka topic name.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicPartitionTimestamp.Partition">
<summary>
Gets the Kafka partition.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicPartitionTimestamp.Timestamp">
<summary>
Gets the Kafka timestamp.
</summary>
</member>
<member name="P:Confluent.Kafka.TopicPartitionTimestamp.TopicPartition">
<summary>
Gets the TopicPartition component of this TopicPartitionTimestamp instance.
</summary>
</member>
<member name="M:Confluent.Kafka.TopicPartitionTimestamp.Equals(System.Object)">
<summary>
Tests whether this TopicPartitionTimestamp instance is equal to the specified object.
</summary>
<param name="obj">
The object to test.
</param>
<returns>
true if obj is a TopicPartitionTimestamp and all properties are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionTimestamp.GetHashCode">
<summary>
Returns a hash code for this TopicPartitionTimestamp.
</summary>
<returns>
An integer that specifies a hash value for this TopicPartitionTimestamp.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionTimestamp.op_Equality(Confluent.Kafka.TopicPartitionTimestamp,Confluent.Kafka.TopicPartitionTimestamp)">
<summary>
Tests whether TopicPartitionTimestamp instance a is equal to TopicPartitionTimestamp instance b.
</summary>
<param name="a">
The first TopicPartitionTimestamp instance to compare.
</param>
<param name="b">
The second TopicPartitionTimestamp instance to compare.
</param>
<returns>
true if TopicPartitionTimestamp instances a and b are equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionTimestamp.op_Inequality(Confluent.Kafka.TopicPartitionTimestamp,Confluent.Kafka.TopicPartitionTimestamp)">
<summary>
Tests whether TopicPartitionTimestamp instance a is not equal to TopicPartitionTimestamp instance b.
</summary>
<param name="a">
The first TopicPartitionTimestamp instance to compare.
</param>
<param name="b">
The second TopicPartitionTimestamp instance to compare.
</param>
<returns>
true if TopicPartitionTimestamp instances a and b are not equal. false otherwise.
</returns>
</member>
<member name="M:Confluent.Kafka.TopicPartitionTimestamp.ToString">
<summary>
Returns a string representation of the TopicPartitionTimestamp object.
</summary>
<returns>
A string that represents the TopicPartitionTimestamp object.
</returns>
</member>
<member name="T:Confluent.Kafka.WatermarkOffsets">
<summary>
Represents the low and high watermark offsets of a Kafka
topic/partition.
</summary>
<remarks>
You can identify a partition that has not yet been written
to by checking if the high watermark equals 0.
</remarks>
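            <example>
            A sketch of querying watermarks and deriving a rough message count
            (the consumer instance, topic and timeout are placeholders):
            <code><![CDATA[
            var tp = new TopicPartition("my-topic", 0);
            WatermarkOffsets wm = consumer.QueryWatermarkOffsets(tp, TimeSpan.FromSeconds(5));

            bool neverWritten = wm.High.Value == 0;
            long approxCount = wm.High.Value - wm.Low.Value; // messages currently retained
            ]]></code>
            </example>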
</member>
<member name="M:Confluent.Kafka.WatermarkOffsets.#ctor(Confluent.Kafka.Offset,Confluent.Kafka.Offset)">
<summary>
Initializes a new instance of the WatermarkOffsets class
with the specified offsets.
</summary>
<param name="low">
The offset of the earliest message in the topic/partition. If
no messages have been written to the topic, the low watermark
offset is set to 0. The low watermark will also be 0 if
one message has been written to the partition (with offset 0).
</param>
<param name="high">
The high watermark offset, which is the offset of the latest
message in the topic/partition available for consumption + 1.
</param>
</member>
<member name="P:Confluent.Kafka.WatermarkOffsets.Low">
<summary>
Gets the offset of the earliest message in the topic/partition. If
no messages have been written to the topic, the low watermark
offset is set to 0. The low watermark will also be 0 if
one message has been written to the partition (with offset 0).
</summary>
</member>
<member name="P:Confluent.Kafka.WatermarkOffsets.High">
<summary>
Gets the high watermark offset, which is the offset of the latest
message in the topic/partition available for consumption + 1.
</summary>
</member>
<member name="M:Confluent.Kafka.WatermarkOffsets.ToString">
<summary>
Returns a string representation of the WatermarkOffsets object.
</summary>
<returns>
A string representation of the WatermarkOffsets object.
</returns>
</member>
</members>
</doc>