author    Piotr Smolinski <piotr.smolinski@confluent.io>    2019-05-02 16:31:32 -0700
committer Anders Broman <a.broman58@gmail.com>              2019-08-20 06:38:52 +0000
commit    eabd1b0abb38fdd92147fba8a9030fe0bcd8fc6a (patch)
tree      19cfa1f49d51f458fe65f810612cfb5fe14bfe86 /epan/dissectors/packet-kafka.c
parent    d148bbec5fb00629b20ebbcb7375ef734002efd0 (diff)
Kafka: add support for Kafka 2.3+ dissection
Existing Apache Kafka support in Wireshark ends at version 0.10. Version 0.11 (June 2017) brought significant changes to the message format, which made the existing Kafka dissector obsolete. The recently released Kafka 2.3 adds a number of features to the wire protocol, which should also be addressed.

Major changes:
* Applied Kafka protocol changes since 0.10
* Zstd-packed message decompression (since Kafka 2.1)
* Added support for Kafka over TLS decryption

Bug: 15988
Change-Id: I2bba2cfefa884638b6d4d6f32ce7d016cbba0e28
Reviewed-on: https://code.wireshark.org/review/34224
Petri-Dish: Anders Broman <a.broman58@gmail.com>
Tested-by: Petri Dish Buildbot
Reviewed-by: Anders Broman <a.broman58@gmail.com>
Diffstat (limited to 'epan/dissectors/packet-kafka.c')
-rw-r--r--  epan/dissectors/packet-kafka.c  5834
1 file changed, 5318 insertions(+), 516 deletions(-)
diff --git a/epan/dissectors/packet-kafka.c b/epan/dissectors/packet-kafka.c
index a907cda5d4..ccc90005d3 100644
--- a/epan/dissectors/packet-kafka.c
+++ b/epan/dissectors/packet-kafka.c
@@ -1,9 +1,10 @@
/* packet-kafka.c
- * Routines for Kafka Protocol dissection (version 0.8 - 0.10.1.0)
+ * Routines for Apache Kafka Protocol dissection (version 0.8 - 2.3)
* Copyright 2013, Evan Huus <eapache@gmail.com>
+ * Update from Kafka 0.10.1.0 to 2.3 by Piotr Smolinski <piotr.smolinski@confluent.io>
*
* https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
- * http://kafka.apache.org/protocol.html
+ * https://kafka.apache.org/protocol.html
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@wireshark.org>
@@ -26,12 +27,17 @@
#include <lz4frame.h>
#endif
#include "packet-tcp.h"
+#include "packet-tls.h"
void proto_register_kafka(void);
void proto_reg_handoff_kafka(void);
static int proto_kafka = -1;
+static dissector_handle_t kafka_handle;
+
static int hf_kafka_len = -1;
+static int hf_kafka_api_key = -1;
+static int hf_kafka_api_version = -1;
static int hf_kafka_request_api_key = -1;
static int hf_kafka_response_api_key = -1;
static int hf_kafka_request_api_version = -1;
@@ -41,20 +47,22 @@ static int hf_kafka_client_id = -1;
static int hf_kafka_client_host = -1;
static int hf_kafka_string_len = -1;
static int hf_kafka_bytes_len = -1;
-static int hf_kafka_array_count = -1;
static int hf_kafka_required_acks = -1;
static int hf_kafka_timeout = -1;
static int hf_kafka_topic_name = -1;
static int hf_kafka_transactional_id = -1;
+static int hf_kafka_transaction_result = -1;
+static int hf_kafka_transaction_timeout = -1;
static int hf_kafka_partition_id = -1;
static int hf_kafka_replica = -1;
static int hf_kafka_replication_factor = -1;
static int hf_kafka_isr = -1;
-static int hf_kafka_partition_leader = -1;
+static int hf_kafka_offline = -1;
static int hf_kafka_last_stable_offset = -1;
static int hf_kafka_log_start_offset = -1;
static int hf_kafka_first_offset = -1;
static int hf_kafka_producer_id = -1;
+static int hf_kafka_producer_epoch = -1;
static int hf_kafka_message_set_size = -1;
static int hf_kafka_message_size = -1;
static int hf_kafka_message_crc = -1;
@@ -62,6 +70,17 @@ static int hf_kafka_message_magic = -1;
static int hf_kafka_message_codec = -1;
static int hf_kafka_message_timestamp_type = -1;
static int hf_kafka_message_timestamp = -1;
+static int hf_kafka_batch_crc = -1;
+static int hf_kafka_batch_magic = -1;
+static int hf_kafka_batch_codec = -1;
+static int hf_kafka_batch_timestamp_type = -1;
+static int hf_kafka_batch_transactional = -1;
+static int hf_kafka_batch_control_batch = -1;
+static int hf_kafka_batch_last_offset_delta = -1;
+static int hf_kafka_batch_first_timestamp = -1;
+static int hf_kafka_batch_last_timestamp = -1;
+static int hf_kafka_batch_base_sequence = -1;
+static int hf_kafka_batch_size = -1;
static int hf_kafka_message_key = -1;
static int hf_kafka_message_value = -1;
static int hf_kafka_message_value_compressed = -1;
@@ -69,16 +88,22 @@ static int hf_kafka_message_compression_reduction = -1;
static int hf_kafka_request_frame = -1;
static int hf_kafka_response_frame = -1;
static int hf_kafka_consumer_group = -1;
+static int hf_kafka_consumer_group_instance = -1;
+static int hf_kafka_coordinator_key = -1;
+static int hf_kafka_coordinator_type = -1;
static int hf_kafka_group_state = -1;
static int hf_kafka_offset = -1;
static int hf_kafka_offset_time = -1;
static int hf_kafka_max_offsets = -1;
static int hf_kafka_metadata = -1;
static int hf_kafka_error = -1;
+static int hf_kafka_error_message = -1;
static int hf_kafka_broker_nodeid = -1;
+static int hf_kafka_broker_epoch = -1;
static int hf_kafka_broker_host = -1;
+static int hf_kafka_listener_name = -1;
static int hf_kafka_broker_port = -1;
-static int hf_kafka_broker_rack = -1;
+static int hf_kafka_rack = -1;
static int hf_kafka_broker_security_protocol_type = -1;
static int hf_kafka_cluster_id = -1;
static int hf_kafka_controller_id = -1;
@@ -87,6 +112,7 @@ static int hf_kafka_delete_partitions = -1;
static int hf_kafka_leader_id = -1;
static int hf_kafka_group_leader_id = -1;
static int hf_kafka_leader_epoch = -1;
+static int hf_kafka_current_leader_epoch = -1;
static int hf_kafka_is_internal = -1;
static int hf_kafka_isolation_level = -1;
static int hf_kafka_min_bytes = -1;
@@ -112,21 +138,74 @@ static int hf_kafka_config_key = -1;
static int hf_kafka_config_value = -1;
static int hf_kafka_commit_timestamp = -1;
static int hf_kafka_retention_time = -1;
+static int hf_kafka_forgotten_topic_name = -1;
+static int hf_kafka_forgotten_topic_partition = -1;
+static int hf_kafka_fetch_session_id = -1;
+static int hf_kafka_fetch_session_epoch = -1;
+static int hf_kafka_record_header_key = -1;
+static int hf_kafka_record_header_value = -1;
+static int hf_kafka_record_headers_count = -1;
+static int hf_kafka_record_length = -1;
+static int hf_kafka_record_attributes = -1;
+static int hf_kafka_allow_auto_topic_creation = -1;
+static int hf_kafka_validate_only = -1;
+static int hf_kafka_coordinator_epoch = -1;
+static int hf_kafka_sasl_auth_bytes = -1;
+static int hf_kafka_session_lifetime_ms = -1;
+static int hf_kafka_acl_resource_type = -1;
+static int hf_kafka_acl_resource_name = -1;
+static int hf_kafka_acl_resource_pattern_type = -1;
+static int hf_kafka_acl_principal = -1;
+static int hf_kafka_acl_host = -1;
+static int hf_kafka_acl_operation = -1;
+static int hf_kafka_acl_permission_type = -1;
+static int hf_kafka_config_resource_type = -1;
+static int hf_kafka_config_resource_name = -1;
+static int hf_kafka_config_include_synonyms = -1;
+static int hf_kafka_config_source = -1;
+static int hf_kafka_config_readonly = -1;
+static int hf_kafka_config_default = -1;
+static int hf_kafka_config_sensitive = -1;
+static int hf_kafka_config_operation = -1;
+static int hf_kafka_log_dir = -1;
+static int hf_kafka_segment_size = -1;
+static int hf_kafka_offset_lag = -1;
+static int hf_kafka_future = -1;
+static int hf_kafka_partition_count = -1;
+static int hf_kafka_token_max_life_time = -1;
+static int hf_kafka_token_renew_time = -1;
+static int hf_kafka_token_expiry_time = -1;
+static int hf_kafka_token_principal_type = -1;
+static int hf_kafka_token_principal_name = -1;
+static int hf_kafka_token_issue_timestamp = -1;
+static int hf_kafka_token_expiry_timestamp = -1;
+static int hf_kafka_token_max_timestamp = -1;
+static int hf_kafka_token_id = -1;
+static int hf_kafka_token_hmac = -1;
+static int hf_kafka_include_cluster_authorized_ops = -1;
+static int hf_kafka_include_topic_authorized_ops = -1;
+static int hf_kafka_include_group_authorized_ops = -1;
+static int hf_kafka_cluster_authorized_ops = -1;
+static int hf_kafka_topic_authorized_ops = -1;
+static int hf_kafka_group_authorized_ops = -1;
+static int hf_kafka_election_type = -1;
static int ett_kafka = -1;
+static int ett_kafka_batch = -1;
static int ett_kafka_message = -1;
static int ett_kafka_message_set = -1;
static int ett_kafka_replicas = -1;
static int ett_kafka_isrs = -1;
+static int ett_kafka_offline = -1;
static int ett_kafka_broker = -1;
static int ett_kafka_brokers = -1;
static int ett_kafka_broker_end_point = -1;
+static int ett_kafka_markers = -1;
+static int ett_kafka_marker = -1;
static int ett_kafka_topics = -1;
static int ett_kafka_topic = -1;
-static int ett_kafka_request_topic = -1;
-static int ett_kafka_request_partition = -1;
-static int ett_kafka_response_topic = -1;
-static int ett_kafka_response_partition = -1;
+static int ett_kafka_partitions = -1;
+static int ett_kafka_partition = -1;
static int ett_kafka_api_version = -1;
static int ett_kafka_group_protocols = -1;
static int ett_kafka_group_protocol = -1;
@@ -134,11 +213,40 @@ static int ett_kafka_group_members = -1;
static int ett_kafka_group_member = -1;
static int ett_kafka_group_assignments = -1;
static int ett_kafka_group_assignment = -1;
+static int ett_kafka_groups = -1;
static int ett_kafka_group = -1;
static int ett_kafka_sasl_enabled_mechanisms = -1;
static int ett_kafka_replica_assignment = -1;
static int ett_kafka_configs = -1;
static int ett_kafka_config = -1;
+static int ett_kafka_request_forgotten_topic = -1;
+static int ett_kafka_record = -1;
+static int ett_kafka_record_headers = -1;
+static int ett_kafka_record_headers_header = -1;
+static int ett_kafka_aborted_transactions = -1;
+static int ett_kafka_aborted_transaction = -1;
+static int ett_kafka_resources = -1;
+static int ett_kafka_resource = -1;
+static int ett_kafka_acls = -1;
+static int ett_kafka_acl = -1;
+static int ett_kafka_acl_creations = -1;
+static int ett_kafka_acl_creation = -1;
+static int ett_kafka_acl_filters = -1;
+static int ett_kafka_acl_filter = -1;
+static int ett_kafka_acl_filter_matches = -1;
+static int ett_kafka_acl_filter_match = -1;
+static int ett_kafka_config_synonyms = -1;
+static int ett_kafka_config_synonym = -1;
+static int ett_kafka_config_entries = -1;
+static int ett_kafka_config_entry = -1;
+static int ett_kafka_log_dirs = -1;
+static int ett_kafka_log_dir = -1;
+static int ett_kafka_renewers = -1;
+static int ett_kafka_renewer = -1;
+static int ett_kafka_owners = -1;
+static int ett_kafka_owner = -1;
+static int ett_kafka_tokens = -1;
+static int ett_kafka_token = -1;
static expert_field ei_kafka_request_missing = EI_INIT;
static expert_field ei_kafka_unknown_api_key = EI_INIT;
@@ -146,6 +254,7 @@ static expert_field ei_kafka_unsupported_api_version = EI_INIT;
static expert_field ei_kafka_message_decompress = EI_INIT;
static expert_field ei_kafka_bad_string_length = EI_INIT;
static expert_field ei_kafka_bad_bytes_length = EI_INIT;
+static expert_field ei_kafka_unknown_message_magic = EI_INIT;
typedef gint16 kafka_api_key_t;
typedef gint16 kafka_api_version_t;
@@ -161,69 +270,154 @@ typedef struct _kafka_api_info_t {
kafka_api_version_t max_version;
} kafka_api_info_t;
-#define KAFKA_PRODUCE 0
-#define KAFKA_FETCH 1
-#define KAFKA_OFFSETS 2
-#define KAFKA_METADATA 3
-#define KAFKA_LEADER_AND_ISR 4
-#define KAFKA_STOP_REPLICA 5
-#define KAFKA_UPDATE_METADATA 6
-#define KAFKA_CONTROLLED_SHUTDOWN 7
-#define KAFKA_OFFSET_COMMIT 8
-#define KAFKA_OFFSET_FETCH 9
-#define KAFKA_GROUP_COORDINATOR 10
-#define KAFKA_JOIN_GROUP 11
-#define KAFKA_HEARTBEAT 12
-#define KAFKA_LEAVE_GROUP 13
-#define KAFKA_SYNC_GROUP 14
-#define KAFKA_DESCRIBE_GROUPS 15
-#define KAFKA_LIST_GROUPS 16
-#define KAFKA_SASL_HANDSHAKE 17
-#define KAFKA_API_VERSIONS 18
-#define KAFKA_CREATE_TOPICS 19
-#define KAFKA_DELETE_TOPICS 20
+#define KAFKA_TCP_DEFAULT_RANGE "9092"
+
+#define KAFKA_PRODUCE 0
+#define KAFKA_FETCH 1
+#define KAFKA_OFFSETS 2
+#define KAFKA_METADATA 3
+#define KAFKA_LEADER_AND_ISR 4
+#define KAFKA_STOP_REPLICA 5
+#define KAFKA_UPDATE_METADATA 6
+#define KAFKA_CONTROLLED_SHUTDOWN 7
+#define KAFKA_OFFSET_COMMIT 8
+#define KAFKA_OFFSET_FETCH 9
+#define KAFKA_FIND_COORDINATOR 10
+#define KAFKA_JOIN_GROUP 11
+#define KAFKA_HEARTBEAT 12
+#define KAFKA_LEAVE_GROUP 13
+#define KAFKA_SYNC_GROUP 14
+#define KAFKA_DESCRIBE_GROUPS 15
+#define KAFKA_LIST_GROUPS 16
+#define KAFKA_SASL_HANDSHAKE 17
+#define KAFKA_API_VERSIONS 18
+#define KAFKA_CREATE_TOPICS 19
+#define KAFKA_DELETE_TOPICS 20
+#define KAFKA_DELETE_RECORDS 21
+#define KAFKA_INIT_PRODUCER_ID 22
+#define KAFKA_OFFSET_FOR_LEADER_EPOCH 23
+#define KAFKA_ADD_PARTITIONS_TO_TXN 24
+#define KAFKA_ADD_OFFSETS_TO_TXN 25
+#define KAFKA_END_TXN 26
+#define KAFKA_WRITE_TXN_MARKERS 27
+#define KAFKA_TXN_OFFSET_COMMIT 28
+#define KAFKA_DESCRIBE_ACLS 29
+#define KAFKA_CREATE_ACLS 30
+#define KAFKA_DELETE_ACLS 31
+#define KAFKA_DESCRIBE_CONFIGS 32
+#define KAFKA_ALTER_CONFIGS 33
+#define KAFKA_ALTER_REPLICA_LOG_DIRS 34
+#define KAFKA_DESCRIBE_LOG_DIRS 35
+#define KAFKA_SASL_AUTHENTICATE 36
+#define KAFKA_CREATE_PARTITIONS 37
+#define KAFKA_CREATE_DELEGATION_TOKEN 38
+#define KAFKA_RENEW_DELEGATION_TOKEN 39
+#define KAFKA_EXPIRE_DELEGATION_TOKEN 40
+#define KAFKA_DESCRIBE_DELEGATION_TOKEN 41
+#define KAFKA_DELETE_GROUPS 42
+#define KAFKA_ELECT_LEADERS 43
+#define KAFKA_INC_ALTER_CONFIGS 44
+#define KAFKA_ALTER_PARTITION_REASSIGNMENTS 45
+#define KAFKA_LIST_PARTITION_REASSIGNMENTS 46
+
+/*
+ * Check for message changes here:
+ * https://github.com/apache/kafka/tree/trunk/clients/src/main/resources/common/message
+ */
static const kafka_api_info_t kafka_apis[] = {
- { KAFKA_PRODUCE, "Produce",
+ { KAFKA_PRODUCE, "Produce",
+ 0, 7 },
+ { KAFKA_FETCH, "Fetch",
+ 0, 11 },
+ { KAFKA_OFFSETS, "Offsets",
0, 5 },
- { KAFKA_FETCH, "Fetch",
- 0, 6 },
- { KAFKA_OFFSETS, "Offsets",
+ { KAFKA_METADATA, "Metadata",
+ 0, 8 },
+ { KAFKA_LEADER_AND_ISR, "LeaderAndIsr",
+ 0, 3 },
+ { KAFKA_STOP_REPLICA, "StopReplica",
0, 1 },
- { KAFKA_METADATA, "Metadata",
+ { KAFKA_UPDATE_METADATA, "UpdateMetadata",
+ 0, 5 },
+ { KAFKA_CONTROLLED_SHUTDOWN, "ControlledShutdown",
0, 2 },
- { KAFKA_LEADER_AND_ISR, "LeaderAndIsr",
- 0, 0 },
- { KAFKA_STOP_REPLICA, "StopReplica",
- 0, 0 },
- { KAFKA_UPDATE_METADATA, "UpdateMetadata",
+ { KAFKA_OFFSET_COMMIT, "OffsetCommit",
+ 0, 7 },
+ { KAFKA_OFFSET_FETCH, "OffsetFetch",
+ 0, 5 },
+ { KAFKA_FIND_COORDINATOR, "FindCoordinator",
+ 0, 2 },
+ { KAFKA_JOIN_GROUP, "JoinGroup",
+ 0, 5 },
+ { KAFKA_HEARTBEAT, "Heartbeat",
+ 0, 3 },
+ { KAFKA_LEAVE_GROUP, "LeaveGroup",
+ 0, 3 },
+ { KAFKA_SYNC_GROUP, "SyncGroup",
+ 0, 3 },
+ { KAFKA_DESCRIBE_GROUPS, "DescribeGroups",
+ 0, 4 },
+ { KAFKA_LIST_GROUPS, "ListGroups",
+ 0, 2 },
+ { KAFKA_SASL_HANDSHAKE, "SaslHandshake",
+ 0, 1 },
+ { KAFKA_API_VERSIONS, "ApiVersions",
0, 2 },
- { KAFKA_CONTROLLED_SHUTDOWN, "ControlledShutdown",
- 1, 1 },
- { KAFKA_OFFSET_COMMIT, "OffsetCommit",
+ { KAFKA_CREATE_TOPICS, "CreateTopics",
+ 0, 4 },
+ { KAFKA_DELETE_TOPICS, "DeleteTopics",
0, 3 },
- { KAFKA_OFFSET_FETCH, "OffsetFetch",
+ { KAFKA_DELETE_RECORDS, "DeleteRecords",
0, 1 },
- { KAFKA_GROUP_COORDINATOR, "GroupCoordinator",
- 0, 0 },
- { KAFKA_JOIN_GROUP, "JoinGroup",
+ { KAFKA_INIT_PRODUCER_ID, "InitProducerId",
0, 1 },
- { KAFKA_HEARTBEAT, "Heartbeat",
+ { KAFKA_OFFSET_FOR_LEADER_EPOCH, "OffsetForLeaderEpoch",
+ 0, 3 },
+ { KAFKA_ADD_PARTITIONS_TO_TXN, "AddPartitionsToTxn",
0, 1 },
- { KAFKA_LEAVE_GROUP, "LeaveGroup",
- 0, 0 },
- { KAFKA_SYNC_GROUP, "SyncGroup",
- 0, 0 },
- { KAFKA_DESCRIBE_GROUPS, "DescribeGroups",
- 0, 0 },
- { KAFKA_LIST_GROUPS, "ListGroups",
- 0, 0 },
- { KAFKA_SASL_HANDSHAKE, "SaslHandshake",
+ { KAFKA_ADD_OFFSETS_TO_TXN, "AddOffsetsToTxn",
+ 0, 1 },
+ { KAFKA_END_TXN, "EndTxn",
+ 0, 1 },
+ { KAFKA_WRITE_TXN_MARKERS, "WriteTxnMarkers",
0, 0 },
- { KAFKA_API_VERSIONS, "ApiVersions",
+ { KAFKA_TXN_OFFSET_COMMIT, "TxnOffsetCommit",
+ 0, 2 },
+ { KAFKA_DESCRIBE_ACLS, "DescribeAcls",
+ 0, 1 },
+ { KAFKA_CREATE_ACLS, "CreateAcls",
+ 0, 1 },
+ { KAFKA_DELETE_ACLS, "DeleteAcls",
+ 0, 1 },
+ { KAFKA_DESCRIBE_CONFIGS, "DescribeConfigs",
+ 0, 2 },
+ { KAFKA_ALTER_CONFIGS, "AlterConfigs",
+ 0, 1 },
+ { KAFKA_ALTER_REPLICA_LOG_DIRS, "AlterReplicaLogDirs",
+ 0, 1 },
+ { KAFKA_DESCRIBE_LOG_DIRS, "DescribeLogDirs",
+ 0, 1 },
+ { KAFKA_SASL_AUTHENTICATE, "SaslAuthenticate",
+ 0, 1 },
+ { KAFKA_CREATE_PARTITIONS, "CreatePartitions",
+ 0, 1 },
+ { KAFKA_CREATE_DELEGATION_TOKEN, "CreateDelegationToken",
+ 0, 1 },
+ { KAFKA_RENEW_DELEGATION_TOKEN, "RenewDelegationToken",
+ 0, 1 },
+ { KAFKA_EXPIRE_DELEGATION_TOKEN, "ExpireDelegationToken",
+ 0, 1 },
+ { KAFKA_DESCRIBE_DELEGATION_TOKEN, "DescribeDelegationToken",
+ 0, 1 },
+ { KAFKA_DELETE_GROUPS, "DeleteGroups",
+ 0, 1 },
+ { KAFKA_ELECT_LEADERS, "ElectLeaders",
+ 0, 1 },
+ { KAFKA_INC_ALTER_CONFIGS, "IncrementalAlterConfigs",
0, 0 },
- { KAFKA_CREATE_TOPICS, "CreateTopics",
+ { KAFKA_ALTER_PARTITION_REASSIGNMENTS, "AlterPartitionReassignments",
0, 0 },
- { KAFKA_DELETE_TOPICS, "DeleteTopics",
+ { KAFKA_LIST_PARTITION_REASSIGNMENTS, "ListPartitionReassignments",
0, 0 },
};
@@ -232,6 +426,10 @@ static const kafka_api_info_t kafka_apis[] = {
*/
static value_string kafka_api_names[array_length(kafka_apis) + 1];
+/*
+ * For the current list of error codes check here:
+ * https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java
+ */
static const value_string kafka_errors[] = {
{ -1, "Unexpected Server Error" },
{ 0, "No Error" },
@@ -247,7 +445,7 @@ static const value_string kafka_errors[] = {
{ 11, "Stale Controller Epoch Code" },
{ 12, "Offset Metadata Too Large" },
{ 14, "Offsets Load In Progress" },
- { 15, "Consumer Coordinator Not Available" },
+ { 15, "The Coordinator is not Available" },
{ 16, "Not Coordinator For Consumer" },
{ 17, "Invalid topic" },
{ 18, "Message batch larger than configured server segment size" },
@@ -275,6 +473,49 @@ static const value_string kafka_errors[] = {
{ 40, "Invalid configuration" },
{ 41, "Not controller" },
{ 42, "Invalid request" },
+ { 43, "Unsupported for Message Format" },
+ { 44, "Policy Violation" },
+ { 45, "Out of Order Sequence Number" },
+ { 46, "Duplicate Sequence Number" },
+ { 47, "Invalid Producer Epoch" },
+ { 48, "Invalid Transaction State" },
+ { 49, "Invalid Producer ID Mapping" },
+ { 50, "Invalid Transaction Timeout" },
+ { 51, "Concurrent Transactions" },
+ { 52, "Transaction Coordinator Fenced" },
+ { 53, "Transactional ID Authorization Failed" },
+ { 54, "Security Disabled" },
+ { 55, "Operation not Attempted" },
+ { 56, "Kafka Storage Error" },
+ { 57, "Log Directory not Found" },
+ { 58, "SASL Authentication failed" },
+ { 59, "Unknown Producer ID" },
+ { 60, "Partition Reassignment in Progress" },
+ { 61, "Delegation Token Auth Disabled" },
+ { 62, "Delegation Token not Found" },
+ { 63, "Delegation Token Owner Mismatch" },
+ { 64, "Delegation Token Request not Allowed" },
+ { 65, "Delegation Token Authorization Failed" },
+ { 66, "Delegation Token Expired" },
+ { 67, "Supplied Principal Type Unsupported" },
+ { 68, "Not Empty Group" },
+ { 69, "Group ID not Found" },
+ { 70, "Fetch Session ID not Found" },
+ { 71, "Invalid Fetch Session Epoch" },
+ { 72, "Listener not Found" },
+ { 73, "Topic Deletion Disabled" },
+ { 74, "Fenced Leader Epoch" },
+ { 75, "Unknown Leader Epoch" },
+ { 76, "Unsupported Compression Type" },
+ { 77, "Stale Broker Epoch" },
+ { 78, "Offset not Available" },
+ { 79, "Member ID Required" },
+ { 80, "Preferred Leader not Available" },
+ { 81, "Group Max Size Reached" },
+ { 82, "Fenced Instance ID" },
+ { 83, "Eligible topic partition leaders are not available" },
+ { 84, "Leader election not needed for topic partition" },
+ { 85, "No partition reassignment is in progress" },
{ 0, NULL }
};
@@ -293,11 +534,13 @@ static const value_string kafka_acks[] = {
#define KAFKA_MESSAGE_CODEC_GZIP 1
#define KAFKA_MESSAGE_CODEC_SNAPPY 2
#define KAFKA_MESSAGE_CODEC_LZ4 3
+#define KAFKA_MESSAGE_CODEC_ZSTD 4
static const value_string kafka_message_codecs[] = {
{ KAFKA_MESSAGE_CODEC_NONE, "None" },
{ KAFKA_MESSAGE_CODEC_GZIP, "Gzip" },
{ KAFKA_MESSAGE_CODEC_SNAPPY, "Snappy" },
{ KAFKA_MESSAGE_CODEC_LZ4, "LZ4" },
+ { KAFKA_MESSAGE_CODEC_ZSTD, "Zstd" },
{ 0, NULL }
};
#ifdef HAVE_SNAPPY
@@ -311,6 +554,26 @@ static const value_string kafka_message_timestamp_types[] = {
{ 0, NULL }
};
+#define KAFKA_BATCH_TRANSACTIONAL_MASK 0x10
+static const value_string kafka_batch_transactional_values[] = {
+ { 0, "Non-transactional" },
+ { 1, "Transactional" },
+ { 0, NULL }
+};
+
+#define KAFKA_BATCH_CONTROL_BATCH_MASK 0x20
+static const value_string kafka_batch_control_batch_values[] = {
+ { 0, "Data batch" },
+ { 1, "Control batch" },
+ { 0, NULL }
+};
+
+static const value_string kafka_coordinator_types[] = {
+ { 0, "Group" },
+ { 1, "Transaction" },
+ { 0, NULL }
+};
+
static const value_string kafka_security_protocol_types[] = {
{ 0, "PLAINTEXT" },
{ 1, "SSL" },
@@ -319,8 +582,93 @@ static const value_string kafka_security_protocol_types[] = {
{ 0, NULL }
};
-/* List/range of TCP ports to register */
-static range_t *current_kafka_tcp_range = NULL;
+static const value_string kafka_isolation_levels[] = {
+ { 0, "Read Uncommitted" },
+ { 1, "Read Committed" },
+ { 0, NULL }
+};
+
+static const value_string kafka_transaction_results[] = {
+ { 0, "ABORT" },
+ { 1, "COMMIT" },
+ { 0, NULL }
+};
+
+static const value_string acl_resource_types[] = {
+ { 0, "Unknown" },
+ { 1, "Any" },
+ { 2, "Topic" },
+ { 3, "Group" },
+ { 4, "Cluster" },
+ { 5, "TransactionalId" },
+ { 6, "DelegationToken" },
+ { 0, NULL }
+};
+
+static const value_string acl_resource_pattern_types[] = {
+ { 0, "Unknown" },
+ { 1, "Any" },
+ { 2, "Match" },
+ { 3, "Literal" },
+ { 4, "Prefixed" },
+ { 0, NULL }
+};
+
+static const value_string acl_operations[] = {
+ { 0, "Unknown" },
+ { 1, "Any" },
+ { 2, "All" },
+ { 3, "Read" },
+ { 4, "Write" },
+ { 5, "Create" },
+ { 6, "Delete" },
+ { 7, "Alter" },
+ { 8, "Describe" },
+ { 9, "Cluster Action" },
+ { 10, "Describe Configs" },
+ { 11, "Alter Configs" },
+ { 12, "Idempotent Write" },
+ { 0, NULL }
+};
+
+static const value_string acl_permission_types[] = {
+ { 0, "Unknown" },
+ { 1, "Any" },
+ { 2, "Deny" },
+ { 3, "Allow" },
+ { 0, NULL }
+};
+
+static const value_string config_resource_types[] = {
+ { 0, "Unknown" },
+ { 2, "Topic" },
+ { 4, "Broker" },
+ { 0, NULL }
+};
+
+static const value_string config_sources[] = {
+ { 0, "Unknown" },
+ { 1, "Topic" },
+ { 2, "Broker (Dynamic)" },
+ { 3, "Broker (Dynamic/Default)" },
+ { 4, "Broker (Static)" },
+ { 5, "Default" },
+ { 0, NULL }
+};
+
+static const value_string config_operations[] = {
+ { 0, "Set" },
+ { 1, "Delete" },
+ { 2, "Append" },
+ { 3, "Subtract" },
+ { 0, NULL }
+};
+
+static const value_string election_types[] = {
+ { 0, "Preferred" },
+ { 1, "Unclean" },
+ { 0, NULL }
+};
/* Whether to show the lengths of string and byte fields in the protocol tree.
* It can be useful to see these, but they do clutter up the display, so disable
@@ -500,7 +848,7 @@ static void
kafka_check_supported_api_key(packet_info *pinfo, proto_item *ti, kafka_query_response_t *matcher)
{
if (kafka_get_api_info(matcher->api_key) == NULL) {
- col_append_fstr(pinfo->cinfo, COL_INFO, " [Unknown API key]");
+ col_append_str(pinfo->cinfo, COL_INFO, " [Unknown API key]");
expert_add_info_format(pinfo, ti, &ei_kafka_unknown_api_key,
"%s API key", kafka_api_key_to_str(matcher->api_key));
}
@@ -513,7 +861,7 @@ kafka_check_supported_api_version(packet_info *pinfo, proto_item *ti, kafka_quer
api_info = kafka_get_api_info(matcher->api_key);
if (api_info != NULL && !kafka_is_api_version_supported(api_info, matcher->api_version)) {
- col_append_fstr(pinfo->cinfo, COL_INFO, " [Unsupported API version]");
+ col_append_str(pinfo->cinfo, COL_INFO, " [Unsupported API version]");
if (api_info->min_version == -1) {
expert_add_info_format(pinfo, ti, &ei_kafka_unsupported_api_version,
"Unsupported %s version.",
@@ -539,24 +887,37 @@ get_kafka_pdu_len(packet_info *pinfo _U_, tvbuff_t *tvb, int offset, void *data
}
static int
-dissect_kafka_array(proto_tree *tree, tvbuff_t *tvb, packet_info *pinfo, int offset,
+dissect_kafka_array_ref(proto_tree *tree, tvbuff_t *tvb, packet_info *pinfo, int offset,
kafka_api_version_t api_version,
- int(*func)(tvbuff_t*, packet_info*, proto_tree*, int, kafka_api_version_t))
+ int(*func)(tvbuff_t*, packet_info*, proto_tree*, int, kafka_api_version_t),
+ int *p_count)
{
gint32 count, i;
count = (gint32) tvb_get_ntohl(tvb, offset);
- proto_tree_add_item(tree, hf_kafka_array_count, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
for (i=0; i<count; i++) {
offset = func(tvb, pinfo, tree, offset, api_version);
}
+ if (p_count != NULL) {
+ *p_count = count;
+ }
+
return offset;
}
static int
+dissect_kafka_array(proto_tree *tree, tvbuff_t *tvb, packet_info *pinfo, int offset,
+ kafka_api_version_t api_version,
+ int(*func)(tvbuff_t*, packet_info*, proto_tree*, int, kafka_api_version_t))
+{
+ return dissect_kafka_array_ref(tree, tvb, pinfo, offset, api_version, func, NULL);
+}
+
+
+static int
dissect_kafka_string(proto_tree *tree, int hf_item, tvbuff_t *tvb, packet_info *pinfo, int offset,
int *p_string_offset, int *p_string_len)
{
@@ -632,46 +993,243 @@ dissect_kafka_bytes(proto_tree *tree, int hf_item, tvbuff_t *tvb, packet_info *p
return offset;
}
-static tvbuff_t *
-kafka_get_bytes(proto_tree *tree, tvbuff_t *tvb, packet_info *pinfo, int offset)
+static int
+dissect_kafka_timestamp(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int hf_item, int offset)
{
- gint32 len;
- proto_item *pi;
+ nstime_t nstime;
+ guint64 milliseconds;
- len = (gint32) tvb_get_ntohl(tvb, offset);
- pi = proto_tree_add_item(tree, hf_kafka_bytes_len, tvb, offset, 4, ENC_BIG_ENDIAN);
- offset += 4;
+ milliseconds = tvb_get_ntoh64(tvb, offset);
- if (len < -1) {
- expert_add_info(pinfo, pi, &ei_kafka_bad_bytes_length);
- return NULL;
+ nstime.secs = (time_t) (milliseconds / 1000);
+ nstime.nsecs = (int) ((milliseconds % 1000) * 1000000);
+
+ proto_tree_add_time(tree, hf_item, tvb, offset, 8, &nstime);
+ offset += 8;
+
+ return offset;
+}
+
+/*
+ * Function: tvb_read_kafka_varint32
+ * ---------------------------------------------------
+ * Reads 32-bit integer encoded as zig-zag. The 32-bit integer
+ * takes up to 5 octets (32 = 4*7+4).
+ *
+ * tvb: actual data buffer
+ * offset: offset in the buffer where the varint is to be found
+ * p_val: pointer to a variable to store the decoded value
+ * p_len: pointer to a variable to store the length of the encoded value
+ *
+ * returns: offset of the next field in the message
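+ *
+ * Example (zig-zag decoding, as implemented below): octet 0x06
+ * decodes to 3, octet 0x05 decodes to -3, and the octet pair
+ * 0xac 0x02 decodes to 150.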
+ */
+static gint32
+tvb_read_kafka_varint32(tvbuff_t *tvb, int offset, gint32 *p_val, int *p_len)
+{
+ gint32 v = 0;
+ guint8 p = 0;
+ int i = 0;
+
+ do {
+ p = tvb_get_guint8(tvb, offset+i);
+ v += (p & 0x7f) << (i*7);
+ i += 1;
+ } while ((p&0x80)!=0 && i<5);
+
+ // 32-bit integer in zig-zag can take up to 5 octets
+ // the last octet can take at most 4 bits
+
+ // either continuation bit is set or there are more than 32 bits
+ DISSECTOR_ASSERT((p&0x80) == 0);
+ DISSECTOR_ASSERT(i < 5 || (p&0x70) == 0);
+
+ if (p_val != NULL) {
+ *p_val = (v>>1) ^ ((v & 1) ? -1 : 0);
}
- else if (len == -1) {
- return NULL;
+ if (p_len != NULL) {
+ *p_len = i;
}
- else {
- return tvb_new_subset_length(tvb, offset, len);
+
+ return offset+i;
+}
+
+/*
+ * Function: tvb_read_kafka_varint64
+ * ---------------------------------------------------
+ * Reads 64-bit integer encoded as zig-zag. The 64-bit integer
+ * takes up to 10 octets (64 = 9*7+1).
+ *
+ * tvb: actual data buffer
+ * offset: offset in the buffer where the varint is to be found
+ * p_val: pointer to a variable to store the decoded value
+ * p_len: pointer to a variable to store the length of the encoded value
+ *
+ * returns: offset of the next field in the message
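+ *
+ * Example (zig-zag decoding, as implemented below): octet 0x01
+ * decodes to -1 and octet 0x02 decodes to 1.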
+ */
+
+static gint64
+tvb_read_kafka_varint64(tvbuff_t *tvb, int offset, gint64 *p_val, int *p_len)
+{
+ gint64 v = 0;
+ guint8 p = 0;
+ int i = 0;
+
+ do {
+ p = tvb_get_guint8(tvb, offset+i);
+ v += (p & 0x7f) << (i*7);
+ i += 1;
+ } while ((p&0x80)!=0 && i<10);
+
+ // 64-bit integer in zig-zag can take up to 10 octets
+ // the last octet can take at most 1 bit
+
+ // either continuation bit is set or there are more than 64 bits
+ DISSECTOR_ASSERT((p&0x80) == 0);
+ DISSECTOR_ASSERT(i < 10 || (p&0x7e) == 0);
+
+ if (p_val != NULL) {
+ *p_val = (v>>1) ^ ((v & 1) ? -1 : 0);
+ }
+ if (p_len != NULL) {
+ *p_len = i;
}
+
+ return offset+i;
}
static int
-dissect_kafka_timestamp(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int hf_item, int offset)
+dissect_kafka_varint(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int hf_item, int offset)
+{
+ gint64 val;
+ int len;
+ tvb_read_kafka_varint64(tvb, offset, &val, &len);
+ proto_tree_add_int64(tree, hf_item, tvb, offset, len, val);
+ return offset+len;
+}
+
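+/*
+ * In the v2 format each record stores its timestamp and offset as
+ * zig-zag deltas against the first timestamp and base offset of the
+ * enclosing batch; the two helpers below restore the absolute values.
+ */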
+static int
+dissect_kafka_timestamp_delta(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int hf_item, int offset, guint64 first_timestamp)
{
nstime_t nstime;
guint64 milliseconds;
+ guint64 val;
+ int len;
- milliseconds = tvb_get_ntoh64(tvb, offset);
+ tvb_read_kafka_varint64(tvb, offset, &val, &len);
+
+ milliseconds = first_timestamp + val;
nstime.secs = (time_t) (milliseconds / 1000);
- nstime.nsecs = ((int)milliseconds % 1000) * 1000000;
+ nstime.nsecs = (int) ((milliseconds % 1000) * 1000000);
- proto_tree_add_time(tree, hf_item, tvb, offset, 8, &nstime);
- offset += 8;
+ proto_tree_add_time(tree, hf_item, tvb, offset, len, &nstime);
- return offset;
+ return offset+len;
+}
+
+static int
+dissect_kafka_offset_delta(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int hf_item, int offset, guint64 base_offset)
+{
+ gint64 val;
+ int len;
+
+ tvb_read_kafka_varint64(tvb, offset, &val, &len);
+
+ proto_tree_add_int64(tree, hf_item, tvb, offset, len, base_offset+val);
+
+ return offset+len;
+}
+
+/*
+ * Function: dissect_kafka_string_new
+ * ---------------------------------------------------
+ * Decodes UTF string using the new length encoding. This format is used
+ * in the v2 message encoding, where the string length is encoded using
+ * ProtoBuf's ZigZag integer format (inspired by Avro). The main advantage
+ * of ZigZag is very compact representation for small numbers.
+ *
+ * tvb: actual data buffer
+ * pinfo: packet information (unused)
+ * tree: protocol information tree to append the item
+ * hf_item: protocol information item descriptor index
+ * offset: offset in the buffer where the string length is to be found
+ * p_string_offset: pointer to a variable to store the actual string begin
+ * p_string_length: pointer to a variable to store the actual string length
+ *
+ * returns: offset of the next field in the message
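+ *
+ * Example: the string "abc" is serialized as the length octet 0x06
+ * (zig-zag for 3) followed by the three UTF-8 octets 0x61 0x62 0x63.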
+ */
+static int
+dissect_kafka_string_new(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int hf_item, int offset, int *p_string_offset, int *p_string_length)
+{
+ gint val;
+ gint len;
+
+ tvb_read_kafka_varint32(tvb, offset, &val, &len);
+
+ DISSECTOR_ASSERT(val >= 0);
+
+ proto_tree_add_item(tree, hf_item, tvb, offset+len, val, ENC_UTF_8|ENC_NA);
+
+ if (p_string_offset != NULL) {
+ *p_string_offset = offset+len;
+ }
+ if (p_string_length != NULL) {
+ *p_string_length = val;
+ }
+
+ return offset+len+val;
+}
+
+/*
+ * Function: dissect_kafka_bytes_new
+ * ---------------------------------------------------
+ * Decodes byte buffer using the new length encoding. This format is used
+ * in the v2 message encoding, where the buffer length is encoded using
+ * ProtoBuf's ZigZag integer format (inspired by Avro). The main advantage
+ * of ZigZag is very compact representation for small numbers.
+ *
+ * tvb: actual data buffer
+ * pinfo: packet information (unused)
+ * tree: protocol information tree to append the item
+ * hf_item: protocol information item descriptor index
+ * offset: offset in the buffer where the buffer length is to be found
+ * p_bytes_offset: pointer to a variable to store the actual buffer begin
+ * p_bytes_length: pointer to a variable to store the actual buffer length
+ *
+ * returns: offset of the next field in the message
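+ *
+ * Example: a null buffer is serialized as the single octet 0x01
+ * (zig-zag for -1), an empty one as the single octet 0x00.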
+ */
+static int
+dissect_kafka_bytes_new(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int hf_item, int offset, int *p_bytes_offset, int *p_bytes_length)
+{
+ gint val;
+ gint len;
+
+ tvb_read_kafka_varint32(tvb, offset, &val, &len);
+
+ DISSECTOR_ASSERT(val >= -1);
+
+ if (val > 0) {
+ // there is a non-empty payload
+ proto_tree_add_item(tree, hf_item, tvb, offset+len, val, ENC_NA);
+ } else if (val == 0) {
+ // there is empty payload (0 octets)
+ proto_tree_add_bytes_format_value(tree, hf_item, tvb, offset+len, 0, NULL, "<EMPTY>");
+ } else {
+ // there is no payload (null)
+ proto_tree_add_bytes_format_value(tree, hf_item, tvb, offset+len, 0, NULL, "<NULL>");
+ val = 0;
+ }
+ if (p_bytes_offset != NULL) {
+ *p_bytes_offset = offset+len;
+ }
+ if (p_bytes_length != NULL) {
+ *p_bytes_length = val;
+ }
+ return offset+len+val;
}
/* Calculate and show the reduction in transmitted size due to compression */
-static void show_compression_reduction(tvbuff_t *tvb, proto_tree *tree, guint compressed_size, guint uncompressed_size)
+static void
+show_compression_reduction(tvbuff_t *tvb, proto_tree *tree, guint compressed_size, guint uncompressed_size)
{
proto_item *ti;
/* Not really expecting a message to compress down to nothing, but defend against dividing by 0 anyway */
@@ -683,256 +1241,510 @@ static void show_compression_reduction(tvbuff_t *tvb, proto_tree *tree, guint co
}
static int
-dissect_kafka_message(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int start_offset)
+dissect_kafka_record_headers_header(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int offset)
{
- proto_item *message_ti, *decrypt_item;
+ proto_item *header_ti;
proto_tree *subtree;
- tvbuff_t *raw, *payload = NULL;
- int offset = start_offset;
+
+ int key_off, key_len;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_record_headers_header, &header_ti, "Header");
+
+ offset = dissect_kafka_string_new(tvb, pinfo, subtree, hf_kafka_record_header_key, offset, &key_off, &key_len);
+ offset = dissect_kafka_bytes_new(tvb, pinfo, subtree, hf_kafka_record_header_value, offset, NULL, NULL);
+
+ proto_item_append_text(header_ti, " (Key: %s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb, key_off, key_len, ENC_UTF_8));
+ proto_item_set_end(header_ti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_record_headers(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int offset)
+{
+ proto_item *record_headers_ti;
+ proto_tree *subtree;
+
+ gint32 count;
+ int i;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_record_headers, &record_headers_ti, "Headers");
+
+ tvb_read_kafka_varint32(tvb, offset, &count, NULL);
+ offset = dissect_kafka_varint(tvb, pinfo, subtree, hf_kafka_record_headers_count, offset);
+
+ // null array is marked by -1 as opposed to empty array marked by 0
+ DISSECTOR_ASSERT(count >= -1);
+ for (i=0;i<count;i++) {
+ offset = dissect_kafka_record_headers_header(tvb, pinfo, subtree, offset);
+ }
+
+ proto_item_set_end(record_headers_ti, tvb, offset);
+
+ return offset;
+}
+
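+/*
+ * Record fields, in the order dissected below: length (varint),
+ * attributes (1 octet), timestamp delta (varint), offset delta
+ * (varint), key (bytes), value (bytes) and the headers array.
+ */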
+static int
+dissect_kafka_record(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int start_offset, guint64 base_offset, guint64 first_timestamp)
+{
+ proto_item *record_ti;
+ proto_tree *subtree;
+
+ gint32 val;
+ int len;
+
+ int offset, end_offset;
+
+ offset = start_offset;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_record, &record_ti, "Record");
+
+ tvb_read_kafka_varint32(tvb, offset, &val, &len);
+ DISSECTOR_ASSERT(val >= 0);
+ end_offset = offset + len + val;
+
+ offset = dissect_kafka_varint(tvb, pinfo, subtree, hf_kafka_record_length, offset);
+
+ proto_tree_add_item(subtree, hf_kafka_record_attributes, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ offset = dissect_kafka_timestamp_delta(tvb, pinfo, subtree, hf_kafka_message_timestamp, offset, first_timestamp);
+ offset = dissect_kafka_offset_delta(tvb, pinfo, subtree, hf_kafka_offset, offset, base_offset);
+
+ offset = dissect_kafka_bytes_new(tvb, pinfo, subtree, hf_kafka_message_key, offset, NULL, NULL);
+ offset = dissect_kafka_bytes_new(tvb, pinfo, subtree, hf_kafka_message_value, offset, NULL, NULL);
+
+ offset = dissect_kafka_record_headers(tvb, pinfo, subtree, offset);
+
+ DISSECTOR_ASSERT(offset == end_offset);
+
+ proto_item_set_end(record_ti, tvb, offset);
+
+ return end_offset;
+}
+
+static int
+decompress_none(tvbuff_t *tvb, packet_info *pinfo _U_, int offset, int length _U_, tvbuff_t **decompressed_tvb, int *decompressed_offset)
+{
+ *decompressed_tvb = tvb;
+ *decompressed_offset = offset;
+ return 1;
+}
+
+static int
+decompress_gzip(tvbuff_t *tvb, packet_info *pinfo, int offset, int length, tvbuff_t **decompressed_tvb, int *decompressed_offset)
+{
+ *decompressed_tvb = tvb_child_uncompress(tvb, tvb, offset, length);
+ *decompressed_offset = 0;
+ if (*decompressed_tvb) {
+ return 1;
+ } else {
+ col_append_str(pinfo->cinfo, COL_INFO, " [gzip decompression failed] ");
+ return 0;
+ }
+}
+
+#ifdef HAVE_LZ4FRAME_H
+static int
+decompress_lz4(tvbuff_t *tvb, packet_info *pinfo, int offset, int length, tvbuff_t **decompressed_tvb, int *decompressed_offset)
+{
+ LZ4F_decompressionContext_t lz4_ctxt = NULL;
+ LZ4F_frameInfo_t lz4_info;
+ LZ4F_errorCode_t rc = 0;
+ size_t src_offset = 0, src_size = 0, dst_size = 0;
+ guchar *decompressed_buffer = NULL;
+ tvbuff_t *composite_tvb = tvb_new_composite();
+
+ int ret = 0;
+
+ /* Prepare compressed data buffer */
+ guint8 *data = (guint8*)tvb_memdup(wmem_packet_scope(), tvb, offset, length);
+ /* Override header checksum to workaround buggy Kafka implementations */
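+ /* The LZ4 frame descriptor spans data[4]..data[hdr_end-1]: FLG, BD
+ * and, when FLG bit 0x08 is set, an 8-octet content size. The header
+ * checksum octet at data[hdr_end] is recomputed as the second byte of
+ * XXH32 over the descriptor, so frames whose checksum was written
+ * incorrectly by old Kafka producers still pass LZ4F validation. */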
+ if (length > 7) {
+ int hdr_end = 6;
+ if (data[4] & 0x08) {
+ hdr_end += 8;
+ }
+ if (hdr_end < length) {
+ data[hdr_end] = (XXH32(&data[4], hdr_end - 4, 0) >> 8) & 0xff;
+ }
+ }
+
+ /* Allocate output buffer */
+ rc = LZ4F_createDecompressionContext(&lz4_ctxt, LZ4F_VERSION);
+ if (LZ4F_isError(rc)) {
+ goto end;
+ }
+
+ src_offset = length;
+ rc = LZ4F_getFrameInfo(lz4_ctxt, &lz4_info, data, &src_offset);
+ if (LZ4F_isError(rc)) {
+ goto end;
+ }
+
+ switch (lz4_info.blockSizeID) {
+ case LZ4F_max64KB:
+ dst_size = 1 << 16;
+ break;
+ case LZ4F_max256KB:
+ dst_size = 1 << 18;
+ break;
+ case LZ4F_max1MB:
+ dst_size = 1 << 20;
+ break;
+ case LZ4F_max4MB:
+ dst_size = 1 << 22;
+ break;
+ default:
+ goto end;
+ }
+
+ if (lz4_info.contentSize && lz4_info.contentSize < dst_size) {
+ dst_size = (size_t)lz4_info.contentSize;
+ }
+
+ do {
+ src_size = length - src_offset; // set the number of available octets
+ decompressed_buffer = (guchar*)wmem_alloc(pinfo->pool, dst_size);
+ rc = LZ4F_decompress(lz4_ctxt, decompressed_buffer, &dst_size,
+ &data[src_offset], &src_size, NULL);
+ if (LZ4F_isError(rc)) {
+ goto end;
+ }
+ tvb_composite_append(composite_tvb,
+ tvb_new_child_real_data(tvb, (guint8*)decompressed_buffer, (guint)dst_size, (gint)dst_size));
+ src_offset += src_size; // bump up the offset for the next iteration
+ } while (rc > 0);
+
+ tvb_composite_finalize(composite_tvb);
+ *decompressed_tvb = composite_tvb;
+ *decompressed_offset = 0;
+ composite_tvb = NULL;
+ ret = 1;
+end:
+ LZ4F_freeDecompressionContext(lz4_ctxt);
+ if (composite_tvb != NULL) {
+ tvb_free_chain(composite_tvb);
+ }
+ if (ret == 0) {
+ col_append_str(pinfo->cinfo, COL_INFO, " [lz4 decompression failed]");
+ }
+ return ret;
+}
+#else
+static int
+decompress_lz4(tvbuff_t *tvb _U_, packet_info *pinfo, int offset _U_, int length _U_, tvbuff_t **decompressed_tvb _U_, int *decompressed_offset _U_)
+{
+ col_append_str(pinfo->cinfo, COL_INFO, " [lz4 decompression unsupported]");
+ return 0;
+}
+#endif /* HAVE_LZ4FRAME_H */
+
+#ifdef HAVE_SNAPPY
+static int
+decompress_snappy(tvbuff_t *tvb, packet_info *pinfo, int offset, int length, tvbuff_t **decompressed_tvb, int *decompressed_offset)
+{
+ guint8 *data = (guint8*)tvb_memdup(wmem_packet_scope(), tvb, offset, length);
+ size_t uncompressed_size;
+ snappy_status rc = SNAPPY_OK;
+ tvbuff_t *composite_tvb = NULL;
+ int ret = 0;
+
+ if (tvb_memeql(tvb, offset, kafka_xerial_header, sizeof(kafka_xerial_header)) == 0) {
+
+ /* xerial framing format */
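+ /* A 16-octet header (8-octet magic plus two 32-bit version fields)
+ * is followed by repeated [4-octet big-endian length][compressed
+ * chunk] blocks; each chunk is decompressed independently below. */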
+ int chunk_size, pos = 16;
+
+ composite_tvb = tvb_new_composite();
+
+ while (pos < length) {
+ chunk_size = tvb_get_ntohl(tvb, offset+pos);
+ if (pos+chunk_size > length) {
+ // fail when the declared chunk size does not fit into remaining content
+ goto end;
+ }
+ pos += 4;
+ rc = snappy_uncompressed_length(&data[pos], chunk_size, &uncompressed_size);
+ if (rc != SNAPPY_OK) {
+ goto end;
+ }
+ guint8 *decompressed_buffer = (guint8*)wmem_alloc(pinfo->pool, uncompressed_size);
+ rc = snappy_uncompress(&data[pos], chunk_size, decompressed_buffer, &uncompressed_size);
+ if (rc != SNAPPY_OK) {
+ goto end;
+ }
+ tvb_composite_append(composite_tvb,
+ tvb_new_child_real_data(tvb, decompressed_buffer, (guint)uncompressed_size, (gint)uncompressed_size));
+ pos += chunk_size;
+ }
+
+ tvb_composite_finalize(composite_tvb);
+ *decompressed_tvb = composite_tvb;
+ *decompressed_offset = 0;
+ composite_tvb = NULL;
+
+ } else {
+
+ /* unframed format */
+ rc = snappy_uncompressed_length(data, length, &uncompressed_size);
+ if (rc != SNAPPY_OK) {
+ goto end;
+ }
+
+ guint8 *decompressed_buffer = (guint8*)wmem_alloc(pinfo->pool, uncompressed_size);
+
+ rc = snappy_uncompress(data, length, decompressed_buffer, &uncompressed_size);
+ if (rc != SNAPPY_OK) {
+ goto end;
+ }
+
+ *decompressed_tvb = tvb_new_child_real_data(tvb, decompressed_buffer, (guint)uncompressed_size, (gint)uncompressed_size);
+ *decompressed_offset = 0;
+
+ }
+ ret = 1;
+end:
+ if (composite_tvb != NULL) {
+ tvb_free_chain(composite_tvb);
+ }
+ if (ret == 0) {
+ col_append_str(pinfo->cinfo, COL_INFO, " [snappy decompression failed]");
+ }
+ return ret;
+}
+#else
+static int
+decompress_snappy(tvbuff_t *tvb _U_, packet_info *pinfo, int offset _U_, int length _U_, tvbuff_t **decompressed_tvb _U_, int *decompressed_offset _U_)
+{
+ col_append_str(pinfo->cinfo, COL_INFO, " [snappy decompression unsupported]");
+ return 0;
+}
+#endif /* HAVE_SNAPPY */
+
+static int
+decompress_zstd(tvbuff_t *tvb _U_, packet_info *pinfo, int offset _U_, int length _U_, tvbuff_t **decompressed_tvb _U_, int *decompressed_offset _U_)
+{
+ col_append_str(pinfo->cinfo, COL_INFO, " [zstd decompression unsupported]");
+ return 0;
+}
+
+static int
+decompress(tvbuff_t *tvb, packet_info *pinfo, int offset, int length, int codec, tvbuff_t **decompressed_tvb, int *decompressed_offset)
+{
+ switch (codec) {
+ case KAFKA_MESSAGE_CODEC_SNAPPY:
+ return decompress_snappy(tvb, pinfo, offset, length, decompressed_tvb, decompressed_offset);
+ case KAFKA_MESSAGE_CODEC_LZ4:
+ return decompress_lz4(tvb, pinfo, offset, length, decompressed_tvb, decompressed_offset);
+ case KAFKA_MESSAGE_CODEC_ZSTD:
+ return decompress_zstd(tvb, pinfo, offset, length, decompressed_tvb, decompressed_offset);
+ case KAFKA_MESSAGE_CODEC_GZIP:
+ return decompress_gzip(tvb, pinfo, offset, length, decompressed_tvb, decompressed_offset);
+ case KAFKA_MESSAGE_CODEC_NONE:
+ return decompress_none(tvb, pinfo, offset, length, decompressed_tvb, decompressed_offset);
+ default:
+ col_append_str(pinfo->cinfo, COL_INFO, " [unsupported compression type]");
+ return 0;
+ }
+}
+
+/*
+ * Function: dissect_kafka_message_old
+ * ---------------------------------------------------
+ * Handles decoding of pre-0.11 message format. In the old format
+ * only the message payload was subject to compression,
+ * and batches were carried as a special kind of message payload.
+ *
+ * https://kafka.apache.org/0100/documentation/#messageformat
+ *
+ * tvb: actual data buffer
+ * pinfo: packet information
+ * tree: protocol information tree to append the item
+ * offset: offset of the message in the buffer
+ *
+ * returns: offset of the next message/batch
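+ *
+ * Message fields, in the order dissected below: offset (8 octets),
+ * message size (4), crc (4), magic (1), attributes (1), timestamp
+ * (8, magic=1 only), key (bytes) and value (bytes, or a compressed
+ * message set).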
+ */
+static int
+dissect_kafka_message_old(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset)
+{
+ proto_item *message_ti;
+ proto_tree *subtree;
+ tvbuff_t *decompressed_tvb;
+ int decompressed_offset;
+ int start_offset = offset;
gint8 magic_byte;
guint8 codec;
- guint bytes_length = 0;
+ guint32 message_size;
+ guint32 length;
+
+ message_size = tvb_get_guint32(tvb, start_offset + 8, ENC_BIG_ENDIAN);
+
+ subtree = proto_tree_add_subtree(tree, tvb, start_offset, message_size + 12, ett_kafka_message, &message_ti, "Message");
+ proto_tree_add_item(subtree, hf_kafka_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_message, &message_ti, "Message");
+ proto_tree_add_item(subtree, hf_kafka_message_size, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
- /* CRC */
proto_tree_add_item(subtree, hf_kafka_message_crc, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
- /* Magic */
magic_byte = tvb_get_guint8(tvb, offset);
+
proto_tree_add_item(subtree, hf_kafka_message_magic, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
- /* Codec */
proto_tree_add_item(subtree, hf_kafka_message_codec, tvb, offset, 1, ENC_BIG_ENDIAN);
codec = tvb_get_guint8(tvb, offset) & KAFKA_MESSAGE_CODEC_MASK;
-
- /* Timestamp Type */
proto_tree_add_item(subtree, hf_kafka_message_timestamp_type, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
- if (magic_byte >= 1) {
- /* Timestamp */
+ if (magic_byte == 1) {
offset = dissect_kafka_timestamp(tvb, pinfo, subtree, hf_kafka_message_timestamp, offset);
}
- offset = dissect_kafka_bytes(subtree, hf_kafka_message_key, tvb, pinfo, offset, NULL, &bytes_length);
+ offset = dissect_kafka_bytes(subtree, hf_kafka_message_key, tvb, pinfo, offset, NULL, NULL);
- switch (codec) {
- case KAFKA_MESSAGE_CODEC_GZIP:
- raw = kafka_get_bytes(subtree, tvb, pinfo, offset);
- offset += 4;
+ /*
+ * depending on the compression codec, the payload is either the actual message payload
+ * (codec=none) or a compressed set of messages (otherwise). In the new format (since
+ * Kafka 0.11) there is no such duality.
+ */
+ if (codec == 0) {
+ offset = dissect_kafka_bytes(subtree, hf_kafka_message_value, tvb, pinfo, offset, NULL, &length);
+ } else {
+ length = tvb_get_ntohl(tvb, offset);
+ offset += 4;
+ if (decompress(tvb, pinfo, offset, length, codec, &decompressed_tvb, &decompressed_offset)==1) {
+ add_new_data_source(pinfo, decompressed_tvb, "Decompressed content");
+ show_compression_reduction(tvb, subtree, length, tvb_captured_length(decompressed_tvb));
+ dissect_kafka_message_set(decompressed_tvb, pinfo, subtree, decompressed_offset, FALSE, codec);
+ } else {
+ proto_item_append_text(subtree, " [Cannot decompress records]");
+ }
+ }
- if (raw) {
- guint compressed_size = tvb_captured_length(raw);
+ proto_item_set_len(message_ti, offset - start_offset);
- /* Raw compressed data */
- proto_tree_add_item(subtree, hf_kafka_message_value_compressed, tvb, offset, compressed_size, ENC_NA);
+ return offset;
+}
- /* Unzip message and add payload to new data tab */
- payload = tvb_child_uncompress(tvb, raw, 0, compressed_size);
- if (payload) {
- show_compression_reduction(tvb, subtree, compressed_size, (guint)tvb_captured_length(payload));
+/*
+ * Function: dissect_kafka_message_new
+ * ---------------------------------------------------
+ * Handles decoding of the new message format. In the new format
+ * there is no difference between compressed and plain batch.
+ *
+ * https://kafka.apache.org/documentation/#messageformat
+ *
+ * tvb: actual data buffer
+ * pinfo: packet information
+ * tree: protocol information tree to append the item
+ * offset: offset of the batch in the buffer
+ *
+ * returns: offset of the next message/batch
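+ *
+ * Batch header fields, in the order dissected below: base offset
+ * (8 octets), batch length (4), partition leader epoch (4),
+ * magic (1), crc (4), attributes (2), last offset delta (4),
+ * first timestamp (8), last timestamp (8), producer id (8),
+ * producer epoch (2), base sequence (4) and record count (4),
+ * followed by the records (possibly compressed).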
+ */
+static int
+dissect_kafka_message_new(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset)
+{
+ proto_item *batch_ti;
+ proto_tree *subtree;
+ int start_offset = offset;
+ gint8 magic_byte;
+ guint8 codec;
+ guint32 message_size;
+ guint32 count, i, length;
+ guint64 base_offset, first_timestamp;
- add_new_data_source(pinfo, payload, "Uncompressed Message");
- dissect_kafka_message_set(payload, pinfo, subtree, 0, FALSE, codec);
- } else {
- decrypt_item = proto_tree_add_item(subtree, hf_kafka_message_value, raw, 0, -1, ENC_NA);
- expert_add_info(pinfo, decrypt_item, &ei_kafka_message_decompress);
- }
- offset += compressed_size;
+ tvbuff_t *decompressed_tvb;
+ int decompressed_offset;
- /* Add to summary */
- col_append_fstr(pinfo->cinfo, COL_INFO, " [GZIPd message set]");
- proto_item_append_text(message_ti, " (GZIPd message set)");
- }
- else {
- proto_tree_add_bytes(subtree, hf_kafka_message_value, tvb, offset, 0, NULL);
- }
- break;
- case KAFKA_MESSAGE_CODEC_SNAPPY:
- raw = kafka_get_bytes(subtree, tvb, pinfo, offset);
- offset += 4;
- if (raw) {
-#ifdef HAVE_SNAPPY
- guint compressed_size = tvb_reported_length(raw);
- guint8 *data = (guint8*)tvb_memdup(wmem_packet_scope(), raw, 0, compressed_size);
- size_t uncompressed_size;
- snappy_status ret = SNAPPY_INVALID_INPUT;
-
- /* Raw compressed data */
- proto_tree_add_item(subtree, hf_kafka_message_value_compressed, tvb, offset, compressed_size, ENC_NA);
-
- if (tvb_memeql(raw, 0, kafka_xerial_header, sizeof(kafka_xerial_header)) == 0) {
- /* xerial framing format */
- guint chunk_size, pos = 16;
-
- payload = tvb_new_composite();
- while (pos < compressed_size) {
- chunk_size = tvb_get_ntohl(raw, pos);
- pos += 4;
- ret = snappy_uncompressed_length(&data[pos], chunk_size, &uncompressed_size);
- if (ret == SNAPPY_OK) {
- guint8 *decompressed_buffer = (guint8*)wmem_alloc(pinfo->pool, uncompressed_size);
-
- ret = snappy_uncompress(&data[pos], chunk_size, decompressed_buffer, &uncompressed_size);
- if (ret == SNAPPY_OK) {
- tvb_composite_append(payload,
- tvb_new_child_real_data(tvb, decompressed_buffer,
- (guint32)uncompressed_size, (guint32)uncompressed_size));
- } else {
- wmem_free(pinfo->pool, decompressed_buffer);
- break;
- }
- }
- pos += chunk_size;
- }
- tvb_composite_finalize(payload);
- } else {
- /* unframed format */
- ret = snappy_uncompressed_length(data, compressed_size, &uncompressed_size);
- if (ret == SNAPPY_OK) {
- guint8 *decompressed_buffer = (guint8*)wmem_alloc(pinfo->pool, uncompressed_size);
-
- ret = snappy_uncompress(data, compressed_size, decompressed_buffer, &uncompressed_size);
- if (ret == SNAPPY_OK) {
- payload = tvb_new_child_real_data(tvb, decompressed_buffer,
- (guint32)uncompressed_size, (guint32)uncompressed_size);
- } else {
- wmem_free(pinfo->pool, decompressed_buffer);
- }
- }
- }
- if (ret == SNAPPY_OK) {
- show_compression_reduction(tvb, subtree, compressed_size, (guint)uncompressed_size);
-
- add_new_data_source(pinfo, payload, "Uncompressed Message");
- dissect_kafka_message_set(payload, pinfo, subtree, 0, FALSE, codec);
-
- /* Add to summary */
- col_append_fstr(pinfo->cinfo, COL_INFO, " [Snappy-compressed message set]");
- proto_item_append_text(message_ti, " (Snappy-compressed message set)");
- } else {
- decrypt_item = proto_tree_add_item(subtree, hf_kafka_message_value, raw, 0, -1, ENC_NA);
- expert_add_info(pinfo, decrypt_item, &ei_kafka_message_decompress);
- }
- offset += tvb_captured_length(raw);
-#else
- decrypt_item = proto_tree_add_item(subtree, hf_kafka_message_value, raw, 0, -1, ENC_NA);
- expert_add_info_format(pinfo, decrypt_item, &ei_kafka_message_decompress, "Wireshark not compiled with Snappy support");
-#endif
- }
- break;
- case KAFKA_MESSAGE_CODEC_LZ4:
- raw = kafka_get_bytes(subtree, tvb, pinfo, offset);
- offset += 4;
- if (raw) {
-#ifdef HAVE_LZ4FRAME_H
- LZ4F_decompressionContext_t lz4_ctxt;
- LZ4F_frameInfo_t lz4_info;
- LZ4F_errorCode_t ret;
- size_t src_offset, src_size, dst_size;
- guchar *decompressed_buffer = NULL;
-
- /* Prepare compressed data buffer */
- guint compressed_size = tvb_reported_length(raw);
- guint8 *data = (guint8*)tvb_memdup(wmem_packet_scope(), raw, 0, compressed_size);
- /* Override header checksum to workaround buggy Kafka implementations */
- if (compressed_size > 7) {
- guint hdr_end = 6;
- if (data[4] & 0x08) {
- hdr_end += 8;
- }
- if (hdr_end < compressed_size) {
- data[hdr_end] = (XXH32(&data[4], hdr_end - 4, 0) >> 8) & 0xff;
- }
- }
+ message_size = tvb_get_guint32(tvb, start_offset + 8, ENC_BIG_ENDIAN);
- /* Show raw compressed data */
- proto_tree_add_item(subtree, hf_kafka_message_value_compressed, tvb, offset, compressed_size, ENC_NA);
+ subtree = proto_tree_add_subtree(tree, tvb, start_offset, message_size + 12, ett_kafka_batch, &batch_ti, "Record Batch");
- /* Allocate output buffer */
- ret = LZ4F_createDecompressionContext(&lz4_ctxt, LZ4F_VERSION);
- if (LZ4F_isError(ret)) {
- goto fail;
- }
- src_offset = compressed_size;
- ret = LZ4F_getFrameInfo(lz4_ctxt, &lz4_info, data, &src_offset);
- if (LZ4F_isError(ret)) {
- LZ4F_freeDecompressionContext(lz4_ctxt);
- goto fail;
- }
- switch (lz4_info.blockSizeID) {
- case LZ4F_max64KB:
- dst_size = 1 << 16;
- break;
- case LZ4F_max256KB:
- dst_size = 1 << 18;
- break;
- case LZ4F_max1MB:
- dst_size = 1 << 20;
- break;
- case LZ4F_max4MB:
- dst_size = 1 << 22;
- break;
- default:
- LZ4F_freeDecompressionContext(lz4_ctxt);
- goto fail;
- }
- if (lz4_info.contentSize && lz4_info.contentSize < dst_size) {
- dst_size = (size_t)lz4_info.contentSize;
- }
- decompressed_buffer = (guchar*)wmem_alloc(pinfo->pool, dst_size);
-
- /* Attempt the decompression. */
- src_size = compressed_size - src_offset;
- ret = LZ4F_decompress(lz4_ctxt, decompressed_buffer, &dst_size,
- &data[src_offset], &src_size, NULL);
- LZ4F_freeDecompressionContext(lz4_ctxt);
- if (ret == 0) {
- size_t uncompressed_size = dst_size;
-
- show_compression_reduction(tvb, subtree, compressed_size, (guint)uncompressed_size);
-
- /* Add as separate data tab */
- payload = tvb_new_child_real_data(tvb, decompressed_buffer,
- (guint32)uncompressed_size, (guint32)uncompressed_size);
- add_new_data_source(pinfo, payload, "Uncompressed Message");
-
- /* Dissect as a message set */
- dissect_kafka_message_set(payload, pinfo, subtree, 0, FALSE, codec);
-
- /* Add to summary */
- col_append_fstr(pinfo->cinfo, COL_INFO, " [LZ4-compressed message set]");
- proto_item_append_text(message_ti, " (LZ4-compressed message set)");
- } else {
- fail:
- /* Error */
- decrypt_item = proto_tree_add_item(subtree, hf_kafka_message_value, raw, 0, -1, ENC_NA);
- expert_add_info(pinfo, decrypt_item, &ei_kafka_message_decompress);
- }
- offset += compressed_size;
-#else
- decrypt_item = proto_tree_add_item(subtree, hf_kafka_message_value, raw, 0, -1, ENC_NA);
- expert_add_info_format(pinfo, decrypt_item, &ei_kafka_message_decompress, "Wireshark not compiled with LZ4 support");
-#endif /* HAVE_LZ4FRAME_H */
- }
- break;
+ base_offset = tvb_get_ntoh64(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
- case KAFKA_MESSAGE_CODEC_NONE:
- default:
- offset = dissect_kafka_bytes(subtree, hf_kafka_message_value, tvb, pinfo, offset, NULL, &bytes_length);
+ proto_tree_add_item(subtree, hf_kafka_message_size, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ proto_tree_add_item(subtree, hf_kafka_leader_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
- /* Add to summary */
- col_append_fstr(pinfo->cinfo, COL_INFO, " [%u bytes]", bytes_length);
- proto_item_append_text(message_ti, " (%u bytes)", bytes_length);
+ magic_byte = tvb_get_guint8(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_message_magic, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ if (magic_byte != 2) {
+ proto_item_append_text(subtree, " [Unknown message magic]");
+ expert_add_info_format(pinfo, batch_ti, &ei_kafka_unknown_message_magic,
+ "message magic: %d", magic_byte);
+ return start_offset + 8 /*base offset*/ + 4 /*message size*/ + message_size;
}
- proto_item_set_len(message_ti, offset - start_offset);
+ proto_tree_add_item(subtree, hf_kafka_batch_crc, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
- return offset;
+ proto_tree_add_item(subtree, hf_kafka_batch_codec, tvb, offset, 2, ENC_BIG_ENDIAN);
+ codec = tvb_get_ntohs(tvb, offset) & KAFKA_MESSAGE_CODEC_MASK;
+ proto_tree_add_item(subtree, hf_kafka_batch_timestamp_type, tvb, offset, 2, ENC_BIG_ENDIAN);
+ proto_tree_add_item(subtree, hf_kafka_batch_transactional, tvb, offset, 2, ENC_BIG_ENDIAN);
+ proto_tree_add_item(subtree, hf_kafka_batch_control_batch, tvb, offset, 2, ENC_BIG_ENDIAN);
+ // the high-order octet of the 16-bit attributes field is reserved
+ offset += 2;
+
+ proto_tree_add_item(subtree, hf_kafka_batch_last_offset_delta, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ first_timestamp = tvb_get_ntoh64(tvb, offset);
+ offset = dissect_kafka_timestamp(tvb, pinfo, subtree, hf_kafka_batch_first_timestamp, offset);
+ offset = dissect_kafka_timestamp(tvb, pinfo, subtree, hf_kafka_batch_last_timestamp, offset);
+
+ proto_tree_add_item(subtree, hf_kafka_producer_id, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+ proto_tree_add_item(subtree, hf_kafka_producer_epoch, tvb, offset, 2, ENC_BIG_ENDIAN);
+ offset += 2;
+ proto_tree_add_item(subtree, hf_kafka_batch_base_sequence, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ proto_tree_add_item(subtree, hf_kafka_batch_size, tvb, offset, 4, ENC_BIG_ENDIAN);
+ count = tvb_get_ntohl(tvb, offset);
+ offset += 4;
+
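+ /* Whatever remains of the batch after the fixed 61-byte header is the
+  * records block; with a non-null codec the whole block is compressed as
+  * a single unit. */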
+ length = start_offset + 8 /*base offset*/ + 4 /*message size*/ + message_size - offset;
+
+ if (decompress(tvb, pinfo, offset, length, codec, &decompressed_tvb, &decompressed_offset) == 1) {
+ if (codec != KAFKA_MESSAGE_CODEC_NONE) {
+ add_new_data_source(pinfo, decompressed_tvb, "Decompressed Records");
+ show_compression_reduction(tvb, subtree, length, tvb_captured_length(decompressed_tvb));
+ }
+ for (i = 0; i < count; i++) {
+ decompressed_offset = dissect_kafka_record(decompressed_tvb, pinfo, subtree, decompressed_offset, base_offset, first_timestamp);
+ }
+ } else {
+ proto_item_append_text(subtree, " [Cannot decompress records]");
+ }
+
+ return start_offset + 8 /*base offset*/ + 4 /*message size*/ + message_size;
+}
+
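+ /*
+  * Both the legacy message format and the v2 record batch keep the magic
+  * byte 16 bytes in (8-byte offset plus 4-byte length plus 4-byte CRC or
+  * partition leader epoch, respectively), so a one-byte lookahead is enough
+  * to pick the right dissector.
+  */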
+static int
+dissect_kafka_message(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset)
+{
+ gint8 magic_byte;
+
+ magic_byte = tvb_get_guint8(tvb, offset+16);
+ if (magic_byte < 2) {
+ return dissect_kafka_message_old(tvb, pinfo, tree, offset);
+ } else {
+ return dissect_kafka_message_new(tvb, pinfo, tree, offset);
+ }
}
static int
@@ -940,7 +1752,7 @@ dissect_kafka_message_set(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, i
{
proto_item *ti;
proto_tree *subtree;
- gint len;
+ gint len, message_size;
int offset = start_offset;
int messages = 0;
@@ -961,18 +1773,18 @@ dissect_kafka_message_set(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, i
subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_message_set, &ti, "Message Set");
/* If set came from a compressed message, make it obvious in tree root */
if (codec != KAFKA_MESSAGE_CODEC_NONE) {
- proto_item_append_text(subtree, " [from compressed %s message]", val_to_str_const(codec, kafka_message_codecs, "Unknown codec"));
+ proto_item_append_text(subtree, " [from compressed %s message]", val_to_str_const(codec, kafka_message_codecs, "Unknown"));
}
while (offset - start_offset < len) {
- proto_tree_add_item(subtree, hf_kafka_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
- offset += 8;
- proto_tree_add_item(subtree, hf_kafka_message_size, tvb, offset, 4, ENC_BIG_ENDIAN);
- offset += 4;
+ message_size = tvb_get_guint32(tvb, offset+8, ENC_BIG_ENDIAN);
+
+ dissect_kafka_message(tvb, pinfo, subtree, offset);
+ offset += 12 + message_size;
- offset = dissect_kafka_message(tvb, pinfo, subtree, offset);
messages += 1;
+
}
proto_item_append_text(ti, " (%d Messages)", messages);
@@ -984,33 +1796,32 @@ dissect_kafka_message_set(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, i
/* OFFSET FETCH REQUEST/RESPONSE */
static int
-dissect_kafka_partition_id(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int offset,
- kafka_api_version_t api_version _U_)
+dissect_kafka_partition_id_ret(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int offset,
+ kafka_partition_t *p_partition)
{
proto_tree_add_item(tree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ if (p_partition != NULL) {
+ *p_partition = tvb_get_ntohl(tvb, offset);
+ }
offset += 4;
return offset;
}
static int
-dissect_kafka_partition_id_get_value(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int offset, kafka_packet_values_t* packet_values)
+dissect_kafka_partition_id(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
{
- proto_tree_add_item(tree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
- if (packet_values != NULL) {
- packet_values->partition_id = tvb_get_ntohl(tvb, offset);
- }
- offset += 4;
-
- return offset;
+ return dissect_kafka_partition_id_ret(tvb, pinfo, tree, offset, NULL);
}
static int
-dissect_kafka_offset_get_value(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int offset, kafka_packet_values_t* packet_values)
+dissect_kafka_offset_ret(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int offset,
+ kafka_offset_t *p_offset)
{
proto_tree_add_item(tree, hf_kafka_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
- if (packet_values != NULL) {
- packet_values->offset = tvb_get_ntoh64(tvb, offset);
+ if (p_offset != NULL) {
+ *p_offset = tvb_get_ntoh64(tvb, offset);
}
offset += 8;
@@ -1018,11 +1829,18 @@ dissect_kafka_offset_get_value(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree
}
static int
-dissect_kafka_offset(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int offset,
+dissect_kafka_offset(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version _U_)
{
- proto_tree_add_item(tree, hf_kafka_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
- offset += 8;
+ return dissect_kafka_offset_ret(tvb, pinfo, tree, offset, NULL);
+}
+
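+ /* leader_epoch (KIP-320) lets clients detect stale leadership and log
+  * truncation. */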
+static int
+dissect_kafka_leader_epoch(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ proto_tree_add_item(tree, hf_kafka_leader_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
return offset;
}
@@ -1031,9 +1849,20 @@ static int
dissect_kafka_offset_time(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int offset,
kafka_api_version_t api_version _U_)
{
- proto_tree_add_item(tree, hf_kafka_offset_time, tvb, offset, 8, ENC_BIG_ENDIAN);
+ proto_item *ti;
+ gint64 time;
+
+ time = tvb_get_ntoh64(tvb, offset);
+
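+ /* ListOffsets uses sentinel timestamps: -1 asks for the latest offset,
+  * -2 for the earliest available one. */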
+ ti = proto_tree_add_item(tree, hf_kafka_offset_time, tvb, offset, 8, ENC_BIG_ENDIAN);
offset += 8;
+ if (time == -1) {
+ proto_item_append_text(ti, " (latest)");
+ } else if (time == -2) {
+ proto_item_append_text(ti, " (earliest)");
+ }
+
return offset;
}
@@ -1044,16 +1873,47 @@ dissect_kafka_offset_fetch_request_topic(tvbuff_t *tvb, packet_info *pinfo, prot
proto_item *ti;
proto_tree *subtree;
int offset = start_offset;
- guint32 count;
+ gint32 count;
+ int topic_start, topic_len;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_request_topic, &ti, "Offset Fetch Request Topic");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &ti, "Topic");
- offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, NULL, NULL);
- count = (gint32)tvb_get_ntohl(tvb, offset);
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ count = tvb_get_ntohil(tvb, offset);
offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version, &dissect_kafka_partition_id);
proto_item_set_len(ti, offset - start_offset);
- proto_item_append_text(ti, " (%u partitions)", count);
+ proto_item_append_text(ti, " (Topic: %s, Partitions: %u)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb, topic_start, topic_len, ENC_UTF_8|ENC_NA),
+ count);
+
+ return offset;
+}
+
+static int
+dissect_kafka_offset_fetch_request_topics(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int start_offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *ti;
+ proto_tree *subtree;
+ int offset = start_offset;
+ gint32 count;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topics, &ti, "Topics");
+
+ count = tvb_get_ntohil(tvb, offset);
+
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_offset_fetch_request_topic);
+
+ proto_item_set_len(ti, offset - start_offset);
+
+ if (count < 0) {
+ proto_item_append_text(ti, " (all committed topics)");
+ } else {
+ proto_item_append_text(ti, " (%u topics)", count);
+ }
return offset;
}
@@ -1063,8 +1923,8 @@ dissect_kafka_offset_fetch_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree
kafka_api_version_t api_version)
{
offset = dissect_kafka_string(tree, hf_kafka_consumer_group, tvb, pinfo, offset, NULL, NULL);
- offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
- &dissect_kafka_offset_fetch_request_topic);
+
+ offset = dissect_kafka_offset_fetch_request_topics(tvb, pinfo, tree, offset, api_version);
return offset;
}
@@ -1097,6 +1957,14 @@ dissect_kafka_error(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int off
}
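+ /*
+  * Dissects throttle_time_ms: the time in milliseconds the broker delayed
+  * the response because of quota enforcement.
+  */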
static int
+dissect_kafka_throttle_time(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int offset)
+{
+ proto_tree_add_item(tree, hf_kafka_throttle_time, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ return offset;
+}
+
+static int
dissect_kafka_offset_fetch_response_partition(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int start_offset, kafka_api_version_t api_version)
{
@@ -1106,17 +1974,26 @@ dissect_kafka_offset_fetch_response_partition(tvbuff_t *tvb, packet_info *pinfo,
kafka_packet_values_t packet_values;
memset(&packet_values, 0, sizeof(packet_values));
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_request_partition, &ti, "Offset Fetch Response Partition");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &ti, "Partition");
- offset = dissect_kafka_partition_id_get_value(tvb, pinfo, subtree, offset, &packet_values);
- offset = dissect_kafka_offset(tvb, pinfo, subtree, offset, api_version);
+ offset = dissect_kafka_partition_id_ret(tvb, pinfo, subtree, offset, &packet_values.partition_id);
+ offset = dissect_kafka_offset_ret(tvb, pinfo, subtree, offset, &packet_values.offset);
+
+ if (api_version >= 5) {
+ offset = dissect_kafka_leader_epoch(tvb, pinfo, subtree, offset, api_version);
+ }
offset = dissect_kafka_string(subtree, hf_kafka_metadata, tvb, pinfo, offset, NULL, NULL);
offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
- proto_item_append_text(ti, " (Partition-ID=%u, Offset=%" G_GINT64_MODIFIER "u)",
- packet_values.partition_id, packet_values.offset);
+ if (packet_values.offset == -1) {
+ proto_item_append_text(ti, " (ID=%u, Offset=None)",
+ packet_values.partition_id);
+ } else {
+ proto_item_append_text(ti, " (ID=%u, Offset=%" G_GINT64_MODIFIER "i)",
+ packet_values.partition_id, packet_values.offset);
+ }
proto_item_set_len(ti, offset - start_offset);
@@ -1131,7 +2008,7 @@ dissect_kafka_offset_fetch_response_topic(tvbuff_t *tvb, packet_info *pinfo, pro
proto_tree *subtree;
int offset = start_offset;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_response_topic, &ti, "offset fetch response topic");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &ti, "Topic");
offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, NULL, NULL);
offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
@@ -1146,9 +2023,8 @@ static int
dissect_kafka_offset_fetch_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version)
{
- if (api_version >= 1) {
- proto_tree_add_item(tree, hf_kafka_throttle_time, tvb, offset, 4, ENC_BIG_ENDIAN);
- offset += 4;
+ if (api_version >= 3) {
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
}
return dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
@@ -1168,8 +2044,25 @@ static int
dissect_kafka_metadata_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version)
{
- return dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
+ offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
&dissect_kafka_metadata_request_topic);
+
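+ /* v4 lets the client control auto-creation of the listed topics; v8
+  * (KIP-430) adds flags requesting authorized-operations bitmasks. */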
+ if (api_version >= 4) {
+ proto_tree_add_item(tree, hf_kafka_allow_auto_topic_creation, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+ }
+
+ if (api_version >= 8) {
+ proto_tree_add_item(tree, hf_kafka_include_cluster_authorized_ops, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+ }
+
+ if (api_version >= 8) {
+ proto_tree_add_item(tree, hf_kafka_include_topic_authorized_ops, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+ }
+
+ return offset;
}
static int
@@ -1196,7 +2089,7 @@ dissect_kafka_metadata_broker(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tre
offset += 4;
if (api_version >= 1) {
- offset = dissect_kafka_string(subtree, hf_kafka_broker_rack, tvb, pinfo, offset, NULL, NULL);
+ offset = dissect_kafka_string(subtree, hf_kafka_rack, tvb, pinfo, offset, NULL, NULL);
}
proto_item_append_text(ti, " (node %u: %s:%u)",
@@ -1227,6 +2120,14 @@ dissect_kafka_metadata_isr(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tr
}
static int
+dissect_kafka_metadata_offline(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ proto_tree_add_item(tree, hf_kafka_offline, tvb, offset, 4, ENC_BIG_ENDIAN);
+ return offset + 4;
+}
+
+static int
dissect_kafka_metadata_partition(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int start_offset,
kafka_api_version_t api_version)
{
@@ -1234,16 +2135,22 @@ dissect_kafka_metadata_partition(tvbuff_t *tvb, packet_info *pinfo, proto_tree *
proto_tree *subtree, *subsubtree;
int offset = start_offset;
int sub_start_offset;
+ kafka_partition_t partition;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_response_partition, &ti, "Partition");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &ti, "Partition");
offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
- offset = dissect_kafka_partition_id(tvb, pinfo, subtree, offset, api_version);
+ offset = dissect_kafka_partition_id_ret(tvb, pinfo, subtree, offset, &partition);
- proto_tree_add_item(subtree, hf_kafka_partition_leader, tvb, offset, 4, ENC_BIG_ENDIAN);
+ proto_tree_add_item(subtree, hf_kafka_leader_id, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
+ if (api_version >= 7) {
+ proto_tree_add_item(subtree, hf_kafka_leader_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ }
+
sub_start_offset = offset;
subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_replicas, &subti, "Replicas");
offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version, &dissect_kafka_metadata_replica);
@@ -1254,7 +2161,15 @@ dissect_kafka_metadata_partition(tvbuff_t *tvb, packet_info *pinfo, proto_tree *
offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version, &dissect_kafka_metadata_isr);
proto_item_set_len(subti, offset - sub_start_offset);
+ if (api_version >= 5) {
+ sub_start_offset = offset;
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_offline, &subti, "Offline Replicas");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version, &dissect_kafka_metadata_offline);
+ proto_item_set_len(subti, offset - sub_start_offset);
+ }
+
proto_item_set_len(ti, offset - start_offset);
+ proto_item_append_text(ti, " (ID=%u)", partition);
return offset;
}
@@ -1268,7 +2183,7 @@ dissect_kafka_metadata_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree
int offset = start_offset;
int name_start, name_length;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_response_topic, &ti, "Topic");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &ti, "Topic");
offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
@@ -1284,6 +2199,11 @@ dissect_kafka_metadata_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree
offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version, &dissect_kafka_metadata_partition);
+ if (api_version >= 8) {
+ proto_tree_add_item(subtree, hf_kafka_topic_authorized_ops, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ }
+
proto_item_set_len(ti, offset - start_offset);
return offset;
@@ -1298,6 +2218,11 @@ dissect_kafka_metadata_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *t
int offset = start_offset;
subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_brokers, &ti, "Broker Metadata");
+
+ if (api_version >= 3) {
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+ }
+
offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version, &dissect_kafka_metadata_broker);
proto_item_set_len(ti, offset - start_offset);
@@ -1315,6 +2240,11 @@ dissect_kafka_metadata_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *t
offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version, &dissect_kafka_metadata_topic);
proto_item_set_len(ti, offset - start_offset);
+ if (api_version >= 8) {
+ proto_tree_add_item(tree, hf_kafka_cluster_authorized_ops, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ }
+
return offset;
}
@@ -1342,6 +2272,7 @@ dissect_kafka_leader_and_isr_request_replica(tvbuff_t *tvb, packet_info *pinfo _
return offset;
}
+
static int
dissect_kafka_leader_and_isr_request_partition_state(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, kafka_api_version_t api_version)
@@ -1352,12 +2283,14 @@ dissect_kafka_leader_and_isr_request_partition_state(tvbuff_t *tvb, packet_info
kafka_partition_t partition;
subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
- ett_kafka_request_partition,
- &subti, "Partition State");
+ ett_kafka_partition,
+ &subti, "Partition");
- /* topic */
- offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset,
- &topic_start, &topic_len);
+ if (api_version < 2) {
+ /* topic */
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset,
+ &topic_start, &topic_len);
+ }
/* partition */
partition = (kafka_partition_t) tvb_get_ntohl(tvb, offset);
@@ -1391,16 +2324,67 @@ dissect_kafka_leader_and_isr_request_partition_state(tvbuff_t *tvb, packet_info
/* [replica] */
subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1,
ett_kafka_replicas,
- &subsubti, "Replicas");
+ &subsubti, "Current Replicas");
offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
&dissect_kafka_leader_and_isr_request_replica);
proto_item_set_end(subsubti, tvb, offset);
+ if (api_version >= 3) {
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1,
+ ett_kafka_replicas,
+ &subsubti, "Adding Replicas");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_leader_and_isr_request_replica);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1,
+ ett_kafka_replicas,
+ &subsubti, "Removing Replicas");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_leader_and_isr_request_replica);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ }
+
proto_item_set_end(subti, tvb, offset);
- proto_item_append_text(subti, " (Topic=%s, Partition-ID=%u)",
+
+ if (api_version < 2) {
+ proto_item_append_text(subti, " (Topic=%s, Partition-ID=%u)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA),
+ partition);
+ } else {
+ proto_item_append_text(subti, " (Partition-ID=%u)",
+ partition);
+ }
+
+ return offset;
+}
+
+static int
+dissect_kafka_leader_and_isr_request_topic_state(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version)
+{
+ proto_tree *subtree;
+ proto_item *subti;
+ int topic_start, topic_len;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topic,
+ &subti, "Topic");
+
+ /* topic */
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset,
+ &topic_start, &topic_len);
+
+ /* [partition_state] */
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_leader_and_isr_request_partition_state);
+
+ proto_item_append_text(subti, " (Name=%s)",
tvb_get_string_enc(wmem_packet_scope(), tvb,
- topic_start, topic_len, ENC_UTF_8|ENC_NA),
- partition);
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
return offset;
}
@@ -1455,9 +2439,21 @@ dissect_kafka_leader_and_isr_request(tvbuff_t *tvb, packet_info *pinfo, proto_tr
proto_tree_add_item(tree, hf_kafka_controller_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
- /* [partition_state] */
- offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
- &dissect_kafka_leader_and_isr_request_partition_state);
+ if (api_version >= 2) {
+ /* broker_epoch */
+ proto_tree_add_item(tree, hf_kafka_broker_epoch, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+ }
+
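+ /* Since v2 the partition states are grouped per topic rather than sent as
+  * a flat list that repeats the topic name for every partition. */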
+ if (api_version >= 2) {
+ /* [topic_state] */
+ offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_leader_and_isr_request_topic_state);
+ } else {
+ /* [partition_state] */
+ offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_leader_and_isr_request_partition_state);
+ }
/* [live_leader] */
offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
@@ -1479,7 +2475,7 @@ dissect_kafka_leader_and_isr_response_partition(tvbuff_t *tvb, packet_info *pinf
kafka_error_t error;
subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
- ett_kafka_response_partition,
+ ett_kafka_partition,
&subti, "Partition");
/* topic */
@@ -1520,6 +2516,41 @@ dissect_kafka_leader_and_isr_response(tvbuff_t *tvb, packet_info *pinfo, proto_t
/* STOP_REPLICA REQUEST/RESPONSE */
static int
+dissect_kafka_stop_replica_request_topic_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_tree_add_item(tree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ return offset;
+}
+
+static int
+dissect_kafka_stop_replica_request_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+ int topic_start, topic_len;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topic,
+ &subti, "Topic");
+
+ /* topic */
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_stop_replica_request_topic_partition);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Name=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
dissect_kafka_stop_replica_request_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
int offset, kafka_api_version_t api_version _U_)
{
@@ -1529,7 +2560,7 @@ dissect_kafka_stop_replica_request_partition(tvbuff_t *tvb, packet_info *pinfo _
kafka_partition_t partition;
subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
- ett_kafka_request_partition,
+ ett_kafka_partition,
&subti, "Partition");
/* topic */
@@ -1564,6 +2595,12 @@ dissect_kafka_stop_replica_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree
proto_tree_add_item(tree, hf_kafka_controller_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
+ if (api_version >= 1) {
+ /* broker_epoch */
+ proto_tree_add_item(tree, hf_kafka_broker_epoch, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+ }
+
/* delete_partitions */
proto_tree_add_item(tree, hf_kafka_delete_partitions, tvb, offset, 1, ENC_BIG_ENDIAN);
offset += 1;
@@ -1572,6 +2609,11 @@ dissect_kafka_stop_replica_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree
offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
&dissect_kafka_stop_replica_request_partition);
+ if (api_version >= 1) {
+ offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_stop_replica_request_topic);
+ }
+
col_append_fstr(pinfo->cinfo, COL_INFO, " (Controller-ID=%d)", controller_id);
return offset;
@@ -1588,7 +2630,7 @@ dissect_kafka_stop_replica_response_partition(tvbuff_t *tvb, packet_info *pinfo,
kafka_partition_t partition;
subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
- ett_kafka_response_partition,
+ ett_kafka_partition,
&subti, "Partition");
/* topic */
@@ -1637,11 +2679,16 @@ dissect_kafka_fetch_request_partition(tvbuff_t *tvb, packet_info *pinfo, proto_t
kafka_packet_values_t packet_values;
memset(&packet_values, 0, sizeof(packet_values));
- subtree = proto_tree_add_subtree(tree, tvb, offset, 16, ett_kafka_request_partition, &ti, "Fetch Request Partition");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, 16, ett_kafka_partition, &ti, "Partition");
- offset = dissect_kafka_partition_id_get_value(tvb, pinfo, subtree, offset, &packet_values);
+ offset = dissect_kafka_partition_id_ret(tvb, pinfo, subtree, offset, &packet_values.partition_id);
+
+ if (api_version >= 9) {
+ proto_tree_add_item(subtree, hf_kafka_leader_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ }
- offset = dissect_kafka_offset_get_value(tvb, pinfo, subtree, offset, &packet_values);
+ offset = dissect_kafka_offset_ret(tvb, pinfo, subtree, offset, &packet_values.offset);
if (api_version >= 5) {
proto_tree_add_item(subtree, hf_kafka_log_start_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
@@ -1651,7 +2698,7 @@ dissect_kafka_fetch_request_partition(tvbuff_t *tvb, packet_info *pinfo, proto_t
proto_tree_add_item(subtree, hf_kafka_max_bytes, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
- proto_item_append_text(ti, " (Partition-ID=%u, Offset=%" G_GINT64_MODIFIER "u)",
+ proto_item_append_text(ti, " (ID=%u, Offset=%" G_GINT64_MODIFIER "i)",
packet_values.partition_id, packet_values.offset);
return offset;
@@ -1667,7 +2714,7 @@ dissect_kafka_fetch_request_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree
guint32 count;
int name_start, name_length;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_request_topic, &ti, "Fetch Request Topic");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &ti, "Topic");
offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &name_start, &name_length);
count = tvb_get_ntohl(tvb, offset);
@@ -1681,6 +2728,38 @@ dissect_kafka_fetch_request_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree
}
static int
+dissect_kafka_fetch_request_forgotten_topic_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ proto_tree_add_item(tree, hf_kafka_forgotten_topic_partition, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ return offset;
+}
+
+static int
+dissect_kafka_fetch_request_forgotten_topics_data(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int start_offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *ti;
+ proto_tree *subtree;
+ int offset = start_offset;
+ guint32 count;
+ int name_start, name_length;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_request_forgotten_topic, &ti, "Fetch Request Forgotten Topic Data");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_forgotten_topic_name, tvb, pinfo, offset, &name_start, &name_length);
+ count = tvb_get_ntohl(tvb, offset);
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_fetch_request_forgotten_topic_partition);
+
+ proto_item_set_len(ti, offset - start_offset);
+ proto_item_append_text(ti, " (%u partitions)", count);
+
+ return offset;
+}
+
+static int
dissect_kafka_fetch_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version)
{
@@ -1703,8 +2782,26 @@ dissect_kafka_fetch_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
offset += 1;
}
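+ /* Fetch sessions (KIP-227, Kafka 1.1): the session id and epoch allow
+  * incremental fetch requests that skip unchanged partitions. */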
+ if (api_version >= 7) {
+ proto_tree_add_item(tree, hf_kafka_fetch_session_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ }
+
+ if (api_version >= 7) {
+ proto_tree_add_item(tree, hf_kafka_fetch_session_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ }
+
offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version, &dissect_kafka_fetch_request_topic);
+ if (api_version >= 7) {
+ offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version, &dissect_kafka_fetch_request_forgotten_topics_data);
+ }
+
+ if (api_version >= 11) {
+ offset = dissect_kafka_string(tree, hf_kafka_rack, tvb, pinfo, offset, NULL, NULL);
+ }
+
return offset;
}
@@ -1716,7 +2813,7 @@ dissect_kafka_aborted_transaction(tvbuff_t *tvb, packet_info *pinfo _U_, proto_t
proto_tree *subtree;
int offset = start_offset;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_request_topic, &ti, "Fetch Response Aborted Transaction");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_aborted_transaction, &ti, "Transaction");
proto_tree_add_item(subtree, hf_kafka_producer_id, tvb, offset, 8, ENC_BIG_ENDIAN);
offset += 8;
@@ -1725,6 +2822,24 @@ dissect_kafka_aborted_transaction(tvbuff_t *tvb, packet_info *pinfo _U_, proto_t
offset += 8;
proto_item_set_len(ti, offset - start_offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_aborted_transactions(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int start_offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *ti;
+ proto_tree *subtree;
+ int offset = start_offset;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_aborted_transactions, &ti, "Aborted Transactions");
+
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version, &dissect_kafka_aborted_transaction);
+
+ proto_item_set_len(ti, offset - start_offset);
+
return offset;
}
@@ -1738,31 +2853,33 @@ dissect_kafka_fetch_response_partition(tvbuff_t *tvb, packet_info *pinfo, proto_
kafka_packet_values_t packet_values;
memset(&packet_values, 0, sizeof(packet_values));
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_response_partition, &ti, "Fetch Response Partition");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &ti, "Partition");
- offset = dissect_kafka_partition_id_get_value(tvb, pinfo, subtree, offset, &packet_values);
+ offset = dissect_kafka_partition_id_ret(tvb, pinfo, subtree, offset, &packet_values.partition_id);
offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
- offset = dissect_kafka_offset_get_value(tvb, pinfo, subtree, offset, &packet_values);
+ offset = dissect_kafka_offset_ret(tvb, pinfo, subtree, offset, &packet_values.offset);
if (api_version >= 4) {
proto_tree_add_item(subtree, hf_kafka_last_stable_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
offset += 8;
+ }
- if (api_version >= 5) {
- proto_tree_add_item(subtree, hf_kafka_log_start_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
- offset += 8;
- }
+ if (api_version >= 5) {
+ proto_tree_add_item(subtree, hf_kafka_log_start_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+ }
- offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version, &dissect_kafka_aborted_transaction);
+ if (api_version >= 4) {
+ offset = dissect_kafka_aborted_transactions(tvb, pinfo, subtree, offset, api_version);
}
offset = dissect_kafka_message_set(tvb, pinfo, subtree, offset, TRUE, KAFKA_MESSAGE_CODEC_NONE);
proto_item_set_len(ti, offset - start_offset);
- proto_item_append_text(ti, " (Partition-ID=%u, Offset=%" G_GINT64_MODIFIER "u)",
+ proto_item_append_text(ti, " (ID=%u, Offset=%" G_GINT64_MODIFIER "i)",
packet_values.partition_id, packet_values.offset);
return offset;
@@ -1777,7 +2894,7 @@ dissect_kafka_fetch_response_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree
int offset = start_offset;
guint32 count;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_response_topic, &ti, "Fetch Response Topic");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &ti, "Topic");
offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, NULL, NULL);
count = tvb_get_ntohl(tvb, offset);
@@ -1795,8 +2912,20 @@ dissect_kafka_fetch_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree
kafka_api_version_t api_version)
{
if (api_version >= 1) {
- /* Throttle time */
- proto_tree_add_item(tree, hf_kafka_throttle_time, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+ }
+
+ if (api_version >= 7) {
+ offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+ }
+
+ if (api_version >= 7) {
+ proto_tree_add_item(tree, hf_kafka_fetch_session_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ }
+
+ if (api_version >= 11) {
+ proto_tree_add_item(tree, hf_kafka_replica, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
}
@@ -1814,13 +2943,14 @@ dissect_kafka_produce_request_partition(tvbuff_t *tvb, packet_info *pinfo, proto
kafka_packet_values_t packet_values;
memset(&packet_values, 0, sizeof(packet_values));
- subtree = proto_tree_add_subtree(tree, tvb, offset, 14, ett_kafka_request_partition, &ti, "Produce Request Partition");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, 14, ett_kafka_partition, &ti, "Partition");
- offset = dissect_kafka_partition_id_get_value(tvb, pinfo, subtree, offset, &packet_values);
+ offset = dissect_kafka_partition_id_ret(tvb, pinfo, subtree, offset, &packet_values.partition_id);
offset = dissect_kafka_message_set(tvb, pinfo, subtree, offset, TRUE, KAFKA_MESSAGE_CODEC_NONE);
- proto_item_append_text(ti, " (Partition-ID=%u)", packet_values.partition_id);
+ proto_item_append_text(ti, " (ID=%u)", packet_values.partition_id);
+ proto_item_set_end(ti, tvb, offset);
return offset;
}
@@ -1832,14 +2962,17 @@ dissect_kafka_produce_request_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tre
proto_item *ti;
proto_tree *subtree;
int offset = start_offset;
+ int topic_off, topic_len;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_request_topic, &ti, "Produce Request Topic");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &ti, "Topic");
- offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, NULL, NULL);
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_off, &topic_len);
offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
&dissect_kafka_produce_request_partition);
- proto_item_set_len(ti, offset - start_offset);
+ proto_item_append_text(ti, " (Name=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb, topic_off, topic_len, ENC_UTF_8|ENC_NA));
+ proto_item_set_end(ti, tvb, offset);
return offset;
}
@@ -1873,24 +3006,24 @@ dissect_kafka_produce_response_partition(tvbuff_t *tvb, packet_info *pinfo, prot
kafka_packet_values_t packet_values;
memset(&packet_values, 0, sizeof(packet_values));
- subtree = proto_tree_add_subtree(tree, tvb, offset, 14, ett_kafka_response_partition, &ti, "Produce Response Partition");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, 14, ett_kafka_partition, &ti, "Partition");
- offset = dissect_kafka_partition_id_get_value(tvb, pinfo, subtree, offset, &packet_values);
+ offset = dissect_kafka_partition_id_ret(tvb, pinfo, subtree, offset, &packet_values.partition_id);
offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
- offset = dissect_kafka_offset_get_value(tvb, pinfo, subtree, offset, &packet_values);
+ offset = dissect_kafka_offset_ret(tvb, pinfo, subtree, offset, &packet_values.offset);
if (api_version >= 2) {
offset = dissect_kafka_offset_time(tvb, pinfo, subtree, offset, api_version);
}
if (api_version >= 5) {
- proto_tree_add_item(tree, hf_kafka_log_start_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
+ proto_tree_add_item(subtree, hf_kafka_log_start_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
offset += 8;
}
- proto_item_append_text(ti, " (Partition-ID=%u, Offset=%" G_GINT64_MODIFIER "u)",
+ proto_item_append_text(ti, " (ID=%u, Offset=%" G_GINT64_MODIFIER "i)",
packet_values.partition_id, packet_values.offset);
return offset;
@@ -1904,7 +3037,7 @@ dissect_kafka_produce_response_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tr
proto_tree *subtree;
int offset = start_offset;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_response_topic, &ti, "Produce Response Topic");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &ti, "Topic");
offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, NULL, NULL);
offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
@@ -1922,9 +3055,7 @@ dissect_kafka_produce_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tr
offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version, &dissect_kafka_produce_response_topic);
if (api_version >= 1) {
- /* Throttle time */
- proto_tree_add_item(tree, hf_kafka_throttle_time, tvb, offset, 4, ENC_BIG_ENDIAN);
- offset += 4;
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
}
return offset;
@@ -1939,10 +3070,16 @@ dissect_kafka_offsets_request_partition(tvbuff_t *tvb, packet_info *pinfo, proto
proto_item *ti;
proto_tree *subtree;
int offset = start_offset;
+ kafka_partition_t partition;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_request_partition, &ti, "Offset Request Partition");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &ti, "Partition");
- offset = dissect_kafka_partition_id(tvb, pinfo, subtree, offset, api_version);
+ offset = dissect_kafka_partition_id_ret(tvb, pinfo, subtree, offset, &partition);
+
+ if (api_version >= 4) {
+ proto_tree_add_item(subtree, hf_kafka_leader_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ }
offset = dissect_kafka_offset_time(tvb, pinfo, subtree, offset, api_version);
@@ -1952,6 +3089,7 @@ dissect_kafka_offsets_request_partition(tvbuff_t *tvb, packet_info *pinfo, proto
}
proto_item_set_len(ti, offset - start_offset);
+ proto_item_append_text(ti, " (ID=%u)", partition);
return offset;
}
@@ -1964,7 +3102,7 @@ dissect_kafka_offsets_request_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tre
proto_tree *subtree;
int offset = start_offset;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_request_topic, &ti, "Offset Request Topic");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &ti, "Topic");
offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, NULL, NULL);
offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
@@ -1982,6 +3120,11 @@ dissect_kafka_offsets_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tre
proto_tree_add_item(tree, hf_kafka_replica, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
+ if (api_version >= 2) {
+ proto_tree_add_item(tree, hf_kafka_isolation_level, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+ }
+
offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version, &dissect_kafka_offsets_request_topic);
return offset;
@@ -1994,10 +3137,11 @@ dissect_kafka_offsets_response_partition(tvbuff_t *tvb, packet_info *pinfo, prot
proto_item *ti;
proto_tree *subtree;
int offset = start_offset;
+ kafka_partition_t partition;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_response_partition, &ti, "Offset Response Partition");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &ti, "Partition");
- offset = dissect_kafka_partition_id(tvb, pinfo, subtree, offset, api_version);
+ offset = dissect_kafka_partition_id_ret(tvb, pinfo, subtree, offset, &partition);
offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
@@ -2007,10 +3151,16 @@ dissect_kafka_offsets_response_partition(tvbuff_t *tvb, packet_info *pinfo, prot
else if (api_version >= 1) {
offset = dissect_kafka_offset_time(tvb, pinfo, subtree, offset, api_version);
- offset = dissect_kafka_offset(tvb, pinfo, subtree, offset, api_version);
+ offset = dissect_kafka_offset_ret(tvb, pinfo, subtree, offset, NULL);
+ }
+
+ if (api_version >= 4) {
+ proto_tree_add_item(subtree, hf_kafka_leader_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
}
proto_item_set_len(ti, offset - start_offset);
+ proto_item_append_text(ti, " (ID=%u)", partition);
return offset;
}
@@ -2023,7 +3173,7 @@ dissect_kafka_offsets_response_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tr
proto_tree *subtree;
int offset = start_offset;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_response_topic, &ti, "Offset Response Topic");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &ti, "Topic");
offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, NULL, NULL);
offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
@@ -2035,9 +3185,15 @@ dissect_kafka_offsets_response_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tr
}
static int
-dissect_kafka_offsets_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+dissect_kafka_offsets_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int start_offset,
kafka_api_version_t api_version)
{
+ int offset = start_offset;
+
+ if (api_version >= 2) {
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+ }
+
return dissect_kafka_array(tree, tvb, pinfo, offset, api_version, &dissect_kafka_offsets_response_topic);
}
@@ -2133,6 +3289,9 @@ dissect_kafka_api_versions_response(tvbuff_t *tvb, packet_info *pinfo, proto_tre
offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
&dissect_kafka_api_versions_response_api_version);
+ if (api_version >= 1) {
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+ }
return offset;
}
@@ -2170,12 +3329,14 @@ dissect_kafka_update_metadata_request_partition_state(tvbuff_t *tvb, packet_info
kafka_partition_t partition;
subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
- ett_kafka_request_partition,
- &subti, "Partition State");
+ ett_kafka_partition,
+ &subti, "Partition");
- /* topic */
- offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset,
- &topic_start, &topic_len);
+ if (api_version < 5) {
+ /* topic */
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset,
+ &topic_start, &topic_len);
+ }
/* partition */
partition = (gint32) tvb_get_ntohl(tvb, offset);
@@ -2215,10 +3376,41 @@ dissect_kafka_update_metadata_request_partition_state(tvbuff_t *tvb, packet_info
proto_item_set_end(subsubti, tvb, offset);
proto_item_set_end(subti, tvb, offset);
- proto_item_append_text(subti, " (Topic=%s, Partition-ID=%u)",
+
+ if (api_version >= 5) {
+ proto_item_append_text(subti, " (Partition-ID=%u)",
+ partition);
+ } else {
+ proto_item_append_text(subti, " (Topic=%s, Partition-ID=%u)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA),
+ partition);
+ }
+ return offset;
+}
+
+static int
+dissect_kafka_update_metadata_request_topic_state(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version)
+{
+ proto_tree *subtree;
+ proto_item *subti;
+ int topic_start, topic_len;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topic,
+ &subti, "Topic");
+ /* topic */
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset,
+ &topic_start, &topic_len);
+
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_update_metadata_request_partition_state);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Topic=%s)",
tvb_get_string_enc(wmem_packet_scope(), tvb,
- topic_start, topic_len, ENC_UTF_8|ENC_NA),
- partition);
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
return offset;
}
@@ -2244,6 +3436,11 @@ dissect_kafka_update_metadata_request_end_point(tvbuff_t *tvb, packet_info *pinf
/* host */
offset = dissect_kafka_string(subtree, hf_kafka_broker_host, tvb, pinfo, offset, &host_start, &host_len);
+ if (api_version >= 3) {
+ /* listener_name */
+ offset = dissect_kafka_string(subtree, hf_kafka_listener_name, tvb, pinfo, offset, NULL, NULL);
+ }
+
/* security_protocol_type */
security_protocol_type = (gint16) tvb_get_ntohs(tvb, offset);
proto_tree_add_item(subtree, hf_kafka_broker_security_protocol_type, tvb, offset, 2, ENC_BIG_ENDIAN);
@@ -2300,7 +3497,7 @@ dissect_kafka_update_metadata_request_live_leader(tvbuff_t *tvb, packet_info *pi
if (api_version >= 2) {
/* rack */
- offset = dissect_kafka_string(subtree, hf_kafka_broker_rack, tvb, pinfo, offset, NULL, NULL);
+ offset = dissect_kafka_string(subtree, hf_kafka_rack, tvb, pinfo, offset, NULL, NULL);
}
proto_item_append_text(subti, " (node %d)",
@@ -2327,9 +3524,21 @@ dissect_kafka_update_metadata_request(tvbuff_t *tvb, packet_info *pinfo, proto_t
proto_tree_add_item(tree, hf_kafka_controller_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
- /* [partition_state] */
- offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
- &dissect_kafka_update_metadata_request_partition_state);
+ if (api_version >= 5) {
+ /* broker_epoch */
+ proto_tree_add_item(tree, hf_kafka_broker_epoch, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+ }
+
+ if (api_version >= 5) {
+ /* [topic_state] */
+ offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_update_metadata_request_topic_state);
+ } else {
+ /* [partition_state] */
+ offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_update_metadata_request_partition_state);
+ }
/* [live_leader] */
offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
@@ -2363,6 +3572,11 @@ dissect_kafka_controlled_shutdown_request(tvbuff_t *tvb, packet_info *pinfo, pro
proto_tree_add_item(tree, hf_kafka_broker_nodeid, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
+ if (api_version >= 2) {
+ proto_tree_add_item(tree, hf_kafka_broker_epoch, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+ }
+
col_append_fstr(pinfo->cinfo, COL_INFO, " (Broker-ID=%d)", broker_id);
return offset;
@@ -2378,7 +3592,7 @@ dissect_kafka_controlled_shutdown_response_partition_remaining(tvbuff_t *tvb, pa
int topic_start, topic_len;
kafka_partition_t partition;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_response_topic, &subti,
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti,
"Partition Remaining");
/* topic */
@@ -2424,7 +3638,7 @@ dissect_kafka_offset_commit_request_partition(tvbuff_t *tvb, packet_info *pinfo,
kafka_partition_t partition_id;
kafka_offset_t partition_offset;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_request_partition, &subti,
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti,
"Partition");
/* partition */
@@ -2437,6 +3651,11 @@ dissect_kafka_offset_commit_request_partition(tvbuff_t *tvb, packet_info *pinfo,
proto_tree_add_item(subtree, hf_kafka_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
offset += 8;
+ if (api_version >= 6) {
+ proto_tree_add_item(subtree, hf_kafka_leader_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ }
+
if (api_version == 1) {
/* timestamp */
offset = dissect_kafka_timestamp(tvb, pinfo, subtree, hf_kafka_commit_timestamp, offset);
@@ -2446,7 +3665,7 @@ dissect_kafka_offset_commit_request_partition(tvbuff_t *tvb, packet_info *pinfo,
offset = dissect_kafka_string(subtree, hf_kafka_metadata, tvb, pinfo, offset, NULL, NULL);
proto_item_set_end(subti, tvb, offset);
- proto_item_append_text(subti, " (Partition-ID=%u, Offset=%" G_GINT64_MODIFIER "u)",
+ proto_item_append_text(subti, " (ID=%u, Offset=%" G_GINT64_MODIFIER "i)",
partition_id, partition_offset);
return offset;
@@ -2460,7 +3679,7 @@ dissect_kafka_offset_commit_request_topic(tvbuff_t *tvb, packet_info *pinfo, pro
proto_tree *subtree;
int topic_start, topic_len;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_request_topic, &subti, "Topic");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
/* topic */
offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset,
@@ -2496,7 +3715,12 @@ dissect_kafka_offset_commit_request(tvbuff_t *tvb, packet_info *pinfo, proto_tre
/* member_id */
offset = dissect_kafka_string(tree, hf_kafka_member_id, tvb, pinfo, offset, NULL, NULL);
- if (api_version >= 2) {
+ if (api_version >= 7) {
+ /* instance_id */
+ offset = dissect_kafka_string(tree, hf_kafka_consumer_group_instance, tvb, pinfo, offset, NULL, NULL);
+ }
+
+ if (api_version >= 2 && api_version < 5) {
/* retention_time */
proto_tree_add_item(tree, hf_kafka_retention_time, tvb, offset, 8, ENC_BIG_ENDIAN);
offset += 8;
@@ -2524,8 +3748,8 @@ dissect_kafka_offset_commit_response_partition_response(tvbuff_t *tvb, packet_in
kafka_partition_t partition;
kafka_error_t error;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_response_partition, &subti,
- "Partition Response");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti,
+ "Partition");
/* partition */
partition = (gint32) tvb_get_ntohl(tvb, offset);
@@ -2550,7 +3774,7 @@ dissect_kafka_offset_commit_response_response(tvbuff_t *tvb, packet_info *pinfo,
proto_tree *subtree;
int topic_start, topic_len;
- subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_response_topic, &subti, "Response");
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
/* topic */
offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset,
@@ -2561,7 +3785,7 @@ dissect_kafka_offset_commit_response_response(tvbuff_t *tvb, packet_info *pinfo,
&dissect_kafka_offset_commit_response_partition_response);
proto_item_set_end(subti, tvb, offset);
- proto_item_append_text(subti, " (Topic=%s)",
+ proto_item_append_text(subti, " (Name=%s)",
tvb_get_string_enc(wmem_packet_scope(), tvb,
topic_start, topic_len, ENC_UTF_8|ENC_NA));
@@ -2572,9 +3796,8 @@ static int
dissect_kafka_offset_commit_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version)
{
- if (api_version >= 1) {
- proto_tree_add_item(tree, hf_kafka_throttle_time, tvb, offset, 4, ENC_BIG_ENDIAN);
- offset += 4;
+ if (api_version >= 3) {
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
}
/* [responses] */
@@ -2587,25 +3810,35 @@ dissect_kafka_offset_commit_response(tvbuff_t *tvb, packet_info *pinfo, proto_tr
/* GROUP_COORDINATOR REQUEST/RESPONSE */
static int
-dissect_kafka_group_coordinator_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+dissect_kafka_find_coordinator_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
- kafka_api_version_t api_version _U_)
+ kafka_api_version_t api_version)
{
int group_start, group_len;
- /* group_id */
- offset = dissect_kafka_string(tree, hf_kafka_consumer_group, tvb, pinfo, offset,
- &group_start, &group_len);
+ if (api_version == 0) {
+ /* group_id */
+ offset = dissect_kafka_string(tree, hf_kafka_consumer_group, tvb, pinfo, offset,
+ &group_start, &group_len);
- col_append_fstr(pinfo->cinfo, COL_INFO,
- " (Group=%s)",
- tvb_get_string_enc(wmem_packet_scope(), tvb,
- group_start, group_len, ENC_UTF_8|ENC_NA));
+ col_append_fstr(pinfo->cinfo, COL_INFO,
+ " (Group=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ group_start, group_len, ENC_UTF_8|ENC_NA));
+ } else {
+
+ offset = dissect_kafka_string(tree, hf_kafka_coordinator_key, tvb, pinfo, offset,
+ NULL, NULL);
+
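+ /* coordinator type: 0 = group coordinator, 1 = transaction coordinator */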
+ proto_tree_add_item(tree, hf_kafka_coordinator_type, tvb, offset, 1, ENC_NA);
+ offset += 1;
+
+ }
return offset;
}
static int
-dissect_kafka_group_coordinator_response_coordinator(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+dissect_kafka_find_coordinator_response_coordinator(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
int offset, kafka_api_version_t api_version _U_)
{
proto_item *subti;
@@ -2631,24 +3864,38 @@ dissect_kafka_group_coordinator_response_coordinator(tvbuff_t *tvb, packet_info
offset += 4;
proto_item_set_end(subti, tvb, offset);
- proto_item_append_text(subti, " (node %u: %s:%u)",
- node_id,
- tvb_get_string_enc(wmem_packet_scope(), tvb,
- host_start, host_len, ENC_UTF_8|ENC_NA),
- port);
+
+ if (node_id >= 0) {
+ proto_item_append_text(subti, " (node %d: %s:%d)",
+ node_id,
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ host_start, host_len, ENC_UTF_8|ENC_NA),
+ port);
+ } else {
+ proto_item_append_text(subti, " (none)");
+ }
return offset;
}
static int
-dissect_kafka_group_coordinator_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+dissect_kafka_find_coordinator_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version)
{
+ if (api_version >= 1) {
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+ }
+
/* error_code */
offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+ if (api_version >= 1) {
+ offset = dissect_kafka_string(tree, hf_kafka_error_message, tvb, pinfo, offset,
+ NULL, NULL);
+ }
+
/* coordinator */
- offset = dissect_kafka_group_coordinator_response_coordinator(tvb, pinfo, tree, offset, api_version);
+ offset = dissect_kafka_find_coordinator_response_coordinator(tvb, pinfo, tree, offset, api_version);
return offset;
}
@@ -2708,6 +3955,12 @@ dissect_kafka_join_group_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *
offset = dissect_kafka_string(tree, hf_kafka_member_id, tvb, pinfo, offset,
&member_start, &member_len);
+ if (api_version >= 5) {
+ /* instance id */
+ offset = dissect_kafka_string(tree, hf_kafka_consumer_group_instance, tvb, pinfo, offset,
+ NULL, NULL);
+ }
+
/* protocol_type */
offset = dissect_kafka_string(tree, hf_kafka_protocol_type, tvb, pinfo, offset, NULL, NULL);
@@ -2730,7 +3983,7 @@ dissect_kafka_join_group_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *
static int
dissect_kafka_join_group_response_member(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
- int offset, kafka_api_version_t api_version _U_)
+ int offset, kafka_api_version_t api_version)
{
proto_item *subti;
proto_tree *subtree;
@@ -2742,11 +3995,17 @@ dissect_kafka_join_group_response_member(tvbuff_t *tvb, packet_info *pinfo, prot
offset = dissect_kafka_string(subtree, hf_kafka_member_id, tvb, pinfo, offset,
&member_start, &member_len);
+ if (api_version >= 5) {
+ /* instance id */
+ offset = dissect_kafka_string(subtree, hf_kafka_consumer_group_instance, tvb, pinfo, offset,
+ NULL, NULL);
+ }
+
/* member_metadata */
offset = dissect_kafka_bytes(subtree, hf_kafka_member_metadata, tvb, pinfo, offset, NULL, NULL);
proto_item_set_end(subti, tvb, offset);
- proto_item_append_text(subti, " (Member-ID=%s)",
+ proto_item_append_text(subti, " (Member=%s)",
tvb_get_string_enc(wmem_packet_scope(), tvb,
member_start, member_len, ENC_UTF_8|ENC_NA));
@@ -2761,6 +4020,10 @@ dissect_kafka_join_group_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree
proto_tree *subtree;
int member_start, member_len;
+ if (api_version >= 2) {
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+ }
+
/* error_code */
offset = dissect_kafka_error(tvb, pinfo, tree, offset);
@@ -2813,6 +4076,12 @@ dissect_kafka_heartbeat_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *t
offset = dissect_kafka_string(tree, hf_kafka_member_id, tvb, pinfo, offset,
&member_start, &member_len);
+ if (api_version >= 3) {
+ /* instance_id */
+ offset = dissect_kafka_string(tree, hf_kafka_consumer_group_instance, tvb, pinfo, offset,
+ NULL, NULL);
+ }
+
col_append_fstr(pinfo->cinfo, COL_INFO,
" (Group=%s, Member=%s)",
tvb_get_string_enc(wmem_packet_scope(), tvb,
@@ -2828,8 +4097,7 @@ dissect_kafka_heartbeat_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *
kafka_api_version_t api_version _U_)
{
if (api_version >= 1) {
- proto_tree_add_item(tree, hf_kafka_throttle_time, tvb, offset, 4, ENC_BIG_ENDIAN);
- offset += 4;
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
}
/* error_code */
@@ -2841,26 +4109,108 @@ dissect_kafka_heartbeat_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *
/* LEAVE_GROUP REQUEST/RESPONSE */
static int
-dissect_kafka_leave_group_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+dissect_kafka_leave_group_request_member(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version _U_)
{
+ proto_item *subti;
+ proto_tree *subtree;
+ int member_start, member_len;
+ int instance_start, instance_len;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_group_member, &subti, "Member");
+
+ /* member_id */
+ offset = dissect_kafka_string(subtree, hf_kafka_member_id, tvb, pinfo, offset,
+ &member_start, &member_len);
+
+ /* instance_id */
+ offset = dissect_kafka_string(subtree, hf_kafka_consumer_group_instance, tvb, pinfo, offset,
+ &instance_start, &instance_len);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Member=%s, Group-Instance=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ member_start, member_len, ENC_UTF_8|ENC_NA),
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ instance_start, instance_len, ENC_UTF_8|ENC_NA)
+ );
+
+ return offset;
+}
+
+static int
+dissect_kafka_leave_group_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
int group_start, group_len;
int member_start, member_len;
+ proto_item *subti;
+ proto_tree *subtree;
/* group_id */
offset = dissect_kafka_string(tree, hf_kafka_consumer_group, tvb, pinfo, offset,
&group_start, &group_len);
+ if (api_version >= 0 && api_version <= 2) {
+
+ /* member_id */
+ offset = dissect_kafka_string(tree, hf_kafka_member_id, tvb, pinfo, offset,
+ &member_start, &member_len);
+
+ col_append_fstr(pinfo->cinfo, COL_INFO,
+ " (Group=%s, Member=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ group_start, group_len, ENC_UTF_8|ENC_NA),
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ member_start, member_len, ENC_UTF_8|ENC_NA));
+
+ } else if (api_version >= 3) {
+
+ // KIP-345
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_group_members, &subti, "Members");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_leave_group_request_member);
+ proto_item_set_end(subti, tvb, offset);
+
+ col_append_fstr(pinfo->cinfo, COL_INFO,
+ " (Group=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ group_start, group_len, ENC_UTF_8|ENC_NA));
+
+ }
+
+ return offset;
+}
+
+static int
+dissect_kafka_leave_group_response_member(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+ int member_start, member_len;
+ int instance_start, instance_len;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_group_member, &subti, "Member");
+
/* member_id */
- offset = dissect_kafka_string(tree, hf_kafka_member_id, tvb, pinfo, offset,
+ offset = dissect_kafka_string(subtree, hf_kafka_member_id, tvb, pinfo, offset,
&member_start, &member_len);
- col_append_fstr(pinfo->cinfo, COL_INFO,
- " (Group=%s, Member=%s)",
- tvb_get_string_enc(wmem_packet_scope(), tvb,
- group_start, group_len, ENC_UTF_8|ENC_NA),
- tvb_get_string_enc(wmem_packet_scope(), tvb,
- member_start, member_len, ENC_UTF_8|ENC_NA));
+ /* instance_id */
+ offset = dissect_kafka_string(subtree, hf_kafka_consumer_group_instance, tvb, pinfo, offset,
+ &instance_start, &instance_len);
+
+ /* error_code */
+ offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Member=%s, Group-Instance=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ member_start, member_len, ENC_UTF_8|ENC_NA),
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ instance_start, instance_len, ENC_UTF_8|ENC_NA)
+ );
return offset;
}
@@ -2869,9 +4219,27 @@ static int
dissect_kafka_leave_group_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version _U_)
{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ if (api_version >= 1) {
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+ }
+
/* error_code */
offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+ if (api_version >= 3) {
+
+ // KIP-345
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_group_members, &subti, "Members");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_leave_group_response_member);
+ proto_item_set_end(subti, tvb, offset);
+
+ }
+
return offset;
}
@@ -2924,6 +4292,12 @@ dissect_kafka_sync_group_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *
offset = dissect_kafka_string(tree, hf_kafka_member_id, tvb, pinfo, offset,
&member_start, &member_len);
+ if (api_version >= 3) {
+ /* instance_id */
+ offset = dissect_kafka_string(tree, hf_kafka_consumer_group_instance, tvb, pinfo, offset,
+ NULL, NULL);
+ }
+
/* [group_assignment] */
subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_group_assignments, &subti,
"Group Assignments");
@@ -2945,6 +4319,10 @@ static int
dissect_kafka_sync_group_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version _U_)
{
+ if (api_version >= 1) {
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+ }
+
/* error_code */
offset = dissect_kafka_error(tvb, pinfo, tree, offset);
@@ -2974,6 +4352,11 @@ dissect_kafka_describe_groups_request(tvbuff_t *tvb, packet_info *pinfo, proto_t
offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
&dissect_kafka_describe_groups_request_group_id);
+ if (api_version >= 3) {
+ proto_tree_add_item(tree, hf_kafka_include_group_authorized_ops, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+ }
+
return offset;
}
@@ -2984,6 +4367,7 @@ dissect_kafka_describe_groups_response_member(tvbuff_t *tvb, packet_info *pinfo,
proto_item *subti;
proto_tree *subtree;
int member_start, member_len;
+ int instance_start, instance_len;
subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_group_member, &subti, "Member");
@@ -2991,6 +4375,12 @@ dissect_kafka_describe_groups_response_member(tvbuff_t *tvb, packet_info *pinfo,
offset = dissect_kafka_string(subtree, hf_kafka_member_id, tvb, pinfo, offset,
&member_start, &member_len);
+ if (api_version >= 4) {
+ /* instance_id */
+ offset = dissect_kafka_string(subtree, hf_kafka_consumer_group_instance, tvb, pinfo, offset,
+ &instance_start, &instance_len);
+ }
+
/* client_id */
offset = dissect_kafka_string(subtree, hf_kafka_client_id, tvb, pinfo, offset, NULL, NULL);
@@ -3004,9 +4394,18 @@ dissect_kafka_describe_groups_response_member(tvbuff_t *tvb, packet_info *pinfo,
offset = dissect_kafka_bytes(subtree, hf_kafka_member_assignment, tvb, pinfo, offset, NULL, NULL);
proto_item_set_end(subti, tvb, offset);
- proto_item_append_text(subti, " (Member-ID=%s)",
- tvb_get_string_enc(wmem_packet_scope(), tvb,
- member_start, member_len, ENC_UTF_8|ENC_NA));
+
+ if (api_version < 4) {
+ proto_item_append_text(subti, " (Member=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ member_start, member_len, ENC_UTF_8|ENC_NA));
+ } else {
+ proto_item_append_text(subti, " (Member=%s, Instance=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ member_start, member_len, ENC_UTF_8|ENC_NA),
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ instance_start, instance_len, ENC_UTF_8|ENC_NA));
+ }
return offset;
}
@@ -3043,8 +4442,13 @@ dissect_kafka_describe_groups_response_group(tvbuff_t *tvb, packet_info *pinfo,
&dissect_kafka_describe_groups_response_member);
proto_item_set_end(subsubti, tvb, offset);
+ if (api_version >= 3) {
+ proto_tree_add_item(subtree, hf_kafka_group_authorized_ops, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ }
+
proto_item_set_end(subti, tvb, offset);
- proto_item_append_text(subti, " (Group-ID=%s)",
+ proto_item_append_text(subti, " (Group=%s)",
tvb_get_string_enc(wmem_packet_scope(), tvb,
group_start, group_len, ENC_UTF_8|ENC_NA));
@@ -3055,6 +4459,10 @@ static int
dissect_kafka_describe_groups_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version)
{
+ if (api_version >= 1) {
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+ }
+
/* [group] */
offset = dissect_kafka_array(tree, tvb, pinfo, offset, api_version,
&dissect_kafka_describe_groups_response_group);
@@ -3104,6 +4512,10 @@ static int
dissect_kafka_list_groups_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
kafka_api_version_t api_version)
{
+ if (api_version >= 1) {
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+ }
+
/* error_code */
offset = dissect_kafka_error(tvb, pinfo, tree, offset);
@@ -3293,6 +4705,12 @@ dissect_kafka_create_topics_request(tvbuff_t *tvb, packet_info *pinfo, proto_tre
proto_tree_add_item(tree, hf_kafka_timeout, tvb, offset, 4, ENC_BIG_ENDIAN);
offset += 4;
+ if (api_version >= 1) {
+ /* validate */
+ proto_tree_add_item(tree, hf_kafka_validate_only, tvb, offset, 1, ENC_NA);
+ offset += 1;
+ }
+
return offset;
}
@@ -3331,6 +4749,10 @@ dissect_kafka_create_topics_response(tvbuff_t *tvb, packet_info *pinfo, proto_tr
proto_item *subti;
proto_tree *subtree;
+ if (api_version >= 2) {
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+ }
+
/* [topic_error_code] */
subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
ett_kafka_topics,
@@ -3411,6 +4833,10 @@ dissect_kafka_delete_topics_response(tvbuff_t *tvb, packet_info *pinfo, proto_tr
proto_item *subti;
proto_tree *subtree;
+ if (api_version >= 3) {
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+ }
+
/* [topic_error_code] */
subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
ett_kafka_topics,
@@ -3422,6 +4848,2820 @@ dissect_kafka_delete_topics_response(tvbuff_t *tvb, packet_info *pinfo, proto_tr
return offset;
}
+/* DELETE_RECORDS REQUEST/RESPONSE */
+
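+/*
+ * Layout sketch for the DeleteRecords dissectors below. Field names follow
+ * https://kafka.apache.org/protocol.html and are illustrative; the widths
+ * mirror what the code actually reads:
+ *
+ * DeleteRecords Request (v0-v1):
+ *   [topics]: topic => STRING
+ *     [partitions]: partition => INT32, offset => INT64 (-1 = high watermark)
+ *   timeout => INT32
+ *
+ * DeleteRecords Response (v0-v1):
+ *   throttle_time_ms => INT32
+ *   [topics]: topic => STRING
+ *     [partitions]: partition => INT32, low_watermark => INT64, error_code => INT16
+ */
+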
+static int
+dissect_kafka_delete_records_request_topic_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ guint32 partition_id;
+ gint64 partition_offset;
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti, "Partition");
+
+ partition_id = tvb_get_ntohl(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ partition_offset = tvb_get_ntohi64(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ proto_item_set_end(subti, tvb, offset);
+
+ if (partition_offset == -1) {
+ proto_item_append_text(subti, " (ID=%u, Offset=HWM)", partition_id);
+ } else {
+ proto_item_append_text(subti, " (ID=%u, Offset=%" G_GINT64_MODIFIER "i)", partition_id, partition_offset);
+ }
+
+ return offset;
+}
+
+static int
+dissect_kafka_delete_records_request_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ int topic_start, topic_len;
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_delete_records_request_topic_partition);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Topic=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
+dissect_kafka_delete_records_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ /* [topic] */
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_delete_records_request_topic);
+ proto_item_set_end(subti, tvb, offset);
+
+ /* timeout */
+ proto_tree_add_item(tree, hf_kafka_timeout, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ return offset;
+}
+
+static int
+dissect_kafka_delete_records_response_topic_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ guint32 partition_id;
+ gint64 partition_offset; // low watermark
+ kafka_error_t partition_error_code;
+
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti, "Partition");
+
+ partition_id = tvb_get_ntohl(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ partition_offset = tvb_get_ntohi64(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ partition_error_code = (kafka_error_t) tvb_get_ntohs(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_error, tvb, offset, 2, ENC_BIG_ENDIAN);
+ offset += 2;
+
+ proto_item_set_end(subti, tvb, offset);
+
+ if (partition_error_code == 0) {
+ proto_item_append_text(subti, " (ID=%u, Offset=%" G_GINT64_MODIFIER "i)", partition_id, partition_offset);
+ } else {
+ proto_item_append_text(subti, " (ID=%u, Error=%s)", partition_id, kafka_error_to_str(partition_error_code));
+ }
+
+ return offset;
+}
+
+static int
+dissect_kafka_delete_records_response_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ int topic_start, topic_len;
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_delete_records_response_topic_partition);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Topic=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+
+static int
+dissect_kafka_delete_records_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ /* [topic_error_code] */
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_delete_records_response_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* INIT_PRODUCER_ID REQUEST/RESPONSE */
+
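+/*
+ * Layout sketch (names per https://kafka.apache.org/protocol.html, illustrative):
+ *
+ * InitProducerId Request (v0-v1):
+ *   transactional_id => NULLABLE_STRING, transaction_timeout_ms => INT32
+ *
+ * InitProducerId Response (v0-v1):
+ *   throttle_time_ms => INT32, error_code => INT16,
+ *   producer_id => INT64, producer_epoch => INT16
+ */
+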
+static int
+dissect_kafka_init_producer_id_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ offset = dissect_kafka_string(tree, hf_kafka_transactional_id, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(tree, hf_kafka_transaction_timeout, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ return offset;
+}
+
+
+static int
+dissect_kafka_init_producer_id_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+
+ proto_tree_add_item(tree, hf_kafka_producer_id, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ proto_tree_add_item(tree, hf_kafka_producer_epoch, tvb, offset, 2, ENC_BIG_ENDIAN);
+ offset += 2;
+
+ return offset;
+}
+
+/* OFFSET_FOR_LEADER_EPOCH REQUEST/RESPONSE */
+
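+/*
+ * Layout sketch (names per https://kafka.apache.org/protocol.html, illustrative):
+ *
+ * OffsetForLeaderEpoch Request:
+ *   replica_id => INT32 (v3+; -2 = debug, -1 = consumer)
+ *   [topics]: topic => STRING
+ *     [partitions]: partition => INT32,
+ *                   current_leader_epoch => INT32 (v2+),
+ *                   leader_epoch => INT32
+ *
+ * OffsetForLeaderEpoch Response:
+ *   throttle_time_ms => INT32 (v2+)
+ *   [topics]: topic => STRING
+ *     [partitions]: error_code => INT16, partition => INT32,
+ *                   leader_epoch => INT32 (v1+), end_offset => INT64
+ */
+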
+static int
+dissect_kafka_offset_for_leader_epoch_request_topic_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version)
+{
+ guint32 partition_id;
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti, "Partition");
+
+ partition_id = tvb_get_ntohl(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ if (api_version >= 2) {
+ proto_tree_add_item(subtree, hf_kafka_current_leader_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ }
+
+ proto_tree_add_item(subtree, hf_kafka_leader_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ proto_item_set_end(subti, tvb, offset);
+
+ proto_item_append_text(subti, " (ID=%u)", partition_id);
+
+ return offset;
+}
+
+static int
+dissect_kafka_offset_for_leader_epoch_request_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ int topic_start, topic_len;
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_offset_for_leader_epoch_request_topic_partition);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Name=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
+dissect_kafka_offset_for_leader_epoch_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+ gint32 replica_id;
+
+ if (api_version >= 3) {
+ replica_id = tvb_get_ntohl(tvb, offset);
+ subti = proto_tree_add_item(tree, hf_kafka_replica, tvb, offset, 4, ENC_BIG_ENDIAN);
+ if (replica_id==-2) {
+ proto_item_append_text(subti, " (debug)");
+ } else if (replica_id==-1) {
+ proto_item_append_text(subti, " (consumer)");
+ }
+ offset += 4;
+ }
+
+ /* [topic] */
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_offset_for_leader_epoch_request_topic);
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_offset_for_leader_epoch_response_topic_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ guint32 partition_id;
+ kafka_error_t partition_error_code;
+
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti, "Partition");
+
+ partition_error_code = (kafka_error_t) tvb_get_ntohs(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_error, tvb, offset, 2, ENC_BIG_ENDIAN);
+ offset += 2;
+
+ partition_id = tvb_get_ntohl(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ if (api_version >= 1) {
+ proto_tree_add_item(subtree, hf_kafka_leader_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ }
+
+ proto_tree_add_item(subtree, hf_kafka_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ proto_item_set_end(subti, tvb, offset);
+
+ if (partition_error_code == 0) {
+ proto_item_append_text(subti, " (ID=%u)", partition_id);
+ } else {
+ proto_item_append_text(subti, " (ID=%u, Error=%s)", partition_id, kafka_error_to_str(partition_error_code));
+ }
+
+ return offset;
+}
+
+static int
+dissect_kafka_offset_for_leader_epoch_response_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ int topic_start, topic_len;
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_offset_for_leader_epoch_response_topic_partition);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Name=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+
+static int
+dissect_kafka_offset_for_leader_epoch_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ if (api_version >= 2) {
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+ }
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_offset_for_leader_epoch_response_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* ADD_PARTITIONS_TO_TXN REQUEST/RESPONSE */
+
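+/*
+ * Layout sketch (names per https://kafka.apache.org/protocol.html, illustrative):
+ *
+ * AddPartitionsToTxn Request:
+ *   transactional_id => STRING, producer_id => INT64, producer_epoch => INT16
+ *   [topics]: topic => STRING, [partitions]: partition => INT32
+ *
+ * AddPartitionsToTxn Response:
+ *   throttle_time_ms => INT32
+ *   [topics]: topic => STRING, [partitions]: partition => INT32, error_code => INT16
+ */
+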
+static int
+dissect_kafka_add_partitions_to_txn_request_topic_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_tree_add_item(tree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ return offset;
+}
+
+static int
+dissect_kafka_add_partitions_to_txn_request_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ int topic_start, topic_len;
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_add_partitions_to_txn_request_topic_partition);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Topic=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
+dissect_kafka_add_partitions_to_txn_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_string(tree, hf_kafka_transactional_id, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(tree, hf_kafka_producer_id, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ proto_tree_add_item(tree, hf_kafka_producer_epoch, tvb, offset, 2, ENC_BIG_ENDIAN);
+ offset += 2;
+
+ /* [topic] */
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_add_partitions_to_txn_request_topic);
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_add_partitions_to_txn_response_topic_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ guint32 partition_id;
+ kafka_error_t partition_error_code;
+
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti, "Partition");
+
+ partition_id = tvb_get_ntohl(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ partition_error_code = (kafka_error_t) tvb_get_ntohs(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_error, tvb, offset, 2, ENC_BIG_ENDIAN);
+ offset += 2;
+
+ proto_item_set_end(subti, tvb, offset);
+
+ if (partition_error_code == 0) {
+ proto_item_append_text(subti, " (ID=%u)", partition_id);
+ } else {
+ proto_item_append_text(subti, " (ID=%u, Error=%s)", partition_id, kafka_error_to_str(partition_error_code));
+ }
+
+ return offset;
+}
+
+static int
+dissect_kafka_add_partitions_to_txn_response_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ int topic_start, topic_len;
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_add_partitions_to_txn_response_topic_partition);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Topic=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+
+static int
+dissect_kafka_add_partitions_to_txn_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_add_partitions_to_txn_response_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* ADD_OFFSETS_TO_TXN REQUEST/RESPONSE */
+
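+/*
+ * Layout sketch (names per https://kafka.apache.org/protocol.html, illustrative):
+ *
+ * AddOffsetsToTxn Request:
+ *   transactional_id => STRING, producer_id => INT64,
+ *   producer_epoch => INT16, group_id => STRING
+ *
+ * AddOffsetsToTxn Response:
+ *   throttle_time_ms => INT32, error_code => INT16
+ */
+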
+static int
+dissect_kafka_add_offsets_to_txn_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ offset = dissect_kafka_string(tree, hf_kafka_transactional_id, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(tree, hf_kafka_producer_id, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ proto_tree_add_item(tree, hf_kafka_producer_epoch, tvb, offset, 2, ENC_BIG_ENDIAN);
+ offset += 2;
+
+ offset = dissect_kafka_string(tree, hf_kafka_consumer_group, tvb, pinfo, offset, NULL, NULL);
+
+ return offset;
+}
+
+
+static int
+dissect_kafka_add_offsets_to_txn_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+
+ return offset;
+}
+
+/* END_TXN REQUEST/RESPONSE */
+
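+/*
+ * Layout sketch (names per https://kafka.apache.org/protocol.html, illustrative):
+ *
+ * EndTxn Request:
+ *   transactional_id => STRING, producer_id => INT64, producer_epoch => INT16,
+ *   committed => BOOLEAN (the transaction result: commit or abort)
+ *
+ * EndTxn Response:
+ *   throttle_time_ms => INT32, error_code => INT16
+ */
+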
+static int
+dissect_kafka_end_txn_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ offset = dissect_kafka_string(tree, hf_kafka_transactional_id, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(tree, hf_kafka_producer_id, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ proto_tree_add_item(tree, hf_kafka_producer_epoch, tvb, offset, 2, ENC_BIG_ENDIAN);
+ offset += 2;
+
+ proto_tree_add_item(tree, hf_kafka_transaction_result, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ return offset;
+}
+
+
+static int
+dissect_kafka_end_txn_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+
+ return offset;
+}
+
+/* WRITE_TXN_MARKERS REQUEST/RESPONSE */
+
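+/*
+ * Layout sketch as parsed by the dissectors below (names per
+ * https://kafka.apache.org/protocol.html, illustrative):
+ *
+ * WriteTxnMarkers Request:
+ *   [markers]: producer_id => INT64, producer_epoch => INT16,
+ *              transaction_result => BOOLEAN,
+ *              [topics]: topic => STRING, [partitions]: partition => INT32
+ *              coordinator_epoch => INT32
+ *
+ * WriteTxnMarkers Response (as parsed here):
+ *   throttle_time_ms => INT32
+ *   [markers]: producer_id => INT64,
+ *              [topics]: topic => STRING,
+ *                [partitions]: partition => INT32, error_code => INT16
+ */
+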
+static int
+dissect_kafka_write_txn_markers_request_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_tree_add_item(tree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ return offset;
+}
+
+static int
+dissect_kafka_write_txn_markers_request_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ int topic_start, topic_len;
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_write_txn_markers_request_partition);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Topic=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
+dissect_kafka_write_txn_markers_request_marker(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ guint64 producer_id;
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_marker,
+ &subti, "Marker");
+
+ producer_id = tvb_get_ntoh64(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_producer_id, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ proto_tree_add_item(subtree, hf_kafka_producer_epoch, tvb, offset, 2, ENC_BIG_ENDIAN);
+ offset += 2;
+
+ proto_tree_add_item(subtree, hf_kafka_transaction_result, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subsubti, "Topics");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_write_txn_markers_request_topic);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ /* coordinator_epoch belongs to the marker, not to the topics array */
+ proto_tree_add_item(subtree, hf_kafka_coordinator_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Producer=%" G_GINT64_MODIFIER "u)", producer_id);
+
+ return offset;
+}
+
+static int
+dissect_kafka_write_txn_markers_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ /* [topic] */
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_markers,
+ &subti, "Markers");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_write_txn_markers_request_marker);
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_write_txn_markers_response_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ guint32 partition_id;
+ kafka_error_t partition_error_code;
+
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti, "Partition");
+
+ partition_id = tvb_get_ntohl(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ partition_error_code = (kafka_error_t) tvb_get_ntohs(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_error, tvb, offset, 2, ENC_BIG_ENDIAN);
+ offset += 2;
+
+ proto_item_set_end(subti, tvb, offset);
+
+ if (partition_error_code == 0) {
+ proto_item_append_text(subti, " (ID=%u)", partition_id);
+ } else {
+ proto_item_append_text(subti, " (ID=%u, Error=%s)", partition_id, kafka_error_to_str(partition_error_code));
+ }
+
+ return offset;
+}
+
+static int
+dissect_kafka_write_txn_markers_response_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ int topic_start, topic_len;
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_write_txn_markers_response_partition);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Topic=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
+dissect_kafka_write_txn_markers_response_marker(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ guint64 producer_id;
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_marker, &subti, "Marker");
+
+ producer_id = tvb_get_ntoh64(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_producer_id, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_topics, &subsubti, "Topics");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_write_txn_markers_response_topic);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Producer=%" G_GINT64_MODIFIER "u)", producer_id);
+
+ return offset;
+}
+
+static int
+dissect_kafka_write_txn_markers_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_markers,
+ &subti, "Markers");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_write_txn_markers_response_marker);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* TXN_OFFSET_COMMIT REQUEST/RESPONSE */
+
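+/*
+ * Layout sketch (names per https://kafka.apache.org/protocol.html, illustrative):
+ *
+ * TxnOffsetCommit Request:
+ *   transactional_id => STRING, group_id => STRING,
+ *   producer_id => INT64, producer_epoch => INT16
+ *   [topics]: topic => STRING
+ *     [partitions]: partition => INT32, offset => INT64,
+ *                   leader_epoch => INT32 (v2+), metadata => NULLABLE_STRING
+ *
+ * TxnOffsetCommit Response:
+ *   throttle_time_ms => INT32
+ *   [topics]: topic => STRING, [partitions]: partition => INT32, error_code => INT16
+ */
+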
+static int
+dissect_kafka_txn_offset_commit_request_partition(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version)
+{
+ guint32 partition_id;
+ gint64 partition_offset;
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti, "Partition");
+
+ partition_id = tvb_get_ntohl(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ partition_offset = tvb_get_ntohi64(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_offset, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ if (api_version >= 2) {
+ proto_tree_add_item(subtree, hf_kafka_leader_epoch, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+ }
+
+ offset = dissect_kafka_string(subtree, hf_kafka_metadata, tvb, pinfo, offset, NULL, NULL);
+ proto_item_set_end(subti, tvb, offset);
+
+ proto_item_append_text(subti, " (ID=%u, Offset=%" G_GINT64_MODIFIER "i)", partition_id, partition_offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_txn_offset_commit_request_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ int topic_start, topic_len;
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_txn_offset_commit_request_partition);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Topic=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
+dissect_kafka_txn_offset_commit_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_string(tree, hf_kafka_transactional_id, tvb, pinfo, offset, NULL, NULL);
+
+ offset = dissect_kafka_string(tree, hf_kafka_consumer_group, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(tree, hf_kafka_producer_id, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ proto_tree_add_item(tree, hf_kafka_producer_epoch, tvb, offset, 2, ENC_BIG_ENDIAN);
+ offset += 2;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_txn_offset_commit_request_topic);
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_txn_offset_commit_response_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ guint32 partition_id;
+ kafka_error_t partition_error_code;
+
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti, "Partition");
+
+ partition_id = tvb_get_ntohl(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ partition_error_code = (kafka_error_t) tvb_get_ntohs(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_error, tvb, offset, 2, ENC_BIG_ENDIAN);
+ offset += 2;
+
+ proto_item_set_end(subti, tvb, offset);
+
+ if (partition_error_code == 0) {
+ proto_item_append_text(subti, " (ID=%u)", partition_id);
+ } else {
+ proto_item_append_text(subti, " (ID=%u, Error=%s)", partition_id, kafka_error_to_str(partition_error_code));
+ }
+
+ return offset;
+}
+
+static int
+dissect_kafka_txn_offset_commit_response_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ int topic_start, topic_len;
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_txn_offset_commit_response_partition);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Topic=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+
+static int
+dissect_kafka_txn_offset_commit_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ /* [topic_error_code] */
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_txn_offset_commit_response_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* DESCRIBE_ACLS REQUEST/RESPONSE */
+
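+/*
+ * Layout sketch (names per https://kafka.apache.org/protocol.html, illustrative):
+ *
+ * DescribeAcls Request:
+ *   resource_type => INT8, resource_name => NULLABLE_STRING,
+ *   pattern_type => INT8 (v1+),
+ *   principal => NULLABLE_STRING, host => NULLABLE_STRING,
+ *   operation => INT8, permission_type => INT8
+ *
+ * DescribeAcls Response:
+ *   throttle_time_ms => INT32, error_code => INT16, error_message => NULLABLE_STRING
+ *   [resources]: resource_type => INT8, resource_name => STRING,
+ *                pattern_type => INT8 (v1+),
+ *                [acls]: principal => STRING, host => STRING,
+ *                        operation => INT8, permission_type => INT8
+ */
+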
+static int
+dissect_kafka_describe_acls_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_tree_add_item(tree, hf_kafka_acl_resource_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ offset = dissect_kafka_string(tree, hf_kafka_acl_resource_name, tvb, pinfo, offset, NULL, NULL);
+
+ if (api_version >= 1) {
+ proto_tree_add_item(tree, hf_kafka_acl_resource_pattern_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+ }
+
+ offset = dissect_kafka_string(tree, hf_kafka_acl_principal, tvb, pinfo, offset, NULL, NULL);
+
+ offset = dissect_kafka_string(tree, hf_kafka_acl_host, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(tree, hf_kafka_acl_operation, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ proto_tree_add_item(tree, hf_kafka_acl_permission_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_acls_response_resource_acl(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_acl, &subti, "ACL");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_acl_principal, tvb, pinfo, offset, NULL, NULL);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_acl_host, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(subtree, hf_kafka_acl_operation, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ proto_tree_add_item(subtree, hf_kafka_acl_permission_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_acls_response_resource(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_resource, &subti, "Resource");
+
+ proto_tree_add_item(subtree, hf_kafka_acl_resource_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ offset = dissect_kafka_string(subtree, hf_kafka_acl_resource_name, tvb, pinfo, offset, NULL, NULL);
+
+ if (api_version >= 1) {
+ proto_tree_add_item(subtree, hf_kafka_acl_resource_pattern_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+ }
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_acls, &subsubti, "ACLs");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_describe_acls_response_resource_acl);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+
+static int
+dissect_kafka_describe_acls_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+
+ offset = dissect_kafka_string(tree, hf_kafka_error_message, tvb, pinfo, offset, NULL, NULL);
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_resources,
+ &subti, "Resources");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_describe_acls_response_resource);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* CREATE_ACLS REQUEST/RESPONSE */
+
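+/*
+ * Layout sketch (names per https://kafka.apache.org/protocol.html, illustrative):
+ *
+ * CreateAcls Request:
+ *   [creations]: resource_type => INT8, resource_name => STRING,
+ *                pattern_type => INT8 (v1+), principal => STRING, host => STRING,
+ *                operation => INT8, permission_type => INT8
+ *
+ * CreateAcls Response:
+ *   throttle_time_ms => INT32
+ *   [results]: error_code => INT16, error_message => NULLABLE_STRING
+ */
+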
+static int
+dissect_kafka_create_acls_request_creation(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_acl_creation, &subti, "Creation");
+
+ proto_tree_add_item(subtree, hf_kafka_acl_resource_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ offset = dissect_kafka_string(subtree, hf_kafka_acl_resource_name, tvb, pinfo, offset, NULL, NULL);
+
+ if (api_version >= 1) {
+ proto_tree_add_item(subtree, hf_kafka_acl_resource_pattern_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+ }
+
+ offset = dissect_kafka_string(subtree, hf_kafka_acl_principal, tvb, pinfo, offset, NULL, NULL);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_acl_host, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(subtree, hf_kafka_acl_operation, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ proto_tree_add_item(subtree, hf_kafka_acl_permission_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_create_acls_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_acl_creations,
+ &subti, "Creations");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_create_acls_request_creation);
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_create_acls_response_creation(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_acl_creation, &subti, "Creation");
+
+ offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_error_message, tvb, pinfo, offset, NULL, NULL);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_create_acls_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_acl_creations,
+ &subti, "Creations");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_create_acls_response_creation);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* DELETE_ACLS REQUEST/RESPONSE */
+
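+/*
+ * Layout sketch (names per https://kafka.apache.org/protocol.html, illustrative):
+ *
+ * DeleteAcls Request:
+ *   [filters]: resource_type => INT8, resource_name => NULLABLE_STRING,
+ *              pattern_type => INT8 (v1+), principal => NULLABLE_STRING,
+ *              host => NULLABLE_STRING, operation => INT8, permission_type => INT8
+ *
+ * DeleteAcls Response:
+ *   throttle_time_ms => INT32
+ *   [filter_results]: error_code => INT16, error_message => NULLABLE_STRING
+ *     [matching_acls]: error_code => INT16, error_message => NULLABLE_STRING,
+ *                      resource_type => INT8, resource_name => STRING,
+ *                      pattern_type => INT8 (v1+), principal => STRING,
+ *                      host => STRING, operation => INT8, permission_type => INT8
+ */
+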
+static int
+dissect_kafka_delete_acls_request_filter(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_acl_filter, &subti, "Filter");
+
+ proto_tree_add_item(subtree, hf_kafka_acl_resource_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ offset = dissect_kafka_string(subtree, hf_kafka_acl_resource_name, tvb, pinfo, offset, NULL, NULL);
+
+ if (api_version >= 1) {
+ proto_tree_add_item(subtree, hf_kafka_acl_resource_pattern_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+ }
+
+ offset = dissect_kafka_string(subtree, hf_kafka_acl_principal, tvb, pinfo, offset, NULL, NULL);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_acl_host, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(subtree, hf_kafka_acl_operation, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ proto_tree_add_item(subtree, hf_kafka_acl_permission_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_delete_acls_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_acl_filter,
+ &subti, "Filters");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_delete_acls_request_filter);
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_delete_acls_response_match(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_acl_filter_match, &subti, "Match");
+
+ offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_error_message, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(subtree, hf_kafka_acl_resource_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ offset = dissect_kafka_string(subtree, hf_kafka_acl_resource_name, tvb, pinfo, offset, NULL, NULL);
+
+ if (api_version >= 1) {
+ proto_tree_add_item(subtree, hf_kafka_acl_resource_pattern_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+ }
+
+ offset = dissect_kafka_string(subtree, hf_kafka_acl_principal, tvb, pinfo, offset, NULL, NULL);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_acl_host, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(subtree, hf_kafka_acl_operation, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ proto_tree_add_item(subtree, hf_kafka_acl_permission_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_delete_acls_response_filter(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_acl_filter, &subti, "Filter");
+
+ offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_error_message, tvb, pinfo, offset, NULL, NULL);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1,
+ ett_kafka_acl_filter_matches,
+ &subsubti, "Matches");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_delete_acls_response_match);
+
+ proto_item_set_end(subsubti, tvb, offset);
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_delete_acls_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_acl_creations,
+ &subti, "Filters");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_delete_acls_response_filter);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* DESCRIBE_CONFIGS REQUEST/RESPONSE */
+
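+/*
+ * Layout sketch (names per https://kafka.apache.org/protocol.html, illustrative):
+ *
+ * DescribeConfigs Request:
+ *   [resources]: resource_type => INT8, resource_name => STRING,
+ *                [config_names]: STRING
+ *   include_synonyms => BOOLEAN (v1+)
+ *
+ * DescribeConfigs Response:
+ *   throttle_time_ms => INT32
+ *   [results]: error_code => INT16, error_message => NULLABLE_STRING,
+ *              resource_type => INT8, resource_name => STRING,
+ *     [configs]: name => STRING, value => NULLABLE_STRING, read_only => BOOLEAN,
+ *                is_default => BOOLEAN (v0) / config_source => INT8 (v1+),
+ *                is_sensitive => BOOLEAN,
+ *                [synonyms] (v1+): name => STRING, value => NULLABLE_STRING,
+ *                                  source => INT8
+ */
+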
+static int
+dissect_kafka_describe_config_request_entry(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ offset = dissect_kafka_string(tree, hf_kafka_config_key, tvb, pinfo, offset, NULL, NULL);
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_config_request_resource(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_resource, &subti, "Resource");
+
+ proto_tree_add_item(subtree, hf_kafka_config_resource_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ offset = dissect_kafka_string(subtree, hf_kafka_config_resource_name, tvb, pinfo, offset, NULL, NULL);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_config_entries, &subsubti, "Entries");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_describe_config_request_entry);
+
+ proto_item_set_end(subsubti, tvb, offset);
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_configs_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_resources,
+ &subti, "Resources");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_describe_config_request_resource);
+ proto_item_set_end(subti, tvb, offset);
+
+ if (api_version >= 1) {
+ /* include_synonyms is a request-level field, not part of the resources array */
+ proto_tree_add_item(tree, hf_kafka_config_include_synonyms, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+ }
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_configs_response_synonym(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+ int key_start, key_len;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_config_synonym, &subti, "Synonym");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_config_key, tvb, pinfo, offset, &key_start, &key_len);
+ offset = dissect_kafka_string(subtree, hf_kafka_config_value, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(subtree, hf_kafka_config_source, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Key=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ key_start, key_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_configs_response_entry(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+ int key_start, key_len;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_config_entry, &subti, "Entry");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_config_key, tvb, pinfo, offset, &key_start, &key_len);
+ offset = dissect_kafka_string(subtree, hf_kafka_config_value, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(subtree, hf_kafka_config_readonly, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ if (api_version == 0) {
+ proto_tree_add_item(subtree, hf_kafka_config_default, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+ } else {
+ proto_tree_add_item(subtree, hf_kafka_config_source, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+ }
+
+ proto_tree_add_item(subtree, hf_kafka_config_sensitive, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ if (api_version >= 1) {
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1,
+ ett_kafka_config_synonyms,
+ &subsubti, "Synonyms");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_describe_configs_response_synonym);
+
+ proto_item_set_end(subsubti, tvb, offset);
+ }
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Key=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ key_start, key_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_configs_response_resource(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_resource, &subti, "Resource");
+
+ offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_error_message, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(subtree, hf_kafka_config_resource_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ offset = dissect_kafka_string(subtree, hf_kafka_config_resource_name, tvb, pinfo, offset, NULL, NULL);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1,
+ ett_kafka_config_entries,
+ &subsubti, "Entries");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_describe_configs_response_entry);
+
+ proto_item_set_end(subsubti, tvb, offset);
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_configs_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_resources,
+ &subti, "Resources");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_describe_configs_response_resource);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* ALTER_CONFIGS REQUEST/RESPONSE */
+
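+/*
+ * Layout sketch (names per https://kafka.apache.org/protocol.html, illustrative):
+ *
+ * AlterConfigs Request:
+ *   [resources]: resource_type => INT8, resource_name => STRING,
+ *                [configs]: name => STRING, value => NULLABLE_STRING
+ *   validate_only => BOOLEAN
+ *
+ * AlterConfigs Response:
+ *   throttle_time_ms => INT32
+ *   [responses]: error_code => INT16, error_message => NULLABLE_STRING,
+ *                resource_type => INT8, resource_name => STRING
+ */
+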
+static int
+dissect_kafka_alter_config_request_entry(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_config_entry, &subti, "Entry");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_config_key, tvb, pinfo, offset, NULL, NULL);
+ offset = dissect_kafka_string(subtree, hf_kafka_config_value, tvb, pinfo, offset, NULL, NULL);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_config_request_resource(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_resource, &subti, "Resource");
+
+ proto_tree_add_item(subtree, hf_kafka_config_resource_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ offset = dissect_kafka_string(subtree, hf_kafka_config_resource_name, tvb, pinfo, offset, NULL, NULL);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_config_entries, &subsubti, "Entries");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_alter_config_request_entry);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_configs_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_resources,
+ &subti, "Resources");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_alter_config_request_resource);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ proto_tree_add_item(tree, hf_kafka_validate_only, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_configs_response_resource(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_resource, &subti, "Resource");
+
+ offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_error_message, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(subtree, hf_kafka_config_resource_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ offset = dissect_kafka_string(subtree, hf_kafka_config_resource_name, tvb, pinfo, offset, NULL, NULL);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_configs_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_resources,
+ &subti, "Resources");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_alter_configs_response_resource);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* ALTER_REPLICA_LOG_DIRS REQUEST/RESPONSE */
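+/*
+ * Wire layout handled below (see the Kafka protocol guide):
+ *   Request:  [log_dirs => log_dir (STRING),
+ *             [topics => topic (STRING), [partition_id (INT32)]]]
+ *   Response: throttle_time_ms (INT32),
+ *             [topics => topic (STRING),
+ *             [partitions => partition_id (INT32), error_code (INT16)]]
+ */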
+
+static int
+dissect_kafka_alter_replica_log_dirs_request_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_tree_add_item(tree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_replica_log_dirs_request_topic(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+ int topic_start, topic_len;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_resource, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_alter_replica_log_dirs_request_partition);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Name=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_replica_log_dirs_request_log_dir(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_log_dir, &subti, "Log Directory");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_log_dir, tvb, pinfo, offset, NULL, NULL);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_topics, &subsubti, "Topics");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_alter_replica_log_dirs_request_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_replica_log_dirs_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_log_dirs,
+ &subti, "Log Directories");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_alter_replica_log_dirs_request_log_dir);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_replica_log_dirs_response_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+ int partition_id;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti, "Partition");
+
+ partition_id = tvb_get_ntohl(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
+
+ proto_item_append_text(subti, " (ID=%u)", partition_id);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_replica_log_dirs_response_topic(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+ int topic_start, topic_len;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_alter_replica_log_dirs_response_partition);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Name=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_replica_log_dirs_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_alter_replica_log_dirs_response_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* DESCRIBE_LOG_DIRS REQUEST/RESPONSE */
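+/*
+ * Wire layout handled below (see the Kafka protocol guide):
+ *   Request:  [topics => topic (STRING), [partition_id (INT32)]]
+ *   Response: throttle_time_ms (INT32),
+ *             [log_dirs => error_code (INT16), log_dir (STRING),
+ *             [topics => topic (STRING),
+ *             [partitions => partition_id (INT32), size (INT64),
+ *             offset_lag (INT64), is_future (BOOLEAN)]]]
+ */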
+
+static int
+dissect_kafka_describe_log_dirs_request_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_tree_add_item(tree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_log_dirs_request_topic(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+ int topic_start, topic_len;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_resource, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ subsubtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_describe_log_dirs_request_partition);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Name=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_log_dirs_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_describe_log_dirs_request_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_log_dirs_response_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ int partition_id;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti, "Partition");
+
+ partition_id = tvb_get_ntohl(tvb, offset);
+ proto_tree_add_item(subtree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ proto_tree_add_item(subtree, hf_kafka_segment_size, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ proto_tree_add_item(subtree, hf_kafka_offset_lag, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ proto_tree_add_item(subtree, hf_kafka_future, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (ID=%u)", partition_id);
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_log_dirs_response_topic(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+ int topic_start, topic_len;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_describe_log_dirs_response_partition);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Name=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_log_dirs_response_log_dir(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+ int dir_start, dir_len;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_log_dir, &subti, "Log Directory");
+
+ offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_log_dir, tvb, pinfo, offset, &dir_start, &dir_len);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_topics, &subsubti, "Topics");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_describe_log_dirs_response_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Dir=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ dir_start, dir_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_log_dirs_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_log_dirs,
+ &subti, "Log Directories");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_describe_log_dirs_response_log_dir);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* CREATE_PARTITIONS REQUEST/RESPONSE */
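+/*
+ * Wire layout handled below (see the Kafka protocol guide):
+ *   Request:  [topics => topic (STRING), count (INT32), [broker_id (INT32)]],
+ *             timeout (INT32), validate_only (BOOLEAN)
+ *   Response: throttle_time_ms (INT32),
+ *             [topics => topic (STRING), error_code (INT16),
+ *             error_message (NULLABLE_STRING)]
+ * Note: the spec nests the assignment brokers per new partition; they are
+ * rendered as a flat broker list here.
+ */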
+
+static int
+dissect_kafka_create_partitions_request_broker(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_tree_add_item(tree, hf_kafka_broker_nodeid, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ return offset;
+}
+
+static int
+dissect_kafka_create_partitions_request_topic(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+ int topic_start, topic_len;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_resource, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ proto_tree_add_item(subtree, hf_kafka_partition_count, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_brokers, &subsubti, "Brokers");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_create_partitions_request_broker);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Name=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
+dissect_kafka_create_partitions_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_create_partitions_request_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ proto_tree_add_item(tree, hf_kafka_timeout, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ proto_tree_add_item(tree, hf_kafka_validate_only, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ return offset;
+}
+
+static int
+dissect_kafka_create_partitions_response_topic(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+ int topic_start, topic_len;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, &topic_start, &topic_len);
+
+ offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_error_message, tvb, pinfo, offset, NULL, NULL);
+
+ proto_item_set_end(subti, tvb, offset);
+ proto_item_append_text(subti, " (Name=%s)",
+ tvb_get_string_enc(wmem_packet_scope(), tvb,
+ topic_start, topic_len, ENC_UTF_8|ENC_NA));
+
+ return offset;
+}
+
+static int
+dissect_kafka_create_partitions_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_create_partitions_response_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* SASL_AUTHENTICATE REQUEST/RESPONSE */
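+/*
+ * Wire layout handled below (see the Kafka protocol guide):
+ *   Request:  auth_bytes (BYTES)
+ *   Response: error_code (INT16), error_message (NULLABLE_STRING),
+ *             auth_bytes (BYTES), session_lifetime_ms (INT64, v1+)
+ */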
+
+static int
+dissect_kafka_sasl_authenticate_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ offset = dissect_kafka_bytes(tree, hf_kafka_sasl_auth_bytes, tvb, pinfo, offset, NULL, NULL);
+
+ return offset;
+}
+
+static int
+dissect_kafka_sasl_authenticate_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+
+ offset = dissect_kafka_string(tree, hf_kafka_error_message, tvb, pinfo, offset, NULL, NULL);
+
+ offset = dissect_kafka_bytes(tree, hf_kafka_sasl_auth_bytes, tvb, pinfo, offset, NULL, NULL);
+
+ if (api_version >= 1) {
+ proto_tree_add_item(tree, hf_kafka_session_lifetime_ms, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+ }
+
+ return offset;
+}
+
+/* CREATE_DELEGATION_TOKEN REQUEST/RESPONSE */
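+/*
+ * Wire layout handled below (see the Kafka protocol guide):
+ *   Request:  [renewers => principal_type (STRING), principal_name (STRING)],
+ *             max_lifetime_ms (INT64)
+ *   Response: error_code (INT16), owner principal_type/name (STRING),
+ *             issue/expiry/max timestamps (INT64), token_id (STRING),
+ *             hmac (BYTES), throttle_time_ms (INT32)
+ */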
+
+static int
+dissect_kafka_create_delegation_token_request_renewer(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_renewer, &subti, "Renewer");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_token_principal_type, tvb, pinfo, offset, NULL, NULL);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_token_principal_name, tvb, pinfo, offset, NULL, NULL);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_create_delegation_token_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_renewers,
+ &subti, "Renewers");
+
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_create_delegation_token_request_renewer);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ proto_tree_add_item(tree, hf_kafka_token_max_life_time, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ return offset;
+}
+
+static int
+dissect_kafka_create_delegation_token_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+
+ offset = dissect_kafka_string(tree, hf_kafka_token_principal_type, tvb, pinfo, offset, NULL, NULL);
+ offset = dissect_kafka_string(tree, hf_kafka_token_principal_name, tvb, pinfo, offset, NULL, NULL);
+
+ offset = dissect_kafka_timestamp(tvb, pinfo, tree, hf_kafka_token_issue_timestamp, offset);
+ offset = dissect_kafka_timestamp(tvb, pinfo, tree, hf_kafka_token_expiry_timestamp, offset);
+ offset = dissect_kafka_timestamp(tvb, pinfo, tree, hf_kafka_token_max_timestamp, offset);
+
+ offset = dissect_kafka_string(tree, hf_kafka_token_id, tvb, pinfo, offset, NULL, NULL);
+ offset = dissect_kafka_bytes(tree, hf_kafka_token_hmac, tvb, pinfo, offset, NULL, NULL);
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ return offset;
+}
+
+/* RENEW_DELEGATION_TOKEN REQUEST/RESPONSE */
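+/*
+ * Wire layout handled below (see the Kafka protocol guide):
+ *   Request:  hmac (BYTES), renew_period_ms (INT64)
+ *   Response: error_code (INT16), expiry_timestamp (INT64),
+ *             throttle_time_ms (INT32)
+ */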
+
+static int
+dissect_kafka_renew_delegation_token_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ offset = dissect_kafka_bytes(tree, hf_kafka_token_hmac, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(tree, hf_kafka_token_renew_time, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ return offset;
+}
+
+static int
+dissect_kafka_renew_delegation_token_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+ offset = dissect_kafka_timestamp(tvb, pinfo, tree, hf_kafka_token_expiry_timestamp, offset);
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ return offset;
+}
+
+/* EXPIRE_DELEGATION_TOKEN REQUEST/RESPONSE */
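+/*
+ * Wire layout handled below (see the Kafka protocol guide):
+ *   Request:  hmac (BYTES), expiry_time_period_ms (INT64)
+ *   Response: error_code (INT16), expiry_timestamp (INT64),
+ *             throttle_time_ms (INT32)
+ */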
+
+static int
+dissect_kafka_expire_delegation_token_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ offset = dissect_kafka_bytes(tree, hf_kafka_token_hmac, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(tree, hf_kafka_token_expiry_time, tvb, offset, 8, ENC_BIG_ENDIAN);
+ offset += 8;
+
+ return offset;
+}
+
+static int
+dissect_kafka_expire_delegation_token_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+ offset = dissect_kafka_timestamp(tvb, pinfo, tree, hf_kafka_token_expiry_timestamp, offset);
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ return offset;
+}
+
+/* DESCRIBE_DELEGATION_TOKEN REQUEST/RESPONSE */
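+/*
+ * Wire layout handled below (see the Kafka protocol guide):
+ *   Request:  [owners => principal_type (STRING), principal_name (STRING)]
+ *   Response: error_code (INT16),
+ *             [tokens => owner principal, issue/expiry/max timestamps,
+ *             token_id (STRING), hmac (BYTES),
+ *             [renewers => principal_type, principal_name]],
+ *             throttle_time_ms (INT32)
+ */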
+
+static int
+dissect_kafka_describe_delegation_token_request_owner(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_owner, &subti, "Owner");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_token_principal_type, tvb, pinfo, offset, NULL, NULL);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_token_principal_name, tvb, pinfo, offset, NULL, NULL);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_delegation_token_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_owners,
+ &subti, "Owners");
+
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_describe_delegation_token_request_owner);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_delegation_token_response_renewer(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_renewer, &subti, "Renewer");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_token_principal_type, tvb, pinfo, offset, NULL, NULL);
+ offset = dissect_kafka_string(subtree, hf_kafka_token_principal_name, tvb, pinfo, offset, NULL, NULL);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_delegation_token_response_token(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_token, &subti, "Token");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_token_principal_type, tvb, pinfo, offset, NULL, NULL);
+ offset = dissect_kafka_string(subtree, hf_kafka_token_principal_name, tvb, pinfo, offset, NULL, NULL);
+
+ offset = dissect_kafka_timestamp(tvb, pinfo, subtree, hf_kafka_token_issue_timestamp, offset);
+ offset = dissect_kafka_timestamp(tvb, pinfo, subtree, hf_kafka_token_expiry_timestamp, offset);
+ offset = dissect_kafka_timestamp(tvb, pinfo, subtree, hf_kafka_token_max_timestamp, offset);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_token_id, tvb, pinfo, offset, NULL, NULL);
+ offset = dissect_kafka_bytes(subtree, hf_kafka_token_hmac, tvb, pinfo, offset, NULL, NULL);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1,
+ ett_kafka_renewers,
+ &subsubti, "Renewers");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_describe_delegation_token_response_renewer);
+
+ proto_item_set_end(subsubti, tvb, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_describe_delegation_token_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_tokens,
+ &subti, "Tokens");
+
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_describe_delegation_token_response_token);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ return offset;
+}
+
+/* DELETE_GROUPS REQUEST/RESPONSE */
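+/*
+ * Wire layout handled below (see the Kafka protocol guide):
+ *   Request:  [group_id (STRING)]
+ *   Response: throttle_time_ms (INT32),
+ *             [groups => group_id (STRING), error_code (INT16)]
+ */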
+
+static int
+dissect_kafka_delete_groups_request_group(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ offset = dissect_kafka_string(tree, hf_kafka_consumer_group, tvb, pinfo, offset, NULL, NULL);
+
+ return offset;
+}
+
+static int
+dissect_kafka_delete_groups_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_groups,
+ &subti, "Groups");
+
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_delete_groups_request_group);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_delete_groups_response_group(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_group, &subti, "Group");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_consumer_group, tvb, pinfo, offset, NULL, NULL);
+ offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_delete_groups_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_groups,
+ &subti, "Groups");
+
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_delete_groups_response_group);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* ELECT_LEADERS REQUEST/RESPONSE */
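+/*
+ * Wire layout handled below (see the Kafka protocol guide):
+ *   Request:  election_type (INT8, v1+),
+ *             [topics => topic (STRING), [partition_id (INT32)]],
+ *             timeout (INT32)
+ *   Response: throttle_time_ms (INT32), error_code (INT16, v1+),
+ *             [topics => topic (STRING),
+ *             [partitions => partition_id (INT32), error_code (INT16),
+ *             error_message (NULLABLE_STRING)]]
+ */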
+
+static int
+dissect_kafka_elect_leaders_request_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_tree_add_item(tree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ return offset;
+}
+
+static int
+dissect_kafka_elect_leaders_request_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topic,
+ &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, NULL, NULL);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1,
+ ett_kafka_partitions,
+ &subsubti, "Partitions");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_elect_leaders_request_partition);
+
+ proto_item_set_end(subsubti, tvb, offset);
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_elect_leaders_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ if (api_version >= 1) {
+ proto_tree_add_item(tree, hf_kafka_election_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+ }
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_elect_leaders_request_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ proto_tree_add_item(tree, hf_kafka_timeout, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ return offset;
+}
+
+static int
+dissect_kafka_elect_leaders_response_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_partition,
+ &subti, "Partition");
+
+ proto_tree_add_item(subtree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_error_message, tvb, pinfo, offset, NULL, NULL);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_elect_leaders_response_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topic,
+ &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, NULL, NULL);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1,
+ ett_kafka_partitions,
+ &subsubti, "Partitions");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_elect_leaders_response_partition);
+
+ proto_item_set_end(subsubti, tvb, offset);
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_elect_leaders_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ if (api_version >= 1) {
+ offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+ }
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_elect_leaders_response_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* INCREMENTAL_ALTER_CONFIGS REQUEST/RESPONSE */
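+/*
+ * Wire layout handled below (see the Kafka protocol guide):
+ *   Request:  [resources => resource_type (INT8), resource_name (STRING),
+ *             [entries => name (STRING), operation (INT8),
+ *             value (NULLABLE_STRING)]],
+ *             validate_only (BOOLEAN)
+ *   Response: throttle_time_ms (INT32),
+ *             [resources => error_code (INT16), error_message (NULLABLE_STRING),
+ *             resource_type (INT8), resource_name (STRING)]
+ */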
+
+static int
+dissect_kafka_inc_alter_config_request_entry(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_config_entry, &subti, "Entry");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_config_key, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(subtree, hf_kafka_config_operation, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ offset = dissect_kafka_string(subtree, hf_kafka_config_value, tvb, pinfo, offset, NULL, NULL);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_inc_alter_config_request_resource(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_resource, &subti, "Resource");
+
+ proto_tree_add_item(subtree, hf_kafka_config_resource_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ offset = dissect_kafka_string(subtree, hf_kafka_config_resource_name, tvb, pinfo, offset, NULL, NULL);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_config_entries, &subsubti, "Entries");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_inc_alter_config_request_entry);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_inc_alter_configs_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_resources,
+ &subti, "Resources");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_inc_alter_config_request_resource);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ proto_tree_add_item(tree, hf_kafka_validate_only, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ return offset;
+}
+
+static int
+dissect_kafka_inc_alter_configs_response_resource(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_resource, &subti, "Resource");
+
+ offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_error_message, tvb, pinfo, offset, NULL, NULL);
+
+ proto_tree_add_item(subtree, hf_kafka_config_resource_type, tvb, offset, 1, ENC_BIG_ENDIAN);
+ offset += 1;
+
+ offset = dissect_kafka_string(subtree, hf_kafka_config_resource_name, tvb, pinfo, offset, NULL, NULL);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_inc_alter_configs_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_resources,
+ &subti, "Resources");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_inc_alter_configs_response_resource);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* ALTER_PARTITION_REASSIGNMENTS REQUEST/RESPONSE */
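+/*
+ * KIP-455 wire layout handled below (see the Kafka protocol guide):
+ *   Request:  timeout (INT32),
+ *             [topics => topic (STRING),
+ *             [partitions => partition_id (INT32), [replica (INT32)]]]
+ *   Response: throttle_time_ms (INT32), error_code (INT16),
+ *             error_message (NULLABLE_STRING),
+ *             [topics => topic (STRING),
+ *             [partitions => partition_id (INT32), error_code (INT16),
+ *             error_message (NULLABLE_STRING)]]
+ */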
+
+static int
+dissect_kafka_alter_partition_reassignments_request_replica(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_tree_add_item(tree, hf_kafka_replica, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_partition_reassignments_request_partition(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+ kafka_partition_t partition;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti, "Partition");
+
+ offset = dissect_kafka_partition_id_ret(tvb, pinfo, subtree, offset, &partition);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_replicas, &subsubti, "Replicas");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_alter_partition_reassignments_request_replica);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_partition_reassignments_request_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, NULL, NULL);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_alter_partition_reassignments_request_partition);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_partition_reassignments_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ proto_tree_add_item(tree, hf_kafka_timeout, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_alter_partition_reassignments_request_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_partition_reassignments_response_partition(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+ kafka_partition_t partition;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti, "Partition");
+
+ offset = dissect_kafka_partition_id_ret(tvb, pinfo, subtree, offset, &partition);
+
+ offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_error_message, tvb, pinfo, offset, NULL, NULL);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_partition_reassignments_response_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, NULL, NULL);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_alter_partition_reassignments_response_partition);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_alter_partition_reassignments_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+
+ offset = dissect_kafka_string(tree, hf_kafka_error_message, tvb, pinfo, offset, NULL, NULL);
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_alter_partition_reassignments_response_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+/* LIST_PARTITION_REASSIGNMENTS REQUEST/RESPONSE */
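+/*
+ * KIP-455 wire layout handled below (see the Kafka protocol guide):
+ *   Request:  timeout (INT32), [topics => topic (STRING), [partition_id (INT32)]]
+ *   Response: throttle_time_ms (INT32), error_code (INT16),
+ *             error_message (NULLABLE_STRING),
+ *             [topics => topic (STRING),
+ *             [partitions => partition_id (INT32), error_code (INT16),
+ *             error_message (NULLABLE_STRING),
+ *             current/adding/removing replica lists]]
+ */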
+
+static int
+dissect_kafka_list_partition_reassignments_request_partition(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_tree_add_item(tree, hf_kafka_partition_id, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ return offset;
+}
+
+static int
+dissect_kafka_list_partition_reassignments_request_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, NULL, NULL);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_list_partition_reassignments_request_partition);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_list_partition_reassignments_request(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ proto_tree_add_item(tree, hf_kafka_timeout, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_list_partition_reassignments_request_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_list_partition_reassignments_response_replica(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
+ int offset, kafka_api_version_t api_version _U_)
+{
+ proto_tree_add_item(tree, hf_kafka_replica, tvb, offset, 4, ENC_BIG_ENDIAN);
+ offset += 4;
+
+ return offset;
+}
+
+static int
+dissect_kafka_list_partition_reassignments_response_partition(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+ kafka_partition_t partition;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_partition, &subti, "Partition");
+
+ offset = dissect_kafka_partition_id_ret(tvb, pinfo, subtree, offset, &partition);
+
+ offset = dissect_kafka_error(tvb, pinfo, subtree, offset);
+
+ offset = dissect_kafka_string(subtree, hf_kafka_error_message, tvb, pinfo, offset, NULL, NULL);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1,
+ ett_kafka_replicas,
+ &subsubti, "Current Replicas");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_list_partition_reassignments_response_replica);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1,
+ ett_kafka_replicas,
+ &subsubti, "Adding Replicas");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_list_partition_reassignments_response_replica);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1,
+ ett_kafka_replicas,
+ &subsubti, "Removing Replicas");
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_list_partition_reassignments_response_replica);
+ proto_item_set_end(subsubti, tvb, offset);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_list_partition_reassignments_response_topic(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ int offset, kafka_api_version_t api_version)
+{
+ proto_item *subti, *subsubti;
+ proto_tree *subtree, *subsubtree;
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_topic, &subti, "Topic");
+
+ offset = dissect_kafka_string(subtree, hf_kafka_topic_name, tvb, pinfo, offset, NULL, NULL);
+
+ subsubtree = proto_tree_add_subtree(subtree, tvb, offset, -1, ett_kafka_partitions, &subsubti, "Partitions");
+
+ offset = dissect_kafka_array(subsubtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_list_partition_reassignments_response_partition);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
+static int
+dissect_kafka_list_partition_reassignments_response(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, int offset,
+ kafka_api_version_t api_version)
+{
+ proto_item *subti;
+ proto_tree *subtree;
+
+ offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
+
+ offset = dissect_kafka_error(tvb, pinfo, tree, offset);
+
+ offset = dissect_kafka_string(tree, hf_kafka_error_message, tvb, pinfo, offset, NULL, NULL);
+
+ subtree = proto_tree_add_subtree(tree, tvb, offset, -1,
+ ett_kafka_topics,
+ &subti, "Topics");
+ offset = dissect_kafka_array(subtree, tvb, pinfo, offset, api_version,
+ &dissect_kafka_list_partition_reassignments_response_topic);
+
+ proto_item_set_end(subti, tvb, offset);
+
+ return offset;
+}
+
/* MAIN */
static int
@@ -3456,7 +7696,7 @@ dissect_kafka(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U
matcher = (kafka_query_response_t *) p_get_proto_data(wmem_file_scope(), pinfo, proto_kafka, 0);
}
- if (value_is_in_range(current_kafka_tcp_range, pinfo->destport)) {
+ if (pinfo->destport == pinfo->match_uint) {
/* Request (as directed towards server port) */
if (matcher == NULL) {
matcher = wmem_new(wmem_file_scope(), kafka_query_response_t);
@@ -3493,11 +7733,18 @@ dissect_kafka(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U
proto_item_set_generated(ti);
}
+
ti = proto_tree_add_item(kafka_tree, hf_kafka_request_api_key, tvb, offset, 2, ENC_BIG_ENDIAN);
+ proto_item_set_hidden(ti);
+
+ ti = proto_tree_add_item(kafka_tree, hf_kafka_api_key, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
kafka_check_supported_api_key(pinfo, ti, matcher);
ti = proto_tree_add_item(kafka_tree, hf_kafka_request_api_version, tvb, offset, 2, ENC_BIG_ENDIAN);
+ proto_item_set_hidden(ti);
+
+ ti = proto_tree_add_item(kafka_tree, hf_kafka_api_version, tvb, offset, 2, ENC_BIG_ENDIAN);
offset += 2;
kafka_check_supported_api_version(pinfo, ti, matcher);
@@ -3542,8 +7789,8 @@ dissect_kafka(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U
case KAFKA_OFFSET_FETCH:
/*offset =*/ dissect_kafka_offset_fetch_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
break;
- case KAFKA_GROUP_COORDINATOR:
- /*offset =*/ dissect_kafka_group_coordinator_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ case KAFKA_FIND_COORDINATOR:
+ /*offset =*/ dissect_kafka_find_coordinator_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
break;
case KAFKA_JOIN_GROUP:
/*offset =*/ dissect_kafka_join_group_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
@@ -3575,6 +7822,84 @@ dissect_kafka(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U
case KAFKA_DELETE_TOPICS:
/*offset =*/ dissect_kafka_delete_topics_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
break;
+ case KAFKA_DELETE_RECORDS:
+ /*offset =*/ dissect_kafka_delete_records_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_INIT_PRODUCER_ID:
+ /*offset =*/ dissect_kafka_init_producer_id_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_OFFSET_FOR_LEADER_EPOCH:
+ /*offset =*/ dissect_kafka_offset_for_leader_epoch_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_ADD_PARTITIONS_TO_TXN:
+ /*offset =*/ dissect_kafka_add_partitions_to_txn_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_ADD_OFFSETS_TO_TXN:
+ /*offset =*/ dissect_kafka_add_offsets_to_txn_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_END_TXN:
+ /*offset =*/ dissect_kafka_end_txn_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_WRITE_TXN_MARKERS:
+ /*offset =*/ dissect_kafka_write_txn_markers_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_TXN_OFFSET_COMMIT:
+ /*offset =*/ dissect_kafka_txn_offset_commit_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_DESCRIBE_ACLS:
+ /*offset =*/ dissect_kafka_describe_acls_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_CREATE_ACLS:
+ /*offset =*/ dissect_kafka_create_acls_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_DELETE_ACLS:
+ /*offset =*/ dissect_kafka_delete_acls_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_DESCRIBE_CONFIGS:
+ /*offset =*/ dissect_kafka_describe_configs_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_ALTER_CONFIGS:
+ /*offset =*/ dissect_kafka_alter_configs_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_ALTER_REPLICA_LOG_DIRS:
+ /*offset =*/ dissect_kafka_alter_replica_log_dirs_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_DESCRIBE_LOG_DIRS:
+ /*offset =*/ dissect_kafka_describe_log_dirs_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_CREATE_PARTITIONS:
+ /*offset =*/ dissect_kafka_create_partitions_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_SASL_AUTHENTICATE:
+ /*offset =*/ dissect_kafka_sasl_authenticate_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_CREATE_DELEGATION_TOKEN:
+ /*offset =*/ dissect_kafka_create_delegation_token_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_RENEW_DELEGATION_TOKEN:
+ /*offset =*/ dissect_kafka_renew_delegation_token_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_EXPIRE_DELEGATION_TOKEN:
+ /*offset =*/ dissect_kafka_expire_delegation_token_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_DESCRIBE_DELEGATION_TOKEN:
+ /*offset =*/ dissect_kafka_describe_delegation_token_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_DELETE_GROUPS:
+ /*offset =*/ dissect_kafka_delete_groups_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_ELECT_LEADERS:
+ /*offset =*/ dissect_kafka_elect_leaders_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_INC_ALTER_CONFIGS:
+ /*offset =*/ dissect_kafka_inc_alter_configs_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_ALTER_PARTITION_REASSIGNMENTS:
+ /*offset =*/ dissect_kafka_alter_partition_reassignments_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_LIST_PARTITION_REASSIGNMENTS:
+ /*offset =*/ dissect_kafka_list_partition_reassignments_request(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
}
}
else {
@@ -3619,12 +7944,20 @@ dissect_kafka(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U
ti = proto_tree_add_int(kafka_tree, hf_kafka_response_api_key, tvb,
0, 0, matcher->api_key);
proto_item_set_generated(ti);
+ proto_item_set_hidden(ti);
+ ti = proto_tree_add_int(kafka_tree, hf_kafka_api_key, tvb,
+ 0, 0, matcher->api_key);
+ proto_item_set_generated(ti);
kafka_check_supported_api_key(pinfo, ti, matcher);
/* Also show api version from request */
ti = proto_tree_add_int(kafka_tree, hf_kafka_response_api_version, tvb,
0, 0, matcher->api_version);
proto_item_set_generated(ti);
+ proto_item_set_hidden(ti);
+ ti = proto_tree_add_int(kafka_tree, hf_kafka_api_version, tvb,
+ 0, 0, matcher->api_version);
+ proto_item_set_generated(ti);
kafka_check_supported_api_version(pinfo, ti, matcher);
switch (matcher->api_key) {
@@ -3658,8 +7991,8 @@ dissect_kafka(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U
case KAFKA_OFFSET_FETCH:
/*offset =*/ dissect_kafka_offset_fetch_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
break;
- case KAFKA_GROUP_COORDINATOR:
- /*offset =*/ dissect_kafka_group_coordinator_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ case KAFKA_FIND_COORDINATOR:
+ /*offset =*/ dissect_kafka_find_coordinator_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
break;
case KAFKA_JOIN_GROUP:
/*offset =*/ dissect_kafka_join_group_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
@@ -3691,6 +8024,84 @@ dissect_kafka(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void* data _U
case KAFKA_DELETE_TOPICS:
/*offset =*/ dissect_kafka_delete_topics_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
break;
+ case KAFKA_DELETE_RECORDS:
+ /*offset =*/ dissect_kafka_delete_records_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_INIT_PRODUCER_ID:
+ /*offset =*/ dissect_kafka_init_producer_id_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_OFFSET_FOR_LEADER_EPOCH:
+ /*offset =*/ dissect_kafka_offset_for_leader_epoch_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_ADD_PARTITIONS_TO_TXN:
+ /*offset =*/ dissect_kafka_add_partitions_to_txn_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_ADD_OFFSETS_TO_TXN:
+ /*offset =*/ dissect_kafka_add_offsets_to_txn_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_END_TXN:
+ /*offset =*/ dissect_kafka_end_txn_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_WRITE_TXN_MARKERS:
+ /*offset =*/ dissect_kafka_write_txn_markers_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_TXN_OFFSET_COMMIT:
+ /*offset =*/ dissect_kafka_txn_offset_commit_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_DESCRIBE_ACLS:
+ /*offset =*/ dissect_kafka_describe_acls_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_CREATE_ACLS:
+ /*offset =*/ dissect_kafka_create_acls_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_DELETE_ACLS:
+ /*offset =*/ dissect_kafka_delete_acls_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_DESCRIBE_CONFIGS:
+ /*offset =*/ dissect_kafka_describe_configs_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_ALTER_CONFIGS:
+ /*offset =*/ dissect_kafka_alter_configs_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_ALTER_REPLICA_LOG_DIRS:
+ /*offset =*/ dissect_kafka_alter_replica_log_dirs_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_DESCRIBE_LOG_DIRS:
+ /*offset =*/ dissect_kafka_describe_log_dirs_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_CREATE_PARTITIONS:
+ /*offset =*/ dissect_kafka_create_partitions_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_SASL_AUTHENTICATE:
+ /*offset =*/ dissect_kafka_sasl_authenticate_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_CREATE_DELEGATION_TOKEN:
+ /*offset =*/ dissect_kafka_create_delegation_token_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_RENEW_DELEGATION_TOKEN:
+ /*offset =*/ dissect_kafka_renew_delegation_token_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_EXPIRE_DELEGATION_TOKEN:
+ /*offset =*/ dissect_kafka_expire_delegation_token_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_DESCRIBE_DELEGATION_TOKEN:
+ /*offset =*/ dissect_kafka_describe_delegation_token_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_DELETE_GROUPS:
+ /*offset =*/ dissect_kafka_delete_groups_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_ELECT_LEADERS:
+ /*offset =*/ dissect_kafka_elect_leaders_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_INC_ALTER_CONFIGS:
+ /*offset =*/ dissect_kafka_inc_alter_configs_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_ALTER_PARTITION_REASSIGNMENTS:
+ /*offset =*/ dissect_kafka_alter_partition_reassignments_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
+ case KAFKA_LIST_PARTITION_REASSIGNMENTS:
+ /*offset =*/ dissect_kafka_list_partition_reassignments_response(tvb, pinfo, kafka_tree, offset, matcher->api_version);
+ break;
}
}
@@ -3708,12 +8119,6 @@ dissect_kafka_tcp(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
return tvb_captured_length(tvb);
}
-
-static void
-apply_kafka_prefs(void) {
- current_kafka_tcp_range = prefs_get_range_value("kafka", "tcp.port");
-}
-
static void
compute_kafka_api_names(void)
{
@@ -3770,7 +8175,7 @@ proto_register_kafka(void)
},
{ &hf_kafka_metadata,
{ "Metadata", "kafka.metadata",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_ASCII, 0, 0,
NULL, HFILL }
},
{ &hf_kafka_error,
@@ -3778,6 +8183,23 @@ proto_register_kafka(void)
FT_INT16, BASE_DEC, VALS(kafka_errors), 0,
NULL, HFILL }
},
+ { &hf_kafka_error_message,
+ { "Error Message", "kafka.error_message",
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_api_key,
+ { "API Key", "kafka.api_key",
+ FT_INT16, BASE_DEC, VALS(kafka_api_names), 0,
+ "Request API Key.", HFILL }
+ },
+ { &hf_kafka_api_version,
+ { "API Version", "kafka.api_version",
+ FT_INT16, BASE_DEC, 0, 0,
+ "Request API Version.", HFILL }
+ },
+ /* Deprecated: kept so existing "kafka.request_key"-style filters keep working. */
+ /* --- begin deprecated --- */
{ &hf_kafka_request_api_key,
{ "API Key", "kafka.request_key",
FT_INT16, BASE_DEC, VALS(kafka_api_names), 0,
@@ -3798,6 +8220,7 @@ proto_register_kafka(void)
FT_INT16, BASE_DEC, 0, 0,
"Response API Version.", HFILL }
},
+ /* --- end deprecated --- */
{ &hf_kafka_correlation_id,
{ "Correlation ID", "kafka.correlation_id",
FT_INT32, BASE_DEC, 0, 0,
@@ -3805,17 +8228,27 @@ proto_register_kafka(void)
},
{ &hf_kafka_client_id,
{ "Client ID", "kafka.client_id",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_ASCII, 0, 0,
"The ID of the sending client.", HFILL }
},
{ &hf_kafka_client_host,
{ "Client Host", "kafka.client_host",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_ASCII, 0, 0,
NULL, HFILL }
},
{ &hf_kafka_transactional_id,
{ "Transactional ID", "kafka.transactional_id",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_transaction_result,
+ { "Transaction Result", "kafka.transaction_result",
+ FT_INT8, BASE_DEC, VALS(kafka_transaction_results), 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_transaction_timeout,
+ { "Transaction Timeout", "kafka.transaction_timeout",
+ FT_INT32, BASE_DEC, 0, 0,
NULL, HFILL }
},
{ &hf_kafka_string_len,
@@ -3828,11 +8261,6 @@ proto_register_kafka(void)
FT_INT32, BASE_DEC, 0, 0,
"Generic length for kafka-encoded bytes.", HFILL }
},
- { &hf_kafka_array_count,
- { "Array Count", "kafka.array_count",
- FT_INT32, BASE_DEC, 0, 0,
- NULL, HFILL }
- },
{ &hf_kafka_required_acks,
{ "Required Acks", "kafka.required_acks",
FT_INT16, BASE_DEC, VALS(kafka_acks), 0,
@@ -3845,7 +8273,7 @@ proto_register_kafka(void)
},
{ &hf_kafka_topic_name,
{ "Topic Name", "kafka.topic_name",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_UNICODE, 0, 0,
NULL, HFILL }
},
{ &hf_kafka_producer_id,
@@ -3853,6 +8281,11 @@ proto_register_kafka(void)
FT_INT64, BASE_DEC, 0, 0,
NULL, HFILL }
},
+ { &hf_kafka_producer_epoch,
+ { "Producer Epoch", "kafka.producer_epoch",
+ FT_INT16, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
{ &hf_kafka_partition_id,
{ "Partition ID", "kafka.partition_id",
FT_INT32, BASE_DEC, 0, 0,
@@ -3873,10 +8306,10 @@ proto_register_kafka(void)
FT_INT32, BASE_DEC, 0, 0,
NULL, HFILL }
},
- { &hf_kafka_partition_leader,
- { "Leader", "kafka.leader",
- FT_INT32, BASE_DEC, 0, 0,
- NULL, HFILL }
+ { &hf_kafka_offline,
+ { "Offline Replica ID", "kafka.offline_id",
+ FT_INT32, BASE_DEC, 0, 0,
+ NULL, HFILL }
},
{ &hf_kafka_message_set_size,
{ "Message Set Size", "kafka.message_set_size",
@@ -3908,6 +8341,61 @@ proto_register_kafka(void)
FT_UINT8, BASE_DEC, VALS(kafka_message_timestamp_types), KAFKA_MESSAGE_TIMESTAMP_MASK,
NULL, HFILL }
},
+ { &hf_kafka_batch_crc,
+ { "CRC32", "kafka.batch_crc",
+ FT_UINT32, BASE_HEX, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_batch_magic,
+ { "Magic Byte", "kafka.batch_magic",
+ FT_INT8, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_batch_codec,
+ { "Compression Codec", "kafka.batch_codec",
+ FT_UINT16, BASE_DEC, VALS(kafka_message_codecs), KAFKA_MESSAGE_CODEC_MASK,
+ NULL, HFILL }
+ },
+ { &hf_kafka_batch_timestamp_type,
+ { "Timestamp Type", "kafka.batch_timestamp_type",
+ FT_UINT16, BASE_DEC, VALS(kafka_message_timestamp_types), KAFKA_MESSAGE_TIMESTAMP_MASK,
+ NULL, HFILL }
+ },
+ { &hf_kafka_batch_transactional,
+ { "Transactional", "kafka.batch_transactional",
+ FT_UINT16, BASE_DEC, VALS(kafka_batch_transactional_values), KAFKA_BATCH_TRANSACTIONAL_MASK,
+ NULL, HFILL }
+ },
+ { &hf_kafka_batch_control_batch,
+ { "Control Batch", "kafka.batch_control_batch",
+ FT_UINT16, BASE_DEC, VALS(kafka_batch_control_batch_values), KAFKA_BATCH_CONTROL_BATCH_MASK,
+ NULL, HFILL }
+ },
+ { &hf_kafka_batch_last_offset_delta,
+ { "Last Offset Delta", "kafka.batch_last_offset_delta",
+ FT_UINT32, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_batch_first_timestamp,
+ { "First Timestamp", "kafka.batch_first_timestamp",
+ FT_ABSOLUTE_TIME, ABSOLUTE_TIME_UTC, NULL, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_batch_last_timestamp,
+ { "Last Timestamp", "kafka.batch_last_timestamp",
+ FT_ABSOLUTE_TIME, ABSOLUTE_TIME_UTC, NULL, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_batch_base_sequence,
+ { "Base Sequence", "kafka.batch_base_sequence",
+ FT_INT32, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_batch_size,
+ { "Size", "kafka.batch_size",
+ FT_UINT32, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
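+    /*
+     * The codec, timestamp-type, transactional and control-batch fields
+     * above are all bit ranges of the single 16-bit attributes word of a
+     * v2 record batch, selected via the KAFKA_MESSAGE_CODEC_MASK,
+     * KAFKA_MESSAGE_TIMESTAMP_MASK, KAFKA_BATCH_TRANSACTIONAL_MASK and
+     * KAFKA_BATCH_CONTROL_BATCH_MASK masks.
+     */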
{ &hf_kafka_message_timestamp,
{ "Timestamp", "kafka.message_timestamp",
FT_ABSOLUTE_TIME, ABSOLUTE_TIME_UTC, NULL, 0,
@@ -3915,12 +8403,12 @@ proto_register_kafka(void)
},
{ &hf_kafka_message_key,
{ "Key", "kafka.message_key",
- FT_BYTES, BASE_NONE, 0, 0,
+ FT_BYTES, BASE_SHOW_ASCII_PRINTABLE, 0, 0,
NULL, HFILL }
},
{ &hf_kafka_message_value,
{ "Value", "kafka.message_value",
- FT_BYTES, BASE_NONE, 0, 0,
+ FT_BYTES, BASE_SHOW_ASCII_PRINTABLE, 0, 0,
NULL, HFILL }
},
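+    /* BASE_SHOW_ASCII_PRINTABLE renders key and value bytes as text in
+     * the tree whenever every byte is printable ASCII, which is common
+     * for textual Kafka payloads. */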
{ &hf_kafka_message_value_compressed,
@@ -3935,7 +8423,22 @@ proto_register_kafka(void)
},
{ &hf_kafka_consumer_group,
{ "Consumer Group", "kafka.consumer_group",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_consumer_group_instance,
+ { "Consumer Group Instance", "kafka.consumer_group_instance",
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_coordinator_key,
+ { "Coordinator Key", "kafka.coordinator_key",
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_coordinator_type,
+ { "Coordinator Type", "kafka.coordinator_type",
+ FT_INT8, BASE_DEC, VALS(kafka_coordinator_types), 0,
NULL, HFILL }
},
{ &hf_kafka_request_frame,
@@ -3948,9 +8451,19 @@ proto_register_kafka(void)
FT_INT32, BASE_DEC, 0, 0,
NULL, HFILL }
},
+ { &hf_kafka_broker_epoch,
+ { "Broker Epoch", "kafka.broker_epoch",
+ FT_INT64, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
{ &hf_kafka_broker_host,
{ "Host", "kafka.host",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_listener_name,
+ { "Listener", "kafka.listener_name",
+ FT_STRING, STR_ASCII, 0, 0,
NULL, HFILL }
},
{ &hf_kafka_broker_port,
@@ -3958,9 +8471,9 @@ proto_register_kafka(void)
FT_INT32, BASE_DEC, 0, 0,
NULL, HFILL }
},
- { &hf_kafka_broker_rack,
+ { &hf_kafka_rack,
{ "Rack", "kafka.rack",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_ASCII, 0, 0,
NULL, HFILL }
},
{ &hf_kafka_broker_security_protocol_type,
@@ -3970,7 +8483,7 @@ proto_register_kafka(void)
},
{ &hf_kafka_cluster_id,
{ "Cluster ID", "kafka.cluster_id",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_ASCII, 0, 0,
NULL, HFILL }
},
{ &hf_kafka_controller_id,
@@ -3990,7 +8503,7 @@ proto_register_kafka(void)
},
{ &hf_kafka_group_leader_id,
{ "Leader ID", "kafka.group_leader_id",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_ASCII, 0, 0,
NULL, HFILL }
},
{ &hf_kafka_leader_id,
@@ -4003,6 +8516,11 @@ proto_register_kafka(void)
FT_INT32, BASE_DEC, 0, 0,
NULL, HFILL }
},
+ { &hf_kafka_current_leader_epoch,
+ { "Leader Epoch", "kafka.current_leader_epoch",
+ FT_INT32, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
{ &hf_kafka_is_internal,
{ "Is Internal", "kafka.is_internal",
FT_BOOLEAN, BASE_NONE, 0, 0,
@@ -4024,7 +8542,7 @@ proto_register_kafka(void)
},
{ &hf_kafka_isolation_level,
{ "Isolation Level", "kafka.isolation_level",
- FT_INT8, BASE_DEC, 0, 0,
+ FT_INT8, BASE_DEC, VALS(kafka_isolation_levels), 0,
NULL, HFILL }
},
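+    /* Kafka isolation levels: 0 = read_uncommitted, 1 = read_committed. */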
{ &hf_kafka_max_wait_time,
@@ -4075,22 +8593,22 @@ proto_register_kafka(void)
},
{ &hf_kafka_group_state,
{ "State", "kafka.group_state",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_ASCII, 0, 0,
NULL, HFILL }
},
{ &hf_kafka_member_id,
{ "Consumer Group Member ID", "kafka.member_id",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_ASCII, 0, 0,
NULL, HFILL }
},
{ &hf_kafka_protocol_type,
{ "Protocol Type", "kafka.protocol_type",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_ASCII, 0, 0,
NULL, HFILL }
},
{ &hf_kafka_protocol_name,
{ "Protocol Name", "kafka.protocol_name",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_ASCII, 0, 0,
NULL, HFILL }
},
{ &hf_kafka_protocol_metadata,
@@ -4115,7 +8633,7 @@ proto_register_kafka(void)
},
{ &hf_kafka_sasl_mechanism,
{ "SASL Mechanism", "kafka.sasl_mechanism",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_ASCII, 0, 0,
NULL, HFILL }
},
{ &hf_kafka_num_partitions,
@@ -4130,12 +8648,17 @@ proto_register_kafka(void)
},
{ &hf_kafka_config_key,
{ "Key", "kafka.config_key",
- FT_STRING, BASE_NONE, 0, 0,
+ FT_STRING, STR_ASCII, 0, 0,
NULL, HFILL }
},
{ &hf_kafka_config_value,
- { "Key", "kafka.config_value",
- FT_STRING, BASE_NONE, 0, 0,
+ { "Value", "kafka.config_value",
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_config_operation,
+ { "Operation", "kafka.config_operation",
+ FT_INT8, BASE_DEC, VALS(config_operations), 0,
NULL, HFILL }
},
{ &hf_kafka_commit_timestamp,
@@ -4148,23 +8671,275 @@ proto_register_kafka(void)
FT_INT64, BASE_DEC, 0, 0,
NULL, HFILL }
},
+ { &hf_kafka_forgotten_topic_name,
+ { "Forgotten Topic Name", "kafka.forgotten_topic_name",
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+    { &hf_kafka_forgotten_topic_partition,
+      { "Forgotten Topic Partition", "kafka.forgotten_topic_partition",
+        FT_INT32, BASE_DEC, 0, 0,
+        NULL, HFILL }
+    },
+    { &hf_kafka_fetch_session_id,
+      { "Fetch Session ID", "kafka.fetch_session_id",
+        FT_INT32, BASE_DEC, 0, 0,
+        NULL, HFILL }
+    },
+    { &hf_kafka_fetch_session_epoch,
+      { "Fetch Session Epoch", "kafka.fetch_session_epoch",
+        FT_INT32, BASE_DEC, 0, 0,
+        NULL, HFILL }
+    },
+ { &hf_kafka_record_header_key,
+ { "Header Key", "kafka.header_key",
+ FT_STRING, STR_UNICODE, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_record_header_value,
+ { "Header Value", "kafka.header_value",
+ FT_BYTES, BASE_SHOW_ASCII_PRINTABLE, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_record_headers_count,
+ { "Headers Count", "kafka.headers_count",
+ FT_INT64, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_record_length,
+ { "Record Length", "kafka.record_length",
+ FT_INT64, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
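+    /* Record lengths and header counts are zig-zag varints on the wire;
+     * registering them as 64-bit ensures any decoded value fits. */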
+ { &hf_kafka_record_attributes,
+ { "Record Attributes (reserved)", "kafka.record_attributes",
+ FT_INT8, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_allow_auto_topic_creation,
+ { "Allow Auto Topic Creation", "kafka.allow_auto_topic_creation",
+ FT_BOOLEAN, BASE_NONE, 0, 0,
+ NULL, HFILL }
+ },
+    { &hf_kafka_validate_only,
+      { "Validate Only", "kafka.validate_only",
+        FT_BOOLEAN, BASE_NONE, 0, 0,
+        "If set, the request is only validated, not executed.", HFILL }
+    },
+ { &hf_kafka_coordinator_epoch,
+ { "Coordinator Epoch", "kafka.coordinator_epoch",
+ FT_INT32, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_sasl_auth_bytes,
+ { "SASL Authentication Bytes", "kafka.sasl_authentication",
+ FT_BYTES, BASE_NONE, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_session_lifetime_ms,
+ { "Session Lifetime (ms)", "kafka.session_lifetime_ms",
+ FT_INT64, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_acl_resource_type,
+ { "Resource Type", "kafka.acl_resource_type",
+ FT_INT8, BASE_DEC, VALS(acl_resource_types), 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_acl_resource_name,
+ { "Resource Name", "kafka.acl_resource_name",
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_acl_resource_pattern_type,
+ { "Resource Pattern Type", "kafka.acl_resource_pattern_type",
+ FT_INT8, BASE_DEC, VALS(acl_resource_pattern_types), 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_acl_principal,
+ { "Principal", "kafka.acl_principal",
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_acl_host,
+ { "Host", "kafka.acl_host",
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_acl_operation,
+ { "Operation", "kafka.acl_operation",
+ FT_INT8, BASE_DEC, VALS(acl_operations), 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_acl_permission_type,
+ { "Permission Type", "kafka.acl_permission_type",
+ FT_INT8, BASE_DEC, VALS(acl_permission_types), 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_config_resource_type,
+ { "Resource Type", "kafka.config_resource_type",
+ FT_INT8, BASE_DEC, VALS(config_resource_types), 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_config_resource_name,
+ { "Resource Name", "kafka.config_resource_name",
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_config_include_synonyms,
+ { "Include Synonyms", "kafka.config_include_synonyms",
+ FT_BOOLEAN, BASE_NONE, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_config_default,
+ { "Default", "kafka.config_default",
+ FT_BOOLEAN, BASE_NONE, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_config_readonly,
+ { "Readonly", "kafka.config_readonly",
+ FT_BOOLEAN, BASE_NONE, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_config_sensitive,
+ { "Sensitive", "kafka.config_sensitive",
+ FT_BOOLEAN, BASE_NONE, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_config_source,
+ { "Source", "kafka.config_source",
+ FT_INT8, BASE_DEC, VALS(config_sources), 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_log_dir,
+ { "Log Directory", "kafka.log_dir",
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_segment_size,
+ { "Segment Size", "kafka.segment_size",
+ FT_UINT64, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_offset_lag,
+ { "Offset Lag", "kafka.offset_lag",
+ FT_UINT64, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
+    { &hf_kafka_future,
+      { "Future", "kafka.future",
+        FT_BOOLEAN, BASE_NONE, 0, 0,
+        "True if this log was created by AlterReplicaLogDirs and will replace the current replica log.", HFILL }
+    },
+ { &hf_kafka_partition_count,
+ { "Partition Count", "kafka.partition_count",
+ FT_UINT32, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
+    { &hf_kafka_token_max_life_time,
+      { "Max Lifetime", "kafka.token_max_life_time",
+        FT_INT64, BASE_DEC, 0, 0,
+        NULL, HFILL }
+    },
+ { &hf_kafka_token_renew_time,
+ { "Renew Time", "kafka.renew_time",
+ FT_INT64, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_token_expiry_time,
+ { "Expiry Time", "kafka.expiry_time",
+ FT_INT64, BASE_DEC, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_token_principal_type,
+ { "Principal Type", "kafka.principal_type",
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_token_principal_name,
+ { "Principal Name", "kafka.principal_name",
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_token_issue_timestamp,
+ { "Issue Timestamp", "kafka.token_issue_timestamp",
+ FT_ABSOLUTE_TIME, ABSOLUTE_TIME_UTC, NULL, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_token_expiry_timestamp,
+ { "Expiry Timestamp", "kafka.token_expiry_timestamp",
+ FT_ABSOLUTE_TIME, ABSOLUTE_TIME_UTC, NULL, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_token_max_timestamp,
+ { "Max Timestamp", "kafka.token_max_timestamp",
+ FT_ABSOLUTE_TIME, ABSOLUTE_TIME_UTC, NULL, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_token_id,
+ { "ID", "kafka.token_id",
+ FT_STRING, STR_ASCII, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_token_hmac,
+ { "HMAC", "kafka.token_hmac",
+ FT_BYTES, BASE_NONE, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_include_cluster_authorized_ops,
+ { "Include Cluster Authorized Operations", "kafka.include_cluster_authorized_ops",
+ FT_BOOLEAN, BASE_NONE, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_include_topic_authorized_ops,
+ { "Include Topic Authorized Operations", "kafka.include_topic_authorized_ops",
+ FT_BOOLEAN, BASE_NONE, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_cluster_authorized_ops,
+ { "Cluster Authorized Operations", "kafka.cluster_authorized_ops",
+ FT_UINT32, BASE_HEX, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_topic_authorized_ops,
+ { "Topic Authorized Operations", "kafka.topic_authorized_ops",
+ FT_UINT32, BASE_HEX, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_include_group_authorized_ops,
+ { "Include Group Authorized Operations", "kafka.include_group_authorized_ops",
+ FT_BOOLEAN, BASE_NONE, 0, 0,
+ NULL, HFILL }
+ },
+ { &hf_kafka_group_authorized_ops,
+ { "Group Authorized Operations", "kafka.group_authorized_ops",
+ FT_UINT32, BASE_HEX, 0, 0,
+ NULL, HFILL }
+ },
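+    /* The *_authorized_ops values are int32 bitmasks of AclOperation
+     * flags, hence the hexadecimal display. */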
+    { &hf_kafka_election_type,
+      { "Election Type", "kafka.election_type",
+        FT_INT8, BASE_DEC, VALS(election_types), 0,
+        NULL, HFILL }
+    },
};
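+    /*
+     * Each hf entry above is referenced at dissection time, e.g. (sketch):
+     *     proto_tree_add_item(tree, hf_kafka_partition_id, tvb, offset, 4,
+     *                         ENC_BIG_ENDIAN);
+     * The FT_/BASE_/VALS metadata then controls how the bytes render.
+     */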
static int *ett[] = {
&ett_kafka,
+ &ett_kafka_batch,
&ett_kafka_message,
&ett_kafka_message_set,
+ &ett_kafka_offline,
&ett_kafka_isrs,
&ett_kafka_replicas,
&ett_kafka_broker,
&ett_kafka_brokers,
&ett_kafka_broker_end_point,
+ &ett_kafka_markers,
+ &ett_kafka_marker,
&ett_kafka_topics,
&ett_kafka_topic,
- &ett_kafka_request_topic,
- &ett_kafka_request_partition,
- &ett_kafka_response_topic,
- &ett_kafka_response_partition,
+ &ett_kafka_partitions,
+ &ett_kafka_partition,
&ett_kafka_api_version,
&ett_kafka_group_protocols,
&ett_kafka_group_protocol,
@@ -4172,11 +8947,40 @@ proto_register_kafka(void)
&ett_kafka_group_member,
&ett_kafka_group_assignments,
&ett_kafka_group_assignment,
+ &ett_kafka_groups,
&ett_kafka_group,
&ett_kafka_sasl_enabled_mechanisms,
&ett_kafka_replica_assignment,
&ett_kafka_configs,
&ett_kafka_config,
+ &ett_kafka_request_forgotten_topic,
+ &ett_kafka_record,
+ &ett_kafka_record_headers,
+ &ett_kafka_record_headers_header,
+ &ett_kafka_aborted_transactions,
+ &ett_kafka_aborted_transaction,
+ &ett_kafka_resources,
+ &ett_kafka_resource,
+ &ett_kafka_acls,
+ &ett_kafka_acl,
+ &ett_kafka_acl_creations,
+ &ett_kafka_acl_creation,
+ &ett_kafka_acl_filters,
+ &ett_kafka_acl_filter,
+ &ett_kafka_acl_filter_matches,
+ &ett_kafka_acl_filter_match,
+ &ett_kafka_config_synonyms,
+ &ett_kafka_config_synonym,
+ &ett_kafka_config_entries,
+ &ett_kafka_config_entry,
+ &ett_kafka_log_dirs,
+ &ett_kafka_log_dir,
+ &ett_kafka_renewers,
+ &ett_kafka_renewer,
+ &ett_kafka_owners,
+ &ett_kafka_owner,
+ &ett_kafka_tokens,
+ &ett_kafka_token,
};
static ei_register_info ei[] = {
@@ -4192,6 +8996,8 @@ proto_register_kafka(void)
{ "kafka.bad_string_length", PI_MALFORMED, PI_WARN, "Invalid string length field", EXPFILL }},
{ &ei_kafka_bad_bytes_length,
{ "kafka.bad_bytes_length", PI_MALFORMED, PI_WARN, "Invalid byte length field", EXPFILL }},
+ { &ei_kafka_unknown_message_magic,
+ { "kafka.unknown_message_magic", PI_MALFORMED, PI_WARN, "Invalid message magic field", EXPFILL }},
};
module_t *kafka_module;
@@ -4205,7 +9011,8 @@ proto_register_kafka(void)
expert_kafka = expert_register_protocol(proto_kafka);
expert_register_field_array(expert_kafka, ei, array_length(ei));
- kafka_module = prefs_register_protocol(proto_kafka, apply_kafka_prefs);
+ kafka_module = prefs_register_protocol(proto_kafka, NULL);
+ kafka_handle = register_dissector("kafka", dissect_kafka_tcp, proto_kafka);
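+    /* Registering a named dissector lets other dissectors and Lua scripts
+       obtain the Kafka dissector via find_dissector("kafka"). */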
prefs_register_bool_preference(kafka_module, "show_string_bytes_lengths",
"Show length for string and bytes fields in the protocol tree",
@@ -4216,13 +9023,8 @@ proto_register_kafka(void)
void
proto_reg_handoff_kafka(void)
{
- dissector_handle_t kafka_handle;
-
- kafka_handle = create_dissector_handle(dissect_kafka_tcp, proto_kafka);
-
- /* Replace range of ports with current */
- dissector_add_uint_range_with_preference("tcp.port", "", kafka_handle);
- apply_kafka_prefs();
+ dissector_add_uint_range_with_preference("tcp.port", KAFKA_TCP_DEFAULT_RANGE, kafka_handle);
+ ssl_dissector_add(0, kafka_handle);
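+    /* Port 0 registers Kafka with the TLS dissector without claiming a
+       fixed TLS port, so it can be chosen via Decode As over TLS. */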
}
/*