author    Michael Mann <mmann78@netscape.net>    2019-08-28 03:37:04 -0400
committer Anders Broman <a.broman58@gmail.com>   2019-09-05 03:25:39 +0000
commit    168ee5003fa2bfba578118b807d6fdf37ec02abc (patch)
tree      7c10842f881b21209e797aac0b14d0c4152aa329
parent    05e39afb3fdbf76452a05c1c2a2c3164af9702ed (diff)
kafka: Cleanup to use "native" APIs.

Add "native" support for the "zig-zag" variant of a varint to
proto.[ch] and tvbuff.[ch], and convert the Kafka dissector's varint
handling to the new "native" API.

Ping-Bug: 15988
Change-Id: Ia83569203877df8c780f4f182916ed6327d0ec6c
Reviewed-on: https://code.wireshark.org/review/34386
Petri-Dish: Alexis La Goutte <alexis.lagoutte@gmail.com>
Tested-by: Petri Dish Buildbot
Reviewed-by: Alexis La Goutte <alexis.lagoutte@gmail.com>
Reviewed-by: Anders Broman <a.broman58@gmail.com>
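For readers unfamiliar with the encoding: zig-zag maps signed integers onto unsigned varints so that values of small magnitude, positive or negative, encode in few octets. A minimal illustrative sketch, not part of the patch (function names are hypothetical; assumes the usual arithmetic right shift on signed values):

/* Zig-zag: encode n as (n << 1) ^ (n >> 63); decode v as (v >> 1) ^ -(v & 1). */
#include <stdint.h>

static uint64_t zigzag_encode64(int64_t n)
{
    /* n >> 63 spreads the sign bit: -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ... */
    return ((uint64_t)n << 1) ^ (uint64_t)(n >> 63);
}

static int64_t zigzag_decode64(uint64_t v)
{
    /* (0 - (v & 1)) is all-ones when the low bit is set, undoing the interleave. */
    return (int64_t)((v >> 1) ^ (0 - (v & 1)));
}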
-rw-r--r--  debian/libwireshark0.symbols    |   1
-rw-r--r--  epan/dissectors/packet-kafka.c  | 208
-rw-r--r--  epan/proto.c                    |  91
-rw-r--r--  epan/proto.h                    |  11
-rw-r--r--  epan/tvbuff.c                   |  17
-rw-r--r--  epan/tvbuff.h                   |   2
6 files changed, 173 insertions(+), 157 deletions(-)
diff --git a/debian/libwireshark0.symbols b/debian/libwireshark0.symbols
index 946cf6c451..7e6e9df4c7 100644
--- a/debian/libwireshark0.symbols
+++ b/debian/libwireshark0.symbols
@@ -1237,6 +1237,7 @@ libwireshark.so.0 libwireshark0 #MINVER#
proto_tree_add_item_ret_boolean@Base 2.3.0
proto_tree_add_item_ret_display_string@Base 3.1.0
proto_tree_add_item_ret_display_string_and_length@Base 3.1.0
+ proto_tree_add_item_ret_int64@Base 3.1.1
proto_tree_add_item_ret_int@Base 1.99.6
proto_tree_add_item_ret_length@Base 2.1.0
proto_tree_add_item_ret_time_string@Base 3.1.1
diff --git a/epan/dissectors/packet-kafka.c b/epan/dissectors/packet-kafka.c
index 478db6f4fe..e250b2205d 100644
--- a/epan/dissectors/packet-kafka.c
+++ b/epan/dissectors/packet-kafka.c
@@ -999,118 +999,24 @@ dissect_kafka_bytes(proto_tree *tree, int hf_item, tvbuff_t *tvb, packet_info *p
}
static int
-dissect_kafka_timestamp(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int hf_item, int offset)
-{
- nstime_t nstime;
- guint64 milliseconds;
-
- milliseconds = tvb_get_ntoh64(tvb, offset);
-
- nstime.secs = (time_t) (milliseconds / 1000);
- nstime.nsecs = (int) ((milliseconds % 1000) * 1000000);
-
- proto_tree_add_time(tree, hf_item, tvb, offset, 8, &nstime);
- offset += 8;
-
- return offset;
-}
-
-/*
- * Function: tvb_read_kafka_varint32
- * ---------------------------------------------------
- * Reads 32-bit integer encoded as zig-zag. The 32-bit integer
- * takes up to 5 octets (32 = 4*7+4).
- *
- * tvb: actual data buffer
- * offset: offset in the buffer where the string length is to be found
- * p_len: pointer to a variable to store the length of the variable
- * p_overflow: pointer to a variable to store information that the value exceeds gint32 capacity
- *
- * returns: decoded value of 32-bit signed integer
- */
-static gint32
-tvb_read_kafka_varint32(tvbuff_t *tvb, gint offset, guint *p_len, gboolean *p_overflow)
-{
- gint32 v = 0;
- guint8 p = 0;
- guint i = 0;
-
- do {
- p = tvb_get_guint8(tvb, offset+i);
- v += (p & 0x7f) << (i*7);
- i += 1;
- } while ((p&0x80)!=0 && i<5);
-
- if (p_len != NULL) {
- *p_len = i;
- }
- // 32-bit integer in zig-zag can take up to 5 octets
- // the last octet can take at most 4 bits
- // either continuation bit is set or there are more than 32 bits
- if (p_overflow != NULL) {
- *p_overflow = ((p&0x80) != 0) || (i >= 5 && (p&0x70) != 0);
- }
-
- return (v>>1) ^ ((v & 1) ? -1 : 0);
-}
-
-/*
- * Function: tvb_read_kafka_varint64
- * ---------------------------------------------------
- * Reads 64-bit integer encoded as zig-zag. The 64-bit integer
- * takes up to 10 octets (64 = 9*7+1).
- *
- * tvb: actual data buffer
- * offset: offset in the buffer where the string length is to be found
- * p_len: pointer to a variable to store the length of the variable
- * p_overflow: pointer to a variable to store information that the value exceeds gint64 capacity
- *
- * returns: decoded value of 64-bit signed integer
- */
-static gint64
-tvb_read_kafka_varint64(tvbuff_t *tvb, gint offset, guint *p_len, gboolean *p_overflow)
-{
- gint64 v = 0;
- guint8 p = 0;
- guint i = 0;
-
- do {
- p = tvb_get_guint8(tvb, offset+i);
- v += (p & 0x7f) << (i*7);
- i += 1;
- } while ((p&0x80)!=0 && i<10);
-
- if (p_len != NULL) {
- *p_len = i;
- }
- // 64-bit integer in zig-zag can take up to 10 octets
- // the last octet can take at most 1 bit
- // either continuation bit is set or there are more than 64 bits
- if (p_overflow != NULL) {
- *p_overflow = ((p&0x80) != 0) || (i >= 10 && (p&0x7e) != 0);
- }
-
- return (v>>1) ^ ((v & 1) ? -1 : 0);
-}
-
-static int
dissect_kafka_timestamp_delta(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int hf_item, int offset, guint64 first_timestamp)
{
nstime_t nstime;
guint64 milliseconds;
guint64 val;
- int len;
- gboolean overflow;
+ guint len;
proto_item *pi;
- val = tvb_read_kafka_varint64(tvb, offset, &len, &overflow);
+ len = tvb_get_varint(tvb, offset, FT_VARINT_MAX_LEN, &val, ENC_VARINT_ZIGZAG);
milliseconds = first_timestamp + val;
nstime.secs = (time_t) (milliseconds / 1000);
nstime.nsecs = (int) ((milliseconds % 1000) * 1000000);
pi = proto_tree_add_time(tree, hf_item, tvb, offset, len, &nstime);
- if (overflow) {
+ if (len == 0) {
+ // This will probably lead to a malformed packet, but it's better than not incrementing the offset
+ len = FT_VARINT_MAX_LEN;
expert_add_info(pinfo, pi, &ei_kafka_bad_varint);
}
@@ -1121,12 +1027,17 @@ static int
dissect_kafka_offset_delta(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int hf_item, int offset, guint64 base_offset)
{
gint64 val;
- int len;
- gboolean overflow;
+ guint len;
+ proto_item *pi;
- val = tvb_read_kafka_varint64(tvb, offset, &len, &overflow);
+ len = tvb_get_varint(tvb, offset, FT_VARINT_MAX_LEN, &val, ENC_VARINT_ZIGZAG);
- proto_tree_add_int64(tree, hf_item, tvb, offset, len, base_offset+val);
+ pi = proto_tree_add_int64(tree, hf_item, tvb, offset, len, base_offset+val);
+ if (len == 0) {
+ // This will probably lead to a malformed packet, but it's better than not incrementing the offset
+ len = FT_VARINT_MAX_LEN;
+ expert_add_info(pinfo, pi, &ei_kafka_bad_varint);
+ }
return offset+len;
}
@@ -1152,20 +1063,20 @@ dissect_kafka_offset_delta(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tr
static int
dissect_kafka_string_new(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int hf_item, int offset, int *p_string_offset, int *p_string_length)
{
- gint val;
- gint len;
- gboolean overflow;
+ gint64 val;
+ guint len;
proto_item *pi;
- val = tvb_read_kafka_varint32(tvb, offset, &len, &overflow);
+ len = tvb_get_varint(tvb, offset, 5, &val, ENC_VARINT_ZIGZAG);
- if (overflow) {
+ if (len == 0) {
pi = proto_tree_add_string_format_value(tree, hf_item, tvb, offset+len, 0, NULL, "<INVALID>");
expert_add_info(pinfo, pi, &ei_kafka_bad_varint);
+ len = 5;
val = 0;
} else if (val > 0) {
// there is payload available, possibly with 0 octets
- pi = proto_tree_add_item(tree, hf_item, tvb, offset+len, val, ENC_NA | ENC_UTF_8);
+ pi = proto_tree_add_item(tree, hf_item, tvb, offset+len, (gint)val, ENC_NA | ENC_UTF_8);
} else if (val == 0) {
// there is empty payload (0 octets)
pi = proto_tree_add_string_format_value(tree, hf_item, tvb, offset+len, 0, NULL, "<EMPTY>");
@@ -1183,10 +1094,10 @@ dissect_kafka_string_new(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree
*p_string_offset = offset+len;
}
if (p_string_length != NULL) {
- *p_string_length = val;
+ *p_string_length = (gint)val;
}
- return offset+len+val;
+ return offset+len+(gint)val;
}
/*
@@ -1210,20 +1121,20 @@ dissect_kafka_string_new(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree
static int
dissect_kafka_bytes_new(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, int hf_item, int offset, int *p_bytes_offset, int *p_bytes_length)
{
- gint val;
- gint len;
- gboolean overflow;
+ gint64 val;
+ guint len;
proto_item *pi;
- val = tvb_read_kafka_varint32(tvb, offset, &len, &overflow);
+ len = tvb_get_varint(tvb, offset, 5, &val, ENC_VARINT_ZIGZAG);
- if (overflow) {
+ if (len == 0) {
pi = proto_tree_add_bytes_format_value(tree, hf_item, tvb, offset+len, 0, NULL, "<INVALID>");
expert_add_info(pinfo, pi, &ei_kafka_bad_varint);
+ len = 5;
val = 0;
} else if (val > 0) {
// there is payload available, possibly with 0 octets
- pi = proto_tree_add_item(tree, hf_item, tvb, offset+len, val, ENC_NA);
+ pi = proto_tree_add_item(tree, hf_item, tvb, offset+len, (gint)val, ENC_NA);
} else if (val == 0) {
// there is empty payload (0 octets)
pi = proto_tree_add_bytes_format_value(tree, hf_item, tvb, offset+len, 0, NULL, "<EMPTY>");
@@ -1241,9 +1152,9 @@ dissect_kafka_bytes_new(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree,
*p_bytes_offset = offset+len;
}
if (p_bytes_length != NULL) {
- *p_bytes_length = val;
+ *p_bytes_length = (gint)val;
}
- return offset+len+val;
+ return offset+len+(gint)val;
}
/* Calculate and show the reduction in transmitted size due to compression */
@@ -1284,16 +1195,16 @@ dissect_kafka_record_headers(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *
{
proto_item *record_headers_ti;
proto_tree *subtree;
- gint32 count;
- gint len;
- gboolean overflow;
+ gint64 count;
+ guint len;
int i;
subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_record_headers, &record_headers_ti, "Headers");
- count = tvb_read_kafka_varint32(tvb, offset, &len, &overflow);
- if (overflow) {
+ len = tvb_get_varint(tvb, offset, 5, &count, ENC_VARINT_ZIGZAG);
+ if (len == 0) {
expert_add_info(pinfo, record_headers_ti, &ei_kafka_bad_varint);
+ len = 5;
} else if (count < -1) { // -1 means null array
expert_add_info(pinfo, record_headers_ti, &ei_kafka_bad_array_length);
}
@@ -1314,9 +1225,8 @@ dissect_kafka_record(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, in
proto_item *record_ti;
proto_tree *subtree;
- gint32 size;
- gint len;
- gboolean overflow;
+ gint64 size;
+ guint len;
int offset, end_offset;
@@ -1324,16 +1234,16 @@ dissect_kafka_record(tvbuff_t *tvb, packet_info *pinfo _U_, proto_tree *tree, in
subtree = proto_tree_add_subtree(tree, tvb, offset, -1, ett_kafka_record, &record_ti, "Record");
- size = tvb_read_kafka_varint32(tvb, offset, &len, &overflow);
- if (overflow) {
+ len = tvb_get_varint(tvb, offset, 5, &size, ENC_VARINT_ZIGZAG);
+ if (len == 0) {
expert_add_info(pinfo, record_ti, &ei_kafka_bad_varint);
- return offset + len;
+ return offset + 5;
} else if (size < 6) {
expert_add_info(pinfo, record_ti, &ei_kafka_bad_record_length);
return offset + len;
}
- end_offset = offset + len + size;
+ end_offset = offset + len + (gint)size;
offset += len;
proto_tree_add_item(subtree, hf_kafka_record_attributes, tvb, offset, 1, ENC_BIG_ENDIAN);
@@ -1672,7 +1582,8 @@ dissect_kafka_message_old(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, i
offset += 1;
if (magic_byte == 1) {
- offset = dissect_kafka_timestamp(tvb, pinfo, subtree, hf_kafka_message_timestamp, offset);
+ proto_tree_add_item(subtree, hf_kafka_message_timestamp, tvb, offset, 8, ENC_TIME_MSECS|ENC_BIG_ENDIAN);
+ offset += 8;
}
offset = dissect_kafka_bytes(subtree, hf_kafka_message_key, tvb, pinfo, offset, NULL, NULL);
@@ -1774,8 +1685,10 @@ dissect_kafka_message_new(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, i
offset += 4;
first_timestamp = tvb_get_ntoh64(tvb, offset);
- offset = dissect_kafka_timestamp(tvb, pinfo, subtree, hf_kafka_batch_first_timestamp, offset);
- offset = dissect_kafka_timestamp(tvb, pinfo, subtree, hf_kafka_batch_last_timestamp, offset);
+ proto_tree_add_item(subtree, hf_kafka_batch_first_timestamp, tvb, offset, 8, ENC_TIME_MSECS|ENC_BIG_ENDIAN);
+ offset += 8;
+ proto_tree_add_item(subtree, hf_kafka_batch_last_timestamp, tvb, offset, 8, ENC_TIME_MSECS|ENC_BIG_ENDIAN);
+ offset += 8;
proto_tree_add_item(subtree, hf_kafka_producer_id, tvb, offset, 8, ENC_BIG_ENDIAN);
offset += 8;
@@ -3728,7 +3641,8 @@ dissect_kafka_offset_commit_request_partition(tvbuff_t *tvb, packet_info *pinfo,
if (api_version == 1) {
/* timestamp */
- offset = dissect_kafka_timestamp(tvb, pinfo, subtree, hf_kafka_commit_timestamp, offset);
+ proto_tree_add_item(subtree, hf_kafka_commit_timestamp, tvb, offset, 8, ENC_TIME_MSECS|ENC_BIG_ENDIAN);
+ offset += 8;
}
/* metadata */
@@ -6950,9 +6864,12 @@ dissect_kafka_create_delegation_token_response(tvbuff_t *tvb, packet_info *pinfo
offset = dissect_kafka_string(tree, hf_kafka_token_principal_type, tvb, pinfo, offset, NULL, NULL);
offset = dissect_kafka_string(tree, hf_kafka_token_principal_name, tvb, pinfo, offset, NULL, NULL);
- offset = dissect_kafka_timestamp(tvb, pinfo, tree, hf_kafka_token_issue_timestamp, offset);
- offset = dissect_kafka_timestamp(tvb, pinfo, tree, hf_kafka_token_expiry_timestamp, offset);
- offset = dissect_kafka_timestamp(tvb, pinfo, tree, hf_kafka_token_max_timestamp, offset);
+ proto_tree_add_item(tree, hf_kafka_token_issue_timestamp, tvb, offset, 8, ENC_TIME_MSECS | ENC_BIG_ENDIAN);
+ offset += 8;
+ proto_tree_add_item(tree, hf_kafka_token_expiry_timestamp, tvb, offset, 8, ENC_TIME_MSECS | ENC_BIG_ENDIAN);
+ offset += 8;
+ proto_tree_add_item(tree, hf_kafka_token_max_timestamp, tvb, offset, 8, ENC_TIME_MSECS | ENC_BIG_ENDIAN);
+ offset += 8;
offset = dissect_kafka_string(tree, hf_kafka_token_id, tvb, pinfo, offset, NULL, NULL);
offset = dissect_kafka_bytes(tree, hf_kafka_token_hmac, tvb, pinfo, offset, NULL, NULL);
@@ -6981,7 +6898,8 @@ dissect_kafka_renew_delegation_token_response(tvbuff_t *tvb, packet_info *pinfo,
kafka_api_version_t api_version _U_)
{
offset = dissect_kafka_error(tvb, pinfo, tree, offset);
- offset = dissect_kafka_timestamp(tvb, pinfo, tree, hf_kafka_token_expiry_timestamp, offset);
+ proto_tree_add_item(tree, hf_kafka_token_expiry_timestamp, tvb, offset, 8, ENC_TIME_MSECS | ENC_BIG_ENDIAN);
+ offset += 8;
offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
return offset;
@@ -7006,7 +6924,8 @@ dissect_kafka_expire_delegation_token_response(tvbuff_t *tvb, packet_info *pinfo
kafka_api_version_t api_version _U_)
{
offset = dissect_kafka_error(tvb, pinfo, tree, offset);
- offset = dissect_kafka_timestamp(tvb, pinfo, tree, hf_kafka_token_expiry_timestamp, offset);
+ proto_tree_add_item(tree, hf_kafka_token_expiry_timestamp, tvb, offset, 8, ENC_TIME_MSECS | ENC_BIG_ENDIAN);
+ offset += 8;
offset = dissect_kafka_throttle_time(tvb, pinfo, tree, offset);
return offset;
@@ -7080,9 +6999,12 @@ dissect_kafka_describe_delegation_token_response_token(tvbuff_t *tvb, packet_inf
offset = dissect_kafka_string(subtree, hf_kafka_token_principal_type, tvb, pinfo, offset, NULL, NULL);
offset = dissect_kafka_string(subtree, hf_kafka_token_principal_name, tvb, pinfo, offset, NULL, NULL);
- offset = dissect_kafka_timestamp(tvb, pinfo, subtree, hf_kafka_token_issue_timestamp, offset);
- offset = dissect_kafka_timestamp(tvb, pinfo, subtree, hf_kafka_token_expiry_timestamp, offset);
- offset = dissect_kafka_timestamp(tvb, pinfo, subtree, hf_kafka_token_max_timestamp, offset);
+ proto_tree_add_item(subtree, hf_kafka_token_issue_timestamp, tvb, offset, 8, ENC_TIME_MSECS | ENC_BIG_ENDIAN);
+ offset += 8;
+ proto_tree_add_item(subtree, hf_kafka_token_expiry_timestamp, tvb, offset, 8, ENC_TIME_MSECS | ENC_BIG_ENDIAN);
+ offset += 8;
+ proto_tree_add_item(subtree, hf_kafka_token_max_timestamp, tvb, offset, 8, ENC_TIME_MSECS | ENC_BIG_ENDIAN);
+ offset += 8;
offset = dissect_kafka_string(subtree, hf_kafka_token_id, tvb, pinfo, offset, NULL, NULL);
offset = dissect_kafka_bytes(subtree, hf_kafka_token_hmac, tvb, pinfo, offset, NULL, NULL);
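The conversions above all follow one pattern; here is a condensed sketch of it (identifiers as in dissect_kafka_offset_delta above, error handling as the patch adds it), offered as a reading aid rather than a literal excerpt:

/* Read a zig-zag varint. tvb_get_varint() returns the number of octets
 * consumed, or 0 if decoding failed. On failure, flag the item and skip
 * the maximum varint width so dissection can still advance. */
guint64 val;
guint   len;
proto_item *pi;

len = tvb_get_varint(tvb, offset, FT_VARINT_MAX_LEN, &val, ENC_VARINT_ZIGZAG);
pi  = proto_tree_add_int64(tree, hf_item, tvb, offset, len, base_offset + (gint64)val);
if (len == 0) {
    expert_add_info(pinfo, pi, &ei_kafka_bad_varint);
    len = FT_VARINT_MAX_LEN; /* better than not advancing at all */
}
return offset + len;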
diff --git a/epan/proto.c b/epan/proto.c
index 72b17bbb49..70ccbfa641 100644
--- a/epan/proto.c
+++ b/epan/proto.c
@@ -2383,7 +2383,7 @@ test_length(header_field_info *hfinfo, tvbuff_t *tvb,
return;
if ((hfinfo->type == FT_STRINGZ) ||
- ((encoding & (ENC_VARINT_PROTOBUF|ENC_VARINT_QUIC)) &&
+ ((encoding & (ENC_VARIANT_MASK)) &&
(IS_FT_UINT(hfinfo->type) || IS_FT_INT(hfinfo->type)))) {
/* If we're fetching until the end of the TVB, only validate
* that the offset is within range.
@@ -2488,7 +2488,12 @@ proto_tree_new_item(field_info *new_fi, proto_tree *tree,
} else if (encoding & ENC_VARINT_QUIC) {
new_fi->length = tvb_get_varint(tvb, start, (length == -1) ? FT_VARINT_MAX_LEN : length, &value64, encoding);
value = (guint32)value64;
- } else {
+ } else if (encoding & ENC_VARINT_ZIGZAG) {
+ new_fi->length = tvb_get_varint(tvb, start, (length == -1) ? FT_VARINT_MAX_LEN : length, &value64, encoding);
+ new_fi->flags |= FI_VARINT;
+ value = (guint32)value64;
+ }
+ else {
/*
* Map all non-zero values to little-endian for
* backwards compatibility.
@@ -2511,7 +2516,11 @@ proto_tree_new_item(field_info *new_fi, proto_tree *tree,
new_fi->flags |= FI_VARINT;
} else if (encoding & ENC_VARINT_QUIC) {
new_fi->length = tvb_get_varint(tvb, start, (length == -1) ? FT_VARINT_MAX_LEN : length, &value64, encoding);
- } else {
+ } else if (encoding & ENC_VARINT_ZIGZAG) {
+ new_fi->length = tvb_get_varint(tvb, start, (length == -1) ? FT_VARINT_MAX_LEN : length, &value64, encoding);
+ new_fi->flags |= FI_VARINT;
+ }
+ else {
/*
* Map all other non-zero values to little-endian for
* backwards compatibility.
@@ -2973,7 +2982,7 @@ proto_tree_add_item_ret_uint(proto_tree *tree, int hfindex, tvbuff_t *tvb,
}
/* I believe it's ok if this is called with a NULL tree */
/* XXX - modify if we ever support EBCDIC FT_CHAR */
- if (encoding & (ENC_VARINT_PROTOBUF|ENC_VARINT_QUIC)) {
+ if (encoding & (ENC_VARIANT_MASK)) {
guint64 temp64;
tvb_get_varint(tvb, start, length, &temp64, encoding);
value = (guint32)temp64;
@@ -3000,7 +3009,7 @@ proto_tree_add_item_ret_uint(proto_tree *tree, int hfindex, tvbuff_t *tvb,
proto_tree_set_uint(new_fi, value);
new_fi->flags |= (encoding & ENC_LITTLE_ENDIAN) ? FI_LITTLE_ENDIAN : FI_BIG_ENDIAN;
- if (encoding & ENC_VARINT_PROTOBUF) {
+ if (encoding & (ENC_VARINT_PROTOBUF|ENC_VARINT_ZIGZAG)) {
new_fi->flags |= FI_VARINT;
}
return proto_tree_add_node(tree, new_fi);
@@ -3253,14 +3262,14 @@ proto_tree_add_item_ret_uint64(proto_tree *tree, int hfindex, tvbuff_t *tvb,
/* length validation for native number encoding caught by get_uint64_value() */
/* length has to be -1 or > 0 regardless of encoding */
if (length < -1 || length == 0)
- REPORT_DISSECTOR_BUG("Invalid length %d passed to proto_tree_add_item_ret_uint",
+ REPORT_DISSECTOR_BUG("Invalid length %d passed to proto_tree_add_item_ret_uint64",
length);
if (encoding & ENC_STRING) {
REPORT_DISSECTOR_BUG("wrong encoding");
}
/* I believe it's ok if this is called with a NULL tree */
- if (encoding & (ENC_VARINT_PROTOBUF|ENC_VARINT_QUIC)) {
+ if (encoding & (ENC_VARIANT_MASK)) {
tvb_get_varint(tvb, start, length, &value, encoding);
} else {
value = get_uint64_value(tree, tvb, start, length, encoding);
@@ -3285,7 +3294,65 @@ proto_tree_add_item_ret_uint64(proto_tree *tree, int hfindex, tvbuff_t *tvb,
proto_tree_set_uint64(new_fi, value);
new_fi->flags |= (encoding & ENC_LITTLE_ENDIAN) ? FI_LITTLE_ENDIAN : FI_BIG_ENDIAN;
- if (encoding & ENC_VARINT_PROTOBUF) {
+ if (encoding & (ENC_VARINT_PROTOBUF|ENC_VARINT_ZIGZAG)) {
+ new_fi->flags |= FI_VARINT;
+ }
+
+ return proto_tree_add_node(tree, new_fi);
+}
+
+proto_item *
+proto_tree_add_item_ret_int64(proto_tree *tree, int hfindex, tvbuff_t *tvb,
+ const gint start, gint length, const guint encoding, gint64 *retval)
+{
+ header_field_info *hfinfo = proto_registrar_get_nth(hfindex);
+ field_info *new_fi;
+ gint64 value;
+
+ DISSECTOR_ASSERT_HINT(hfinfo != NULL, "Not passed hfi!");
+
+ switch (hfinfo->type) {
+ case FT_INT40:
+ case FT_INT48:
+ case FT_INT56:
+ case FT_INT64:
+ break;
+ default:
+ REPORT_DISSECTOR_BUG("field %s is not of type FT_INT40, FT_INT48, FT_INT56, or FT_INT64",
+ hfinfo->abbrev);
+ }
+
+ /* length validation for native number encoding caught by get_uint64_value() */
+ /* length has to be -1 or > 0 regardless of encoding */
+ if (length < -1 || length == 0)
+ REPORT_DISSECTOR_BUG("Invalid length %d passed to proto_tree_add_item_ret_int64",
+ length);
+
+ if (encoding & ENC_STRING) {
+ REPORT_DISSECTOR_BUG("wrong encoding");
+ }
+ /* I believe it's ok if this is called with a NULL tree */
+ if (encoding & (ENC_VARIANT_MASK)) {
+ tvb_get_varint(tvb, start, length, &value, encoding);
+ }
+ else {
+ value = get_int64_value(tree, tvb, start, length, encoding);
+ }
+
+ if (retval) {
+ *retval = value;
+ }
+
+ CHECK_FOR_NULL_TREE(tree);
+
+ TRY_TO_FAKE_THIS_ITEM(tree, hfinfo->id, hfinfo);
+
+ new_fi = new_field_info(tree, hfinfo, tvb, start, length);
+
+ proto_tree_set_int64(new_fi, value);
+
+ new_fi->flags |= (encoding & ENC_LITTLE_ENDIAN) ? FI_LITTLE_ENDIAN : FI_BIG_ENDIAN;
+ if (encoding & (ENC_VARINT_PROTOBUF | ENC_VARINT_ZIGZAG)) {
new_fi->flags |= FI_VARINT;
}
@@ -3342,7 +3409,7 @@ proto_tree_add_item_ret_varint(proto_tree *tree, int hfindex, tvbuff_t *tvb,
proto_tree_set_uint64(new_fi, value);
new_fi->flags |= (encoding & ENC_LITTLE_ENDIAN) ? FI_LITTLE_ENDIAN : FI_BIG_ENDIAN;
- if (encoding & ENC_VARINT_PROTOBUF) {
+ if (encoding & (ENC_VARINT_PROTOBUF|ENC_VARINT_ZIGZAG)) {
new_fi->flags |= FI_VARINT;
}
@@ -5700,7 +5767,7 @@ get_hfi_length(header_field_info *hfinfo, tvbuff_t *tvb, const gint start, gint
* of the string", and if the tvbuff if short, we just
* throw an exception.
*
- * For ENC_VARINT_PROTOBUF|ENC_VARINT_QUIC, it means "find the end of the string",
+ * For ENC_VARINT_PROTOBUF|ENC_VARINT_QUIC|ENC_VARINT_ZIGZAG, it means "find the end of the string",
* and if the tvbuff if short, we just throw an exception.
*
* It's not valid for any other type of field. For those
@@ -5711,7 +5778,7 @@ get_hfi_length(header_field_info *hfinfo, tvbuff_t *tvb, const gint start, gint
* Length would run past the end of the packet.
*/
if ((IS_FT_INT(hfinfo->type)) || (IS_FT_UINT(hfinfo->type))) {
- if (encoding & ENC_VARINT_PROTOBUF) {
+ if (encoding & (ENC_VARINT_PROTOBUF|ENC_VARINT_ZIGZAG)) {
/*
* Leave the length as -1, so our caller knows
* it was -1.
@@ -5847,7 +5914,7 @@ get_full_length(header_field_info *hfinfo, tvbuff_t *tvb, const gint start,
case FT_INT48:
case FT_INT56:
case FT_INT64:
- if (encoding & (ENC_VARINT_PROTOBUF|ENC_VARINT_QUIC)) {
+ if (encoding & (ENC_VARIANT_MASK)) {
if (length < -1) {
report_type_length_mismatch(NULL, "a FT_[U]INT", length, TRUE);
}
diff --git a/epan/proto.h b/epan/proto.h
index b6f5ef37b0..9ba43f1be4 100644
--- a/epan/proto.h
+++ b/epan/proto.h
@@ -539,6 +539,13 @@ void proto_report_dissector_bug(const char *format, ...)
* See https://tools.ietf.org/html/draft-ietf-quic-transport-08#section-8.1
*/
#define ENC_VARINT_QUIC 0x00000004
+ /*
+ * Use the "zig-zag" varint format as described in the Protobuf protocol
+ * See https://developers.google.com/protocol-buffers/docs/encoding?csw=1#types
+ */
+#define ENC_VARINT_ZIGZAG 0x00000008
+
+#define ENC_VARIANT_MASK (ENC_VARINT_PROTOBUF|ENC_VARINT_QUIC|ENC_VARINT_ZIGZAG)
/* For cases where a string encoding contains hex, bit-or one or more
* of these for the allowed separator(s), as well as with ENC_STR_HEX.
@@ -1195,6 +1202,10 @@ proto_tree_add_item_ret_int(proto_tree *tree, int hfindex, tvbuff_t *tvb,
const gint start, gint length, const guint encoding, gint32 *retval);
WS_DLL_PUBLIC proto_item *
+proto_tree_add_item_ret_int64(proto_tree *tree, int hfindex, tvbuff_t *tvb,
+ const gint start, gint length, const guint encoding, gint64 *retval);
+
+WS_DLL_PUBLIC proto_item *
proto_tree_add_item_ret_uint(proto_tree *tree, int hfindex, tvbuff_t *tvb,
const gint start, gint length, const guint encoding, guint32 *retval);
diff --git a/epan/tvbuff.c b/epan/tvbuff.c
index 9b691f3e6f..73cb88ef50 100644
--- a/epan/tvbuff.c
+++ b/epan/tvbuff.c
@@ -4082,7 +4082,22 @@ tvb_get_varint(tvbuff_t *tvb, guint offset, guint maxlen, guint64 *value, const
return i + 1;
}
}
- } else if (encoding & ENC_VARINT_QUIC) {
+ } else if (encoding & ENC_VARINT_ZIGZAG) {
+ guint i;
+ guint64 b; /* current byte */
+
+ for (i = 0; ((i < FT_VARINT_MAX_LEN) && (i < maxlen)); ++i) {
+ b = tvb_get_guint8(tvb, offset++);
+ *value |= ((b & 0x7F) << (i * 7)); /* add lower 7 bits to val */
+
+ if (b < 0x80) {
+ /* ended successfully: the last byte's MSB (most significant bit) is zero */
+ *value = (*value >> 1) ^ ((*value & 1) ? -1 : 0);
+ return i + 1;
+ }
+ }
+ }
+ else if (encoding & ENC_VARINT_QUIC) {
/* calculate variable length */
*value = tvb_get_guint8(tvb, offset);
diff --git a/epan/tvbuff.h b/epan/tvbuff.h
index af77ec643a..6a048ed296 100644
--- a/epan/tvbuff.h
+++ b/epan/tvbuff.h
@@ -1016,7 +1016,7 @@ extern tvbuff_t* base64_to_tvb(tvbuff_t *parent, const char *base64);
* @param offset The offset in tvb from which we begin trying to extract integer.
* @param maxlen The maximum distance from offset that we may try to extract integer
* @param value if parsing succeeds, parsed varint will store here.
- * @param encoding The ENC_* that defines the format (e.g., ENC_VARINT_PROTOBUF, ENC_VARINT_QUIC)
+ * @param encoding The ENC_* that defines the format (e.g., ENC_VARINT_PROTOBUF, ENC_VARINT_QUIC, ENC_VARINT_ZIGZAG)
* @return the length of this varint in tvb. 0 means parsing failed.
*/
WS_DLL_PUBLIC guint tvb_get_varint(tvbuff_t *tvb, guint offset, guint maxlen, guint64 *value, const guint encoding);
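
As a usage note, the new encoding flag plugs into the existing varint entry points. A hedged sketch under the API as patched above (hf_example_delta and the surrounding variables are hypothetical):

/* Direct read: raw holds the zig-zag-decoded value, reinterpreted as
 * unsigned; len == 0 signals a malformed varint. */
guint64 raw;
guint   len = tvb_get_varint(tvb, offset, FT_VARINT_MAX_LEN, &raw, ENC_VARINT_ZIGZAG);

/* Or via the new item helper, which adds the field to the tree and also
 * returns the decoded value; length -1 means "find the end of the varint". */
gint64 delta;
proto_tree_add_item_ret_int64(tree, hf_example_delta, tvb, offset, -1,
                              ENC_VARINT_ZIGZAG, &delta);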