aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorParav Pandit <paravpandit@yahoo.com>2016-12-26 00:47:57 -0500
committerAnders Broman <a.broman58@gmail.com>2016-12-28 07:22:28 +0000
commitdd7349754beb739592f1e513506c6fc25eb4b22e (patch)
tree2c7aa42e45e354093431de255fbfab6b9754e15a
parente5a39920f9883133945b088c85c8fbf4737a6ecd (diff)
Added basic support for NVM Express over Fabrics for RDMA.
NVM Express is high speed interface for accessing solid state drives. NVM Express specifications are maintained by NVM Express industry association at http://www.nvmexpress.org. Bug: 13201 Change-Id: Id40edaf72838eea9f4087c8ddba9518a9374efab Tested-by: paravpandit@yahoo.com Reviewed-on: https://code.wireshark.org/review/19063 Reviewed-by: Michael Mann <mmann78@netscape.net> Petri-Dish: Michael Mann <mmann78@netscape.net> Tested-by: Petri Dish Buildbot <buildbot-no-reply@wireshark.org> Reviewed-by: Parav Pandit <paravpandit@yahoo.com> Reviewed-by: Anders Broman <a.broman58@gmail.com>
-rw-r--r--docbook/release-notes.asciidoc2
-rw-r--r--epan/dissectors/CMakeLists.txt2
-rw-r--r--epan/dissectors/Makefile.am3
-rw-r--r--epan/dissectors/packet-nvme-rdma.c1064
-rw-r--r--epan/dissectors/packet-nvme.c551
-rw-r--r--epan/dissectors/packet-nvme.h95
6 files changed, 1717 insertions, 0 deletions
diff --git a/docbook/release-notes.asciidoc b/docbook/release-notes.asciidoc
index ed918310ed..47e8c2071a 100644
--- a/docbook/release-notes.asciidoc
+++ b/docbook/release-notes.asciidoc
@@ -66,6 +66,8 @@ ISO 15765
Local Service Discovery (LSD)
M2 Application Protocol
Nordic BLE Sniffer
+NVMe Fabrics RDMA
+NVMe
RFTap Protocol
SCTE-35 Digital Program Insertion Messages
Snort Post-dissector
diff --git a/epan/dissectors/CMakeLists.txt b/epan/dissectors/CMakeLists.txt
index ee77cedfcd..aa827b95f5 100644
--- a/epan/dissectors/CMakeLists.txt
+++ b/epan/dissectors/CMakeLists.txt
@@ -985,6 +985,8 @@ set(DISSECTOR_SRC
packet-ntlmssp.c
packet-ntp.c
packet-null.c
+ packet-nvme.c
+ packet-nvme-rdma.c
packet-nwmtp.c
packet-nwp.c
packet-oampdu.c
diff --git a/epan/dissectors/Makefile.am b/epan/dissectors/Makefile.am
index d1440eb064..adb5405901 100644
--- a/epan/dissectors/Makefile.am
+++ b/epan/dissectors/Makefile.am
@@ -1007,6 +1007,8 @@ DISSECTOR_SRC = \
packet-ntlmssp.c \
packet-ntp.c \
packet-null.c \
+ packet-nvme.c \
+ packet-nvme-rdma.c \
packet-nwmtp.c \
packet-nwp.c \
packet-oampdu.c \
@@ -1643,6 +1645,7 @@ DISSECTOR_INCLUDES = \
packet-nlm.h \
packet-ntlmssp.h \
packet-ntp.h \
+ packet-nvme.h \
packet-ocsp.h \
packet-opensafety.h \
packet-osi.h \
diff --git a/epan/dissectors/packet-nvme-rdma.c b/epan/dissectors/packet-nvme-rdma.c
new file mode 100644
index 0000000000..5ac99c17c5
--- /dev/null
+++ b/epan/dissectors/packet-nvme-rdma.c
@@ -0,0 +1,1064 @@
+/* packet-nvme-rdma.c
+ * Routines for NVM Express over Fabrics(RDMA) dissection
+ * Copyright 2016
+ * Code by Parav Pandit
+ *
+ * Wireshark - Network traffic analyzer
+ * By Gerald Combs <gerald@wireshark.org>
+ * Copyright 1998 Gerald Combs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/*
+NVM Express is a high-speed interface for accessing solid state drives.
+NVM Express specifications are maintained by NVM Express industry
+association at http://www.nvmexpress.org.
+
+This file adds support to dissect NVM Express over fabrics packets
+for RDMA. This adds very basic support for dissecting commands and
+completions.
+
+Current dissection supports dissection of
+(a) NVMe cmd and cqe
+(b) NVMe Fabric command and cqe
+As part of it, it also calculates cmd completion latencies.
+
+This protocol is similar to iSCSI and SCSI dissection where iSCSI is the
+transport protocol for carrying SCSI commands and responses. Similarly the
+NVMe Fabrics - RDMA transport protocol carries NVMe commands.
+
+ +----------+
+ | NVMe |
+ +------+---+
+ |
++-----------+---------+
+| NVMe Fabrics |
++----+-----------+----+
+ | |
++----+---+ +---+----+
+| RDMA | | FC |
++--------+ +--------+
+
+References:
+NVMe Express fabrics specification is located at
+http://www.nvmexpress.org/wp-content/uploads/NVMe_over_Fabrics_1_0_Gold_20160605.pdf
+
+NVMe Express specification is located at
+http://www.nvmexpress.org/wp-content/uploads/NVM-Express-1_2a.pdf
+
+NVM Express RDMA TCP port assigned by IANA that maps to RDMA IP service
+TCP port can be found at
+http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=NVM+Express
+
+*/
+#include "config.h"
+
+#include <stdlib.h>
+#include <errno.h>
+
+#include <epan/packet.h>
+#include <epan/prefs.h>
+#include <epan/conversation.h>
+#include <epan/addr_resolv.h>
+
+#include "packet-infiniband.h"
+#include "packet-nvme.h"
+
+/* Masks to pick the ULP, protocol and port fields out of the 64-bit
+ * IB CM service id.
+ */
+#define SID_ULP_MASK 0x00000000FF000000
+#define SID_PROTO_MASK 0x0000000000FF0000
+#define SID_PORT_MASK 0x000000000000FFFF
+
+#define SID_ULP 0x01
+#define SID_PROTO_TCP 0x06
+/* Default matches the IANA-registered "nvme" TCP service port (4420),
+ * referenced by the IANA URL in the header comment above.
+ */
+#define NVME_RDMA_TCP_PORT_RANGE "4420"
+
+#define SID_MASK (SID_ULP_MASK | SID_PROTO_MASK)
+#define SID_ULP_TCP ((SID_ULP << 3 * 8) | (SID_PROTO_TCP << 2 * 8))
+
+#define NVME_FABRICS_RDMA "NVMe Fabrics RDMA"
+
+#define NVME_FABRIC_CMD_SIZE NVME_CMD_SIZE
+#define NVME_FABRIC_CQE_SIZE NVME_CQE_SIZE
+
+/* Opcode that marks a fabric (as opposed to a regular NVMe) command. */
+#define NVME_FABRIC_OPC 0x7F
+
+/* Fabric command types carried in the fctype field (byte 4 of the cmd). */
+#define NVME_FCTYPE_CONNECT 0x1
+#define NVME_FCTYPE_AUTH_RECV 0x6
+#define NVME_FCTYPE_PROP_GET 0x4
+#define NVME_FCTYPE_PROP_SET 0x0
+
+static const value_string fctype_tbl[] = {
+ { NVME_FCTYPE_CONNECT, "Connect"},
+ { NVME_FCTYPE_PROP_GET, "Property Get"},
+ { NVME_FCTYPE_PROP_SET, "Property Set"},
+ { NVME_FCTYPE_AUTH_RECV, "Authentication Recv"},
+ { 0, NULL}
+};
+
+/* Controller property offsets used by Property Get/Set commands. */
+static const value_string prop_offset_tbl[] = {
+ { 0x0, "Controller Capabilities"},
+ { 0x8, "Version"},
+ { 0xc, "Reserved"},
+ { 0x10, "Reserved"},
+ { 0x14, "Controller Configuration"},
+ { 0x18, "Reserved"},
+ { 0x1c, "Controller Status"},
+ { 0x20, "NVM Subsystem Reset"},
+ { 0x24, "Reserved"},
+ { 0x28, "Reserved"},
+ { 0x30, "Reserved"},
+ { 0x38, "Reserved"},
+ { 0x3c, "Reserved"},
+ { 0x40, "Reserved"},
+ { 0, NULL}
+};
+
+/* Attribute size encoding used by Property Get/Set commands. */
+static const value_string attr_size_tbl[] = {
+ { 0, "4 bytes"},
+ { 1, "8 bytes"},
+ { 0, NULL}
+};
+
+/* Per-queue context: wraps the transport-independent NVMe queue state. */
+struct nvme_rdma_q_ctx {
+ struct nvme_q_ctx n_q_ctx;
+};
+
+/* Per-command context: common NVMe cmd state plus the fabric cmd type. */
+struct nvme_rdma_cmd_ctx {
+ struct nvme_cmd_ctx n_cmd_ctx;
+ guint8 fctype; /* fabric cmd type */
+};
+
+/* Registration entry points invoked by the Wireshark core. */
+void proto_reg_handoff_nvme_rdma(void);
+void proto_register_nvme_rdma(void);
+
+/* Protocol and dissector handles. */
+static int proto_nvme_rdma = -1;
+static dissector_handle_t ib_handler;
+static int proto_ib = -1;
+
+/* NVMe Fabrics RDMA CM Private data */
+static int hf_nvme_rdma_cm_req_recfmt = -1;
+static int hf_nvme_rdma_cm_req_qid = -1;
+static int hf_nvme_rdma_cm_req_hrqsize = -1;
+static int hf_nvme_rdma_cm_req_hsqsize = -1;
+static int hf_nvme_rdma_cm_req_reserved = -1;
+
+static int hf_nvme_rdma_cm_rsp_recfmt = -1;
+static int hf_nvme_rdma_cm_rsp_crqsize = -1;
+static int hf_nvme_rdma_cm_rsp_reserved = -1;
+
+static int hf_nvme_rdma_cm_rej_recfmt = -1;
+static int hf_nvme_rdma_cm_rej_status = -1;
+static int hf_nvme_rdma_cm_rej_reserved = -1;
+
+/* NVMe Fabric Cmd */
+static int hf_nvme_rdma_cmd = -1;
+static int hf_nvme_rdma_from_host_unknown_data = -1;
+
+static int hf_nvme_rdma_cmd_opc = -1;
+static int hf_nvme_rdma_cmd_rsvd = -1;
+static int hf_nvme_rdma_cmd_cid = -1;
+static int hf_nvme_rdma_cmd_fctype = -1;
+static int hf_nvme_rdma_cmd_connect_rsvd1 = -1;
+static int hf_nvme_rdma_cmd_connect_sgl1 = -1;
+static int hf_nvme_rdma_cmd_connect_recfmt = -1;
+static int hf_nvme_rdma_cmd_connect_qid = -1;
+static int hf_nvme_rdma_cmd_connect_sqsize = -1;
+static int hf_nvme_rdma_cmd_connect_cattr = -1;
+static int hf_nvme_rdma_cmd_connect_rsvd2 = -1;
+static int hf_nvme_rdma_cmd_connect_kato = -1;
+static int hf_nvme_rdma_cmd_connect_rsvd3 = -1;
+
+/* Property Get/Set command fields. */
+static int hf_nvme_rdma_cmd_prop_attr_rsvd = -1;
+static int hf_nvme_rdma_cmd_prop_attr_rsvd1 = -1;
+static int hf_nvme_rdma_cmd_prop_attr_size = -1;
+static int hf_nvme_rdma_cmd_prop_attr_rsvd2 = -1;
+static int hf_nvme_rdma_cmd_prop_attr_offset = -1;
+static int hf_nvme_rdma_cmd_prop_attr_get_rsvd3 = -1;
+static int hf_nvme_rdma_cmd_prop_attr_set_4B_value = -1;
+static int hf_nvme_rdma_cmd_prop_attr_set_4B_value_rsvd = -1;
+static int hf_nvme_rdma_cmd_prop_attr_set_8B_value = -1;
+static int hf_nvme_rdma_cmd_prop_attr_set_rsvd3 = -1;
+
+static int hf_nvme_rdma_cmd_generic_rsvd1 = -1;
+static int hf_nvme_rdma_cmd_generic_field = -1;
+
+/* NVMe Fabric CQE */
+static int hf_nvme_rdma_cqe = -1;
+static int hf_nvme_rdma_cqe_sts = -1;
+static int hf_nvme_rdma_cqe_sqhd = -1;
+static int hf_nvme_rdma_cqe_rsvd = -1;
+static int hf_nvme_rdma_cqe_cid = -1;
+static int hf_nvme_rdma_cqe_status = -1;
+static int hf_nvme_rdma_cqe_status_rsvd = -1;
+
+static int hf_nvme_rdma_cqe_connect_cntlid = -1;
+static int hf_nvme_rdma_cqe_connect_authreq = -1;
+static int hf_nvme_rdma_cqe_connect_rsvd = -1;
+static int hf_nvme_rdma_cqe_prop_set_rsvd = -1;
+
+static int hf_nvme_rdma_to_host_unknown_data = -1;
+
+/* tracking Cmd and its respective CQE */
+static int hf_nvme_rdma_cmd_pkt = -1;
+static int hf_nvme_rdma_cqe_pkt = -1;
+static int hf_nvme_rdma_cmd_latency = -1;
+static int hf_nvme_rdma_cmd_qid = -1;
+
+/* Initialize the subtree pointers */
+static gint ett_cm = -1;
+static gint ett_data = -1;
+
+/* Configured NVMe-oF RDMA service port range (user preference). */
+static range_t *gPORT_RANGE;
+
+/* Returns the Infiniband conversation data attached to conv when (and only
+ * when) its CM service id identifies an NVMe Fabrics RDMA flow; NULL otherwise.
+ */
+static conversation_infiniband_data *get_conversion_data(conversation_t *conv)
+{
+ conversation_infiniband_data *ib_data =
+ (conversation_infiniband_data *)conversation_get_proto_data(conv, proto_ib);
+
+ if (ib_data == NULL)
+ return NULL;
+
+ /* only TCP-ULP service ids are of interest to this dissector */
+ if ((ib_data->service_id & SID_MASK) != SID_ULP_TCP)
+ return NULL;
+
+ /* the port portion of the service id must be a configured NVMe-oF port */
+ if (!value_is_in_range(gPORT_RANGE, (guint32)(ib_data->service_id & SID_PORT_MASK)))
+ return NULL;
+
+ return ib_data;
+}
+
+/* Finds the bidirectional IB conversation for this packet. Also reports the
+ * unidirectional (destination-keyed) NVMe Fabrics conversation data through
+ * uni_conv_data. Returns NULL when the packet does not belong to an NVMe
+ * Fabrics RDMA flow.
+ */
+static conversation_t*
+find_ib_conversation(packet_info *pinfo, conversation_infiniband_data **uni_conv_data)
+{
+ conversation_t *conv;
+ conversation_infiniband_data *conv_data;
+
+ /* destination-only lookup: the QP conversation was created keyed on the
+ * destination address/QP only
+ */
+ conv = find_conversation(pinfo->num, &pinfo->dst, &pinfo->dst,
+ PT_IBQP, pinfo->destport, pinfo->destport,
+ NO_ADDR_B|NO_PORT_B);
+ if (!conv)
+ return NULL; /* nothing to do with no conversation context */
+
+ conv_data = get_conversion_data(conv);
+ *uni_conv_data = conv_data;
+ if (!conv_data)
+ return NULL;
+
+ /* now that we found unidirectional conversation, find bidirectional
+ * conversation, so that we can relate to nvme q.
+ */
+ return find_conversation(pinfo->num, &pinfo->src, &pinfo->dst,
+ PT_IBQP, pinfo->srcport, pinfo->destport, 0);
+}
+
+/* Extracts the NVMe queue id from the CM private data of the conversation
+ * this packet belongs to; returns 0 when no qid can be determined.
+ * Fix: the second find_conversation() result was passed to
+ * get_conversion_data() without a NULL check, which dereferenced a NULL
+ * conversation when the source-QP conversation is unknown.
+ */
+static guint16 find_nvme_qid(packet_info *pinfo)
+{
+ conversation_t *conv;
+ conversation_infiniband_data *conv_data;
+ guint16 qid;
+
+ conv = find_conversation(pinfo->num, &pinfo->dst, &pinfo->dst,
+ PT_IBQP, pinfo->destport, pinfo->destport,
+ NO_ADDR_B|NO_PORT_B);
+ if (!conv)
+ return 0; /* nothing to do with no conversation context */
+
+ conv_data = get_conversion_data(conv);
+ if (!conv_data)
+ return 0;
+
+ if (conv_data->client_to_server == FALSE) {
+ /* qid lives at offset 178 of the CM MAD private data */
+ memcpy(&qid, &conv_data->mad_private_data[178], 2);
+ return qid;
+ }
+ /* client-to-server direction: follow the source QP back to the CM
+ * conversation that carries the private data
+ */
+ conv = find_conversation(pinfo->num, &pinfo->src, &pinfo->src,
+ PT_IBQP, conv_data->src_qp, conv_data->src_qp,
+ NO_ADDR_B|NO_PORT_B);
+ if (!conv)
+ return 0; /* no conversation found for the source QP */
+
+ conv_data = get_conversion_data(conv);
+ if (!conv_data)
+ return 0;
+ memcpy(&qid, &conv_data->mad_private_data[178], 2);
+ return qid;
+}
+
+/* Returns the NVMe queue context attached to conv, creating and attaching
+ * a new one on first sight.
+ */
+static struct nvme_rdma_q_ctx*
+find_add_q_ctx(packet_info *pinfo, conversation_t *conv)
+{
+ struct nvme_rdma_q_ctx *ctx;
+
+ /* reuse the context if one is already bound to this conversation */
+ ctx = (struct nvme_rdma_q_ctx*)conversation_get_proto_data(conv, proto_nvme_rdma);
+ if (ctx != NULL)
+ return ctx;
+
+ /* first sighting: allocate, initialize and attach a fresh context */
+ ctx = wmem_new(wmem_file_scope(), struct nvme_rdma_q_ctx);
+ ctx->n_q_ctx.pending_cmds = wmem_tree_new(wmem_file_scope());
+ ctx->n_q_ctx.done_cmds = wmem_tree_new(wmem_file_scope());
+ ctx->n_q_ctx.qid = find_nvme_qid(pinfo);
+ conversation_add_proto_data(conv, proto_nvme_rdma, ctx);
+ return ctx;
+}
+
+/* Returns the NVMe Fabrics conversation data for this CM packet, or NULL
+ * when the packet is not part of an NVMe Fabrics RDMA flow.
+ */
+static conversation_infiniband_data*
+find_ib_cm_conversation(packet_info *pinfo)
+{
+ conversation_t *conv = find_conversation(pinfo->num, &pinfo->src, &pinfo->dst,
+ PT_IBQP, pinfo->srcport, pinfo->destport, 0);
+
+ return conv ? get_conversion_data(conv) : NULL;
+}
+
+/* Dissects the NVMe Fabrics private data carried in an RDMA CM REQ:
+ * record format, queue id, and host receive/send queue sizes.
+ */
+static void dissect_rdma_cm_req_packet(tvbuff_t *tvb, proto_tree *tree)
+{
+ proto_tree *cm_tree;
+ proto_item *ti, *qid_item;
+ /* private data is at offset of 36 bytes */
+ int offset = 36;
+ guint16 qid;
+
+ /* create display subtree for private data */
+ ti = proto_tree_add_item(tree, proto_nvme_rdma, tvb, offset, 32, ENC_NA);
+ cm_tree = proto_item_add_subtree(ti, ett_cm);
+
+ proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_req_recfmt, tvb,
+ offset + 0, 2, ENC_LITTLE_ENDIAN);
+
+ /* qid 0 is the admin queue; any other qid is an I/O queue */
+ qid_item = proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_req_qid, tvb,
+ offset + 2, 2, ENC_LITTLE_ENDIAN);
+ qid = tvb_get_guint16(tvb, offset + 2, ENC_LITTLE_ENDIAN);
+ proto_item_append_text(qid_item, " %s", qid ? "IOQ" : "AQ");
+
+ proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_req_hrqsize, tvb,
+ offset + 4, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_req_hsqsize, tvb,
+ offset + 6, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_req_reserved, tvb,
+ offset + 8, 24, ENC_NA);
+}
+
+/* Dissects the NVMe Fabrics private data carried in an RDMA CM REP:
+ * record format and controller receive queue size.
+ */
+static void dissect_rdma_cm_rsp_packet(tvbuff_t *tvb, proto_tree *tree)
+{
+ proto_tree *cm_tree;
+ proto_item *ti;
+
+ /* create display subtree for the private data that starts at offset 0 */
+ ti = proto_tree_add_item(tree, proto_nvme_rdma, tvb, 0, 32, ENC_NA);
+ cm_tree = proto_item_add_subtree(ti, ett_cm);
+
+ proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_rsp_recfmt, tvb,
+ 0, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_rsp_crqsize, tvb,
+ 2, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_rsp_reserved, tvb,
+ 4, 28, ENC_NA);
+}
+
+/* Dissects the NVMe Fabrics private data carried in an RDMA CM REJ:
+ * record format and rejection status.
+ */
+static void dissect_rdma_cm_rej_packet(tvbuff_t *tvb, proto_tree *tree)
+{
+ proto_tree *cm_tree;
+ proto_item *ti;
+
+ /* create display subtree for the private data that starts at offset 0 */
+ ti = proto_tree_add_item(tree, proto_nvme_rdma, tvb, 0, 32, ENC_NA);
+ cm_tree = proto_item_add_subtree(ti, ett_cm);
+
+ proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_rej_recfmt, tvb,
+ 0, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_rej_status, tvb,
+ 2, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_rej_reserved, tvb,
+ 4, 28, ENC_NA);
+}
+
+/* Dispatches an RDMA CM packet to the matching private-data dissector;
+ * unknown CM attributes are silently ignored. Always reports handled.
+ */
+static int dissect_rdma_cm_packet(tvbuff_t *tvb, proto_tree *tree,
+ guint16 cm_attribute_id)
+{
+ if (cm_attribute_id == ATTR_CM_REQ)
+ dissect_rdma_cm_req_packet(tvb, tree);
+ else if (cm_attribute_id == ATTR_CM_REP)
+ dissect_rdma_cm_rsp_packet(tvb, tree);
+ else if (cm_attribute_id == ATTR_CM_REJ)
+ dissect_rdma_cm_rej_packet(tvb, tree);
+ return TRUE;
+}
+
+/* Heuristic entry point for IB CM packets: claims the packet only when it
+ * belongs to an NVMe Fabrics RDMA conversation. Returns TRUE when handled.
+ */
+static int
+dissect_nvme_ib_cm(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
+ void *data)
+{
+ conversation_infiniband_data *conv_data = NULL;
+ struct infinibandinfo *info = (struct infinibandinfo *)data;
+
+ conv_data = find_ib_cm_conversation(pinfo);
+ if (!conv_data)
+ return FALSE;
+
+ col_set_str(pinfo->cinfo, COL_PROTOCOL, NVME_FABRICS_RDMA);
+ return dissect_rdma_cm_packet(tvb, tree, info->cm_attribute_id);
+}
+
+/* Dissects the fabric Connect command: reserved bytes, SGL descriptor,
+ * record format, queue id, SQ size, connect attributes and keep-alive
+ * timeout (offsets follow the NVMe-oF Connect command layout).
+ */
+static void dissect_nvme_fabric_connect_cmd(proto_tree *cmd_tree, tvbuff_t *cmd_tvb)
+{
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_rsvd1, cmd_tvb,
+ 5, 19, ENC_NA);
+ dissect_nvme_cmd_sgl(cmd_tvb, cmd_tree, hf_nvme_rdma_cmd_connect_sgl1);
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_recfmt, cmd_tvb,
+ 40, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_qid, cmd_tvb,
+ 42, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_sqsize, cmd_tvb,
+ 44, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_cattr, cmd_tvb,
+ 46, 1, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_rsvd2, cmd_tvb,
+ 47, 1, ENC_NA);
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_kato, cmd_tvb,
+ 48, 4, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_rsvd3, cmd_tvb,
+ 52, 12, ENC_NA);
+}
+
+/* Dissects the fields shared by Property Get and Property Set commands
+ * (attribute size and property offset). Returns the 3-bit attribute size
+ * code so the caller can pick the 4- or 8-byte value layout.
+ */
+static guint8 dissect_nvme_fabric_prop_cmd_common(proto_tree *cmd_tree, tvbuff_t *cmd_tvb)
+{
+ proto_item *attr_item, *offset_item;
+ guint32 offset;
+ guint8 attr;
+
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_rsvd, cmd_tvb,
+ 5, 35, ENC_NA);
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_rsvd1, cmd_tvb,
+ 40, 1, ENC_LITTLE_ENDIAN);
+ attr_item = proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_size, cmd_tvb,
+ 40, 1, ENC_LITTLE_ENDIAN);
+ /* only the low 3 bits of byte 40 encode the attribute size */
+ attr = tvb_get_guint8(cmd_tvb, 40) & 0x7;
+ proto_item_append_text(attr_item, " %s",
+ val_to_str(attr, attr_size_tbl, "Reserved"));
+
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_rsvd2, cmd_tvb,
+ 41, 3, ENC_NA);
+
+ offset_item = proto_tree_add_item_ret_uint(cmd_tree, hf_nvme_rdma_cmd_prop_attr_offset,
+ cmd_tvb, 44, 4, ENC_LITTLE_ENDIAN, &offset);
+ proto_item_append_text(offset_item, " %s",
+ val_to_str(offset, prop_offset_tbl, "Unknown Property"));
+ return attr;
+}
+
+/* Dissects a Property Get command: common fields plus trailing reserved bytes. */
+static void dissect_nvme_fabric_prop_get_cmd(proto_tree *cmd_tree, tvbuff_t *cmd_tvb)
+{
+ dissect_nvme_fabric_prop_cmd_common(cmd_tree, cmd_tvb);
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_get_rsvd3, cmd_tvb,
+ 48, 16, ENC_NA);
+}
+
+/* Dissects a Property Set command. The attribute size code selects whether
+ * the value at offset 48 is 4 bytes (followed by 4 reserved bytes) or 8 bytes.
+ */
+static void dissect_nvme_fabric_prop_set_cmd(proto_tree *cmd_tree, tvbuff_t *cmd_tvb)
+{
+ guint8 attr;
+
+ attr = dissect_nvme_fabric_prop_cmd_common(cmd_tree, cmd_tvb);
+ if (attr == 0) {
+ /* attr 0: 4-byte property value */
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_set_4B_value, cmd_tvb,
+ 48, 4, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_set_4B_value_rsvd, cmd_tvb,
+ 52, 4, ENC_LITTLE_ENDIAN);
+ } else {
+ /* any other attr code: treated as an 8-byte property value */
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_set_8B_value, cmd_tvb,
+ 48, 8, ENC_LITTLE_ENDIAN);
+ }
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_set_rsvd3, cmd_tvb,
+ 56, 8, ENC_NA);
+}
+
+/* Fallback dissection for fabric commands without a dedicated dissector:
+ * shows reserved bytes and the cmd-specific field as raw data.
+ */
+static void dissect_nvme_fabric_generic_cmd(proto_tree *cmd_tree, tvbuff_t *cmd_tvb)
+{
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_generic_rsvd1, cmd_tvb,
+ 5, 35, ENC_NA);
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_generic_field, cmd_tvb,
+ 40, 24, ENC_NA);
+}
+
+/* Returns the command context for cmd_id on this queue. On the first pass
+ * a new context is allocated and queued awaiting its completion; on later
+ * passes the context is fetched from the done list.
+ */
+static struct nvme_rdma_cmd_ctx*
+bind_cmd_to_qctx(packet_info *pinfo, struct nvme_q_ctx *q_ctx,
+ guint16 cmd_id)
+{
+ struct nvme_rdma_cmd_ctx *cmd;
+
+ if (PINFO_FD_VISITED(pinfo)) {
+ /* Already visited this frame */
+ cmd = (struct nvme_rdma_cmd_ctx*)
+ nvme_lookup_cmd_in_done_list(pinfo, q_ctx, cmd_id);
+ /* if we have already visited frame but haven't found completion yet,
+ * we won't find cmd in done q, so allocate a dummy ctx for doing
+ * rest of the processing.
+ */
+ if (cmd == NULL)
+ cmd = wmem_new0(wmem_file_scope(), struct nvme_rdma_cmd_ctx);
+ } else {
+ /* first pass: allocate and queue the cmd awaiting its cqe */
+ cmd = wmem_new0(wmem_file_scope(), struct nvme_rdma_cmd_ctx);
+ nvme_add_cmd_to_pending_list(pinfo, q_ctx,
+ &cmd->n_cmd_ctx, (void*)cmd, cmd_id);
+ }
+ return cmd;
+}
+
+/* Dissects an NVMe fabric command (opcode 0x7F): common header fields,
+ * then dispatches on the fabric cmd type (fctype, byte 4) to the
+ * type-specific dissector. Records fctype in cmd_ctx so the matching cqe
+ * can be dissected later.
+ */
+static void
+dissect_nvme_fabric_cmd(tvbuff_t *nvme_tvb, proto_tree *nvme_tree,
+ struct nvme_rdma_cmd_ctx *cmd_ctx)
+{
+ proto_tree *cmd_tree;
+ tvbuff_t *cmd_tvb;
+ proto_item *ti, *opc_item, *fctype_item;
+ guint8 fctype;
+
+ fctype = tvb_get_guint8(nvme_tvb, 4);
+ cmd_ctx->fctype = fctype;
+
+ cmd_tvb = tvb_new_subset_length(nvme_tvb, 0, NVME_FABRIC_CMD_SIZE);
+
+ ti = proto_tree_add_item(nvme_tree, hf_nvme_rdma_cmd, cmd_tvb, 0,
+ NVME_FABRIC_CMD_SIZE, ENC_NA);
+ cmd_tree = proto_item_add_subtree(ti, ett_data);
+
+ opc_item = proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_opc, cmd_tvb,
+ 0, 1, ENC_LITTLE_ENDIAN);
+ proto_item_append_text(opc_item, "%s", " Fabric Cmd");
+
+ /* cross-link this cmd with its completion packet */
+ nvme_publish_cmd_to_cqe_link(cmd_tree, cmd_tvb, hf_nvme_rdma_cqe_pkt,
+ &cmd_ctx->n_cmd_ctx);
+
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_rsvd, cmd_tvb,
+ 1, 1, ENC_NA);
+ proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_cid, cmd_tvb,
+ 2, 2, ENC_LITTLE_ENDIAN);
+
+ fctype_item = proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_fctype, cmd_tvb,
+ 4, 1, ENC_LITTLE_ENDIAN);
+ proto_item_append_text(fctype_item, " %s",
+ val_to_str(fctype, fctype_tbl, "Unknown FcType"));
+
+ switch(fctype) {
+ case NVME_FCTYPE_CONNECT:
+ dissect_nvme_fabric_connect_cmd(cmd_tree, cmd_tvb);
+ break;
+ case NVME_FCTYPE_PROP_GET:
+ dissect_nvme_fabric_prop_get_cmd(cmd_tree, cmd_tvb);
+ break;
+ case NVME_FCTYPE_PROP_SET:
+ dissect_nvme_fabric_prop_set_cmd(cmd_tree, cmd_tvb);
+ break;
+ case NVME_FCTYPE_AUTH_RECV: /* no dedicated dissector yet */
+ default:
+ dissect_nvme_fabric_generic_cmd(cmd_tree, cmd_tvb);
+ break;
+ }
+}
+
+/* Dissects a host-to-controller NVMe command SEND: binds a cmd context to
+ * the queue, then routes to the fabric or regular NVMe cmd dissector.
+ */
+static void
+dissect_nvme_rdma_cmd(tvbuff_t *nvme_tvb, packet_info *pinfo, proto_tree *root_tree,
+ proto_tree *nvme_tree, struct nvme_rdma_q_ctx *q_ctx)
+{
+ guint8 opcode = tvb_get_guint8(nvme_tvb, 0);
+ guint16 cmd_id = tvb_get_guint16(nvme_tvb, 2, ENC_LITTLE_ENDIAN);
+ struct nvme_rdma_cmd_ctx *cmd_ctx = bind_cmd_to_qctx(pinfo, &q_ctx->n_q_ctx, cmd_id);
+
+ /* opcode 0x7F marks a fabric command; anything else is a regular cmd */
+ if (opcode == NVME_FABRIC_OPC) {
+ cmd_ctx->n_cmd_ctx.fabric = TRUE;
+ dissect_nvme_fabric_cmd(nvme_tvb, nvme_tree, cmd_ctx);
+ } else {
+ cmd_ctx->n_cmd_ctx.fabric = FALSE;
+ dissect_nvme_cmd(nvme_tvb, pinfo, root_tree, &q_ctx->n_q_ctx,
+ &cmd_ctx->n_cmd_ctx);
+ }
+}
+
+/* Dissects host-to-controller traffic. Only an RC SEND carrying exactly one
+ * NVMe command is understood; everything else is shown as unknown data.
+ */
+static void
+dissect_nvme_from_host(tvbuff_t *nvme_tvb, packet_info *pinfo,
+ proto_tree *root_tree, proto_tree *nvme_tree,
+ struct infinibandinfo *info,
+ struct nvme_rdma_q_ctx *q_ctx,
+ guint len)
+
+{
+ if (info->opCode == RC_SEND_ONLY && len == NVME_FABRIC_CMD_SIZE) {
+ dissect_nvme_rdma_cmd(nvme_tvb, pinfo, root_tree, nvme_tree, q_ctx);
+ } else {
+ proto_tree_add_item(nvme_tree, hf_nvme_rdma_from_host_unknown_data, nvme_tvb,
+ 0, len, ENC_NA);
+ }
+}
+
+/* Dissects the first 8 (fctype-specific) bytes of a fabric cqe.
+ * Cleanup: the PROP_GET case duplicated the default branch and a stray
+ * semicolon followed the switch; merged via explicit fallthrough.
+ */
+static void
+dissect_nvme_rdma_cqe_status_8B(proto_tree *cqe_tree, tvbuff_t *cqe_tvb,
+ struct nvme_rdma_cmd_ctx *cmd_ctx)
+{
+ switch (cmd_ctx->fctype) {
+ case NVME_FCTYPE_CONNECT:
+ /* connect response: controller id and auth requirements */
+ proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_connect_cntlid, cqe_tvb,
+ 0, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_connect_authreq, cqe_tvb,
+ 2, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_connect_rsvd, cqe_tvb,
+ 4, 4, ENC_NA);
+ break;
+ case NVME_FCTYPE_PROP_SET:
+ /* property set responses carry no data here */
+ proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_prop_set_rsvd, cqe_tvb,
+ 0, 8, ENC_NA);
+ break;
+ case NVME_FCTYPE_PROP_GET: /* fallthrough: cmd specific status */
+ case NVME_FCTYPE_AUTH_RECV: /* fallthrough */
+ default:
+ proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_sts, cqe_tvb,
+ 0, 8, ENC_LITTLE_ENDIAN);
+ break;
+ }
+}
+
+/* Dissects a fabric command completion (cqe): fctype-specific status bytes,
+ * SQ head pointer, command id and status field, and publishes the cmd/cqe
+ * cross-links plus the measured latency.
+ * Fix: sqhd is a 2-byte FT_UINT16 field and was added with ENC_NA, which
+ * leaves the byte order unspecified; integer fields need an explicit
+ * encoding (ENC_LITTLE_ENDIAN here, matching every other cqe field).
+ */
+static void
+dissect_nvme_fabric_cqe(tvbuff_t *nvme_tvb,
+ proto_tree *nvme_tree,
+ struct nvme_rdma_cmd_ctx *cmd_ctx)
+{
+ proto_tree *cqe_tree;
+ proto_item *ti;
+ tvbuff_t *cqe_tvb;
+
+ cqe_tvb = tvb_new_subset_length(nvme_tvb, 0, NVME_FABRIC_CQE_SIZE);
+
+ ti = proto_tree_add_item(nvme_tree, hf_nvme_rdma_cqe, nvme_tvb,
+ 0, NVME_FABRIC_CQE_SIZE, ENC_NA);
+ proto_item_append_text(ti, " (For Cmd: %s)", val_to_str(cmd_ctx->fctype,
+ fctype_tbl, "Unknown Cmd"));
+
+ cqe_tree = proto_item_add_subtree(ti, ett_data);
+
+ nvme_publish_cqe_to_cmd_link(cqe_tree, cqe_tvb, hf_nvme_rdma_cmd_pkt, &cmd_ctx->n_cmd_ctx);
+ nvme_publish_cmd_latency(cqe_tree, &cmd_ctx->n_cmd_ctx, hf_nvme_rdma_cmd_latency);
+
+ /* bytes 0-7 depend on the fabric cmd type */
+ dissect_nvme_rdma_cqe_status_8B(cqe_tree, cqe_tvb, cmd_ctx);
+
+ proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_sqhd, cqe_tvb,
+ 8, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_rsvd, cqe_tvb,
+ 10, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_cid, cqe_tvb,
+ 12, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_status, cqe_tvb,
+ 14, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_status_rsvd, cqe_tvb,
+ 14, 2, ENC_LITTLE_ENDIAN);
+}
+
+/* Dissects a controller-to-host completion SEND. On the first pass the
+ * matching cmd is moved from the pending list to the done list; on later
+ * passes it is fetched from the done list. Unmatched (or duplicate) cqes
+ * are shown as unknown data.
+ */
+static void
+dissect_nvme_rdma_cqe(tvbuff_t *nvme_tvb, packet_info *pinfo,
+ proto_tree *root_tree, proto_tree *nvme_tree,
+ struct nvme_rdma_q_ctx *q_ctx)
+{
+ struct nvme_rdma_cmd_ctx *cmd_ctx;
+ guint16 cmd_id;
+
+ /* command id lives at bytes 12-13 of the cqe */
+ cmd_id = tvb_get_guint16(nvme_tvb, 12, ENC_LITTLE_ENDIAN);
+
+ if (!PINFO_FD_VISITED(pinfo)) {
+
+ cmd_ctx = (struct nvme_rdma_cmd_ctx*)
+ nvme_lookup_cmd_in_pending_list(&q_ctx->n_q_ctx, cmd_id);
+ if (!cmd_ctx)
+ goto not_found;
+
+ /* we have already seen this cqe, or an identical one */
+ if (cmd_ctx->n_cmd_ctx.cqe_pkt_num)
+ goto not_found;
+
+ cmd_ctx->n_cmd_ctx.cqe_pkt_num = pinfo->num;
+ nvme_add_cmd_cqe_to_done_list(&q_ctx->n_q_ctx, &cmd_ctx->n_cmd_ctx, cmd_id);
+ } else {
+ /* Already visited this frame */
+ cmd_ctx = (struct nvme_rdma_cmd_ctx*)
+ nvme_lookup_cmd_in_done_list(pinfo, &q_ctx->n_q_ctx, cmd_id);
+ if (!cmd_ctx)
+ goto not_found;
+ }
+
+ nvme_update_cmd_end_info(pinfo, &cmd_ctx->n_cmd_ctx);
+
+ /* route to fabric or regular cqe dissector based on the cmd type */
+ if (cmd_ctx->n_cmd_ctx.fabric)
+ dissect_nvme_fabric_cqe(nvme_tvb, nvme_tree, cmd_ctx);
+ else
+ dissect_nvme_cqe(nvme_tvb, pinfo, root_tree, &cmd_ctx->n_cmd_ctx);
+ return;
+
+not_found:
+ proto_tree_add_item(nvme_tree, hf_nvme_rdma_to_host_unknown_data, nvme_tvb,
+ 0, NVME_FABRIC_CQE_SIZE, ENC_NA);
+
+/* Dissects controller-to-host traffic. Only an RC SEND carrying exactly one
+ * cqe is understood; everything else is shown as unknown data.
+ */
+static void
+dissect_nvme_to_host(tvbuff_t *nvme_tvb, packet_info *pinfo,
+ proto_tree *root_tree, proto_tree *nvme_tree,
+ struct infinibandinfo *info,
+ struct nvme_rdma_q_ctx *q_ctx, guint len)
+{
+ if (info->opCode == RC_SEND_ONLY && len == NVME_FABRIC_CQE_SIZE) {
+ dissect_nvme_rdma_cqe(nvme_tvb, pinfo, root_tree, nvme_tree, q_ctx);
+ } else {
+ proto_tree_add_item(nvme_tree, hf_nvme_rdma_to_host_unknown_data, nvme_tvb,
+ 0, len, ENC_NA);
+ }
+}
+
+/* Heuristic entry point for IB data packets: claims the packet only when it
+ * belongs to an NVMe Fabrics RDMA conversation, then dissects it in the
+ * appropriate direction. Returns TRUE when handled.
+ */
+static int
+dissect_nvme_ib(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data)
+{
+ struct infinibandinfo *info = (struct infinibandinfo *)data;
+ conversation_infiniband_data *conv_data = NULL;
+ struct nvme_rdma_q_ctx *q_ctx;
+ proto_tree *nvme_tree;
+ proto_item *ti;
+ guint len = tvb_reported_length(tvb);
+
+ conv = find_ib_conversation(pinfo, &conv_data);
+ if (!conv)
+ return FALSE;
+
+ q_ctx = find_add_q_ctx(pinfo, conv);
+ if (!q_ctx)
+ return FALSE;
+
+ col_set_str(pinfo->cinfo, COL_PROTOCOL, NVME_FABRICS_RDMA);
+
+ ti = proto_tree_add_item(tree, proto_nvme_rdma, tvb, 0, len, ENC_NA);
+ nvme_tree = proto_item_add_subtree(ti, ett_data);
+
+ nvme_publish_qid(nvme_tree, hf_nvme_rdma_cmd_qid, q_ctx->n_q_ctx.qid);
+
+ /* direction decides cmd vs cqe dissection */
+ if (conv_data->client_to_server)
+ dissect_nvme_from_host(tvb, pinfo, tree, nvme_tree, info, q_ctx, len);
+ else
+ dissect_nvme_to_host(tvb, pinfo, tree, nvme_tree, info, q_ctx, len);
+
+ return TRUE;
+}
+
+void
+proto_register_nvme_rdma(void)
+{
+ module_t *nvme_rdma_module;
+ static hf_register_info hf[] = {
+ /* IB RDMA CM fields */
+ { &hf_nvme_rdma_cm_req_recfmt,
+ { "Recfmt", "nvme-rdma.cm.req.recfmt",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cm_req_qid,
+ { "Qid", "nvme-rdma.cm.req.qid",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cm_req_hrqsize,
+ { "HrqSize", "nvme-rdma.cm.req.hrqsize",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cm_req_hsqsize,
+ { "HsqSize", "nvme-rdma.cm.req.hsqsize",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cm_req_reserved,
+ { "Reserved", "nvme-rdma.cm.req.reserved",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cm_rsp_recfmt,
+ { "Recfmt", "nvme-rdma.cm.rsp.recfmt",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cm_rsp_crqsize,
+ { "CrqSize", "nvme-rdma.cm.rsp.crqsize",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cm_rsp_reserved,
+ { "Reserved", "nvme-rdma.cm.rsp.reserved",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cm_rej_recfmt,
+ { "Recfmt", "nvme-rdma.cm.rej.recfmt",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cm_rej_status,
+ { "Status", "nvme-rdma.cm.rej.status",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cm_rej_reserved,
+ { "Reserved", "nvme-rdma.cm.rej.reserved",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ /* IB RDMA NVMe Command fields */
+ { &hf_nvme_rdma_cmd,
+ { "Cmd", "nvme-rdma.cmd",
+ FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_from_host_unknown_data,
+ { "Dissection unsupported", "nvme-rdma.unknown_data",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_opc,
+ { "Opcode", "nvme-rdma.cmd.opc",
+ FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_rsvd,
+ { "Reserved", "nvme-rdma.cmd.rsvd",
+ FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_cid,
+ { "Command ID", "nvme-rdma.cmd.cid",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_fctype,
+ { "Fabric Cmd Type", "nvme-rdma.cmd.fctype",
+ FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_connect_rsvd1,
+ { "Reserved", "nvme-rdma.cmd.connect.rsvd1",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_connect_sgl1,
+ { "SGL1", "nvme-rdma.cmd.connect.sgl1",
+ FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_connect_recfmt,
+ { "Record Format", "nvme-rdma.cmd.connect.recfmt",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_connect_qid,
+ { "Queue ID", "nvme-rdma.cmd.connect.qid",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_connect_sqsize,
+ { "SQ Size", "nvme-rdma.cmd.connect.sqsize",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_connect_cattr,
+ { "Connect Attributes", "nvme-rdma.cmd.connect.cattr",
+ FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_connect_rsvd2,
+ { "Reserved", "nvme-rdma.cmd.connect.rsvd2",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_connect_kato,
+ { "Keep Alive Timeout", "nvme-rdma.cmd.connect.kato",
+ FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_connect_rsvd3,
+ { "Reserved", "nvme-rdma.cmd.connect.rsvd3",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_prop_attr_rsvd,
+ { "Reserved", "nvme-rdma.cmd.prop_attr.rsvd",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_prop_attr_rsvd1,
+ { "Reserved", "nvme-rdma.cmd.prop_attr.rsvd1",
+ FT_UINT8, BASE_HEX, NULL, 0xf8, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_prop_attr_size,
+ { "Property Size", "nvme-rdma.cmd.prop_attr.size",
+ FT_UINT8, BASE_HEX, NULL, 0x7, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_prop_attr_rsvd2,
+ { "Reserved", "nvme-rdma.cmd.prop_attr.rsvd2",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_prop_attr_offset,
+ { "Offset", "nvme-rdma.cmd.prop_attr.offset",
+ FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_prop_attr_get_rsvd3,
+ { "Reserved", "nvme-rdma.cmd.prop_attr.get.rsvd3",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_prop_attr_set_4B_value,
+ { "Value", "nvme-rdma.cmd.prop_attr.set.value.4B",
+ FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_prop_attr_set_4B_value_rsvd,
+ { "Reserved", "nvme-rdma.cmd.prop_attr.set.value.rsvd",
+ FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_prop_attr_set_8B_value,
+ { "Value", "nvme-rdma.cmd.prop_attr.set.value.8B",
+ FT_UINT64, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_prop_attr_set_rsvd3,
+ { "Reserved", "nvme-rdma.cmd.prop_attr.set.rsvd3",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_generic_rsvd1,
+ { "Reserved", "nvme-rdma.cmd.generic.rsvd1",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_generic_field,
+ { "Fabric Cmd specific field", "nvme-rdma.cmd.generic.field",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ /* IB RDMA NVMe Response fields */
+ { &hf_nvme_rdma_cqe,
+ { "Cqe", "nvme-rdma.cqe",
+ FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cqe_sts,
+ { "Cmd specific Status", "nvme-rdma.cqe.sts",
+ FT_UINT64, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cqe_sqhd,
+ { "SQ Head Pointer", "nvme-rdma.cqe.sqhd",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cqe_rsvd,
+ { "Reserved", "nvme-rdma.cqe.rsvd",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cqe_cid,
+ { "Command ID", "nvme-rdma.cqe.cid",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cqe_status,
+ { "Status", "nvme-rdma.cqe.status",
+ FT_UINT16, BASE_HEX, NULL, 0xfffe, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cqe_status_rsvd,
+ { "Reserved", "nvme-rdma.cqe.status.rsvd",
+ FT_UINT16, BASE_HEX, NULL, 0x1, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cqe_connect_cntlid,
+ { "Controller ID", "nvme-rdma.cqe.connect.cntrlid",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cqe_connect_authreq,
+ { "Authentication Required", "nvme-rdma.cqe.connect.authreq",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cqe_connect_rsvd,
+ { "Reserved", "nvme-rdma.cqe.connect.rsvd",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cqe_prop_set_rsvd,
+ { "Reserved", "nvme-rdma.cqe.prop_set.rsvd",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_to_host_unknown_data,
+ { "Dissection unsupported", "nvme-rdma.unknown_data",
+ FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_rdma_cmd_pkt,
+ { "Fabric Cmd in", "nvme-rdma.cmd_pkt",
+ FT_FRAMENUM, BASE_NONE, NULL, 0,
+ "The Cmd for this transaction is in this frame", HFILL }
+ },
+ { &hf_nvme_rdma_cqe_pkt,
+ { "Fabric Cqe in", "nvme-rdma.cqe_pkt",
+ FT_FRAMENUM, BASE_NONE, NULL, 0,
+ "The Cqe for this transaction is in this frame", HFILL }
+ },
+ { &hf_nvme_rdma_cmd_latency,
+ { "Cmd Latency", "nvme-rdma.cmd_latency",
+ FT_DOUBLE, BASE_NONE, NULL, 0x0,
+           "The time between the command and completion, in msec", HFILL }
+ },
+ { &hf_nvme_rdma_cmd_qid,
+ { "Cmd Qid", "nvme-rdma.cmd.qid",
+ FT_UINT16, BASE_HEX, NULL, 0x0,
+           "Qid on which command is issued", HFILL }
+ },
+ };
+ static gint *ett[] = {
+ &ett_cm,
+ &ett_data,
+ };
+
+ proto_nvme_rdma = proto_register_protocol("NVM Express Fabrics RDMA",
+ NVME_FABRICS_RDMA, "nvme-rdma");
+
+ proto_register_field_array(proto_nvme_rdma, hf, array_length(hf));
+ proto_register_subtree_array(ett, array_length(ett));
+
+ /* Register preferences */
+ //nvme_rdma_module = prefs_register_protocol(proto_nvme_rdma, proto_reg_handoff_nvme_rdma);
+ nvme_rdma_module = prefs_register_protocol(proto_nvme_rdma, NULL);
+
+ range_convert_str(&gPORT_RANGE, NVME_RDMA_TCP_PORT_RANGE, MAX_TCP_PORT);
+ prefs_register_range_preference(nvme_rdma_module,
+ "subsystem_ports",
+ "Subsystem Ports Range",
+ "Range of NVMe Subsystem ports"
+ "(default " NVME_RDMA_TCP_PORT_RANGE ")",
+ &gPORT_RANGE, MAX_TCP_PORT);
+}
+
+void
+proto_reg_handoff_nvme_rdma(void)
+{
+ heur_dissector_add("infiniband.mad.cm.private", dissect_nvme_ib_cm,
+ "NVMe Fabrics RDMA CM packets",
+ "nvme_rdma_cm_private", proto_nvme_rdma, HEURISTIC_ENABLE);
+ heur_dissector_add("infiniband.payload", dissect_nvme_ib,
+ "NVMe Fabrics RDMA packets",
+ "nvme_rdma", proto_nvme_rdma, HEURISTIC_ENABLE);
+ ib_handler = find_dissector_add_dependency("infiniband", proto_nvme_rdma);
+ proto_ib = dissector_handle_get_protocol_index(ib_handler);
+}
+
+/*
+ * Editor modelines - http://www.wireshark.org/tools/modelines.html
+ *
+ * Local variables:
+ * c-basic-offset: 4
+ * tab-width: 8
+ * indent-tabs-mode: nil
+ * End:
+ *
+ * vi: set shiftwidth=4 tabstop=8 expandtab:
+ * :indentSize=4:tabSize=8:noTabs=true:
+ */
diff --git a/epan/dissectors/packet-nvme.c b/epan/dissectors/packet-nvme.c
new file mode 100644
index 0000000000..c4c61d46c7
--- /dev/null
+++ b/epan/dissectors/packet-nvme.c
@@ -0,0 +1,551 @@
+/* packet-nvme.c
+ * Routines for NVM Express dissection
+ * Copyright 2016
+ * Code by Parav Pandit
+ *
+ * Wireshark - Network traffic analyzer
+ * By Gerald Combs <gerald@wireshark.org>
+ * Copyright 1998 Gerald Combs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/* This file dissects NVMe packets received from the underlying
+ * fabric such as RDMA, FC.
+ * This is fabric agnostic dissector and depends on cmd_ctx and q_ctx
+ * It currently aligns to below specification.
+ * http://www.nvmexpress.org/wp-content/uploads/NVM-Express-1_2a.pdf
+ */
+
+#include "config.h"
+
+#include <stdlib.h>
+
+#include <epan/packet.h>
+
+#include "packet-nvme.h"
+
+void proto_register_nvme(void);
+
+static int proto_nvme = -1;
+
+/* NVMe Cmd fields */
+
+static int hf_nvme_cmd_opc = -1;
+static int hf_nvme_cmd_rsvd = -1;
+static int hf_nvme_cmd_cid = -1;
+static int hf_nvme_cmd_fuse_op = -1;
+static int hf_nvme_cmd_psdt = -1;
+static int hf_nvme_cmd_nsid = -1;
+static int hf_nvme_cmd_rsvd1 = -1;
+static int hf_nvme_cmd_mptr = -1;
+static int hf_nvme_cmd_sgl = -1;
+static int hf_nvme_cmd_sgl_desc_type = -1;
+static int hf_nvme_cmd_sgl_desc_sub_type = -1;
+
+/* NVMe CQE fields */
+static int hf_nvme_cqe_sts = -1;
+static int hf_nvme_cqe_sqhd = -1;
+static int hf_nvme_cqe_rsvd = -1;
+static int hf_nvme_cqe_cid = -1;
+static int hf_nvme_cqe_status = -1;
+static int hf_nvme_cqe_status_rsvd = -1;
+
+/* tracking Cmd and its respective CQE */
+static int hf_nvme_cmd_pkt = -1;
+static int hf_nvme_cqe_pkt = -1;
+static int hf_nvme_cmd_latency = -1;
+
+/* Initialize the subtree pointers */
+static gint ett_data = -1;
+
+#define NVME_AQ_OPC_DELETE_SQ 0x0
+#define NVME_AQ_OPC_CREATE_SQ 0x1
+#define NVME_AQ_OPC_GET_LOG_PAGE 0x2
+#define NVME_AQ_OPC_DELETE_CQ 0x4
+#define NVME_AQ_OPC_CREATE_CQ 0x5
+#define NVME_AQ_OPC_IDENTIFY 0x6
+#define NVME_AQ_OPC_ABORT 0x8
+#define NVME_AQ_OPC_SET_FEATURES 0x9
+#define NVME_AQ_OPC_GET_FEATURES 0xa
+#define NVME_AQ_OPC_ASYNC_EVE_REQ 0xc
+#define NVME_AQ_OPC_NS_MGMT 0xd
+#define NVME_AQ_OPC_FW_COMMIT 0x10
+#define NVME_AQ_OPC_FW_IMG_DOWNLOAD 0x11
+#define NVME_AQ_OPC_NS_ATTACH 0x15
+#define NVME_AQ_OPC_KEEP_ALIVE 0x18
+
+#define NVME_IOQ_OPC_FLUSH 0x0
+#define NVME_IOQ_OPC_WRITE 0x1
+#define NVME_IOQ_OPC_READ 0x2
+#define NVME_IOQ_OPC_WRITE_UNCORRECTABLE 0x4
+#define NVME_IOQ_OPC_COMPARE 0x5
+#define NVME_IOQ_OPC_WRITE_ZEROS 0x8
+#define NVME_IOQ_OPC_DATASET_MGMT 0x9
+#define NVME_IOQ_OPC_RESV_REG 0xd
+#define NVME_IOQ_OPC_RESV_REPORT 0xe
+#define NVME_IOQ_OPC_RESV_ACQUIRE 0x11
+#define NVME_IOQ_OPC_RESV_RELEASE 0x15
+
+#define NVME_CQE_SCT_GENERIC 0x0
+#define NVME_CQE_SCT_SPECIFIC 0x1
+#define NVME_CQE_SCT_MDI 0x2
+#define NVME_CQE_SCT_VENDOR 0x7
+
+#define NVME_CQE_SCODE_SUCCESS 0x0
+#define NVME_CQE_SCODE_INVALID_OPCODE 0x1
+#define NVME_CQE_SCODE_INVALID_FIELD 0x2
+#define NVME_CQE_SCODE_CID_CONFLICT 0x3
+#define NVME_CQE_SCODE_DATA_XFER_ERR 0x4
+#define NVME_CQE_SCODE_CMD_ABORTED 0x5
+#define NVME_CQE_SCODE_INTERNAL_ERR 0x6
+#define NVME_CQE_SCODE_CMD_ABORT_REQ 0x7
+#define NVME_CQE_SCODE_CMD_ABORT_SQD 0x8
+#define NVME_CQE_SCODE_CMD_ABORT_FF 0x9
+#define NVME_CQE_SCODE_CMD_ABORT_MF 0xa
+#define NVME_CQE_SCODE_INVALID_NS 0xb
+#define NVME_CQE_SCODE_CMD_SEQ_ERR 0xc
+
+#define NVME_CQE_SCODE_INVALID_SGL_DESC 0xd
+#define NVME_CQE_SCODE_INVALID_NUM_SGLS 0xe
+#define NVME_CQE_SCODE_INVALID_SGL_LEN 0xf
+#define NVME_CQE_SCODE_INVALID_MD_SGL_LEN 0x10
+#define NVME_CQE_SCODE_INVALID_SGL_DESC_TYPE 0x11
+#define NVME_CQE_SCODE_INVALID_CMB_USE 0x12
+#define NVME_CQE_SCODE_INVALID_PRP_OFFSET 0x13
+#define NVME_CQE_SCODE_INVALID_ATOMIC_WRITE_EXCEEDED 0x14
+#define NVME_CQE_SCODE_INVALID_SGL_OFFSET 0x16
+#define NVME_CQE_SCODE_INVALID_SGL_SUB_TYPE 0x17
+#define NVME_CQE_SCODE_INVALID_INCONSISTENT_HOSTID 0x18
+#define NVME_CQE_SCODE_INVALID_KA_TIMER_EXPIRED 0x19
+#define NVME_CQE_SCODE_INVALID_KA_TIMEOUT_INVALID 0x1a
+
+static const value_string aq_opc_tbl[] = {
+ { NVME_AQ_OPC_DELETE_SQ, "Delete SQ"},
+ { NVME_AQ_OPC_CREATE_SQ, "Create SQ"},
+ { NVME_AQ_OPC_GET_LOG_PAGE, "Get Log Page"},
+ { NVME_AQ_OPC_DELETE_CQ, "Delete CQ"},
+ { NVME_AQ_OPC_CREATE_CQ, "Create CQ"},
+ { NVME_AQ_OPC_IDENTIFY, "Identify"},
+ { NVME_AQ_OPC_ABORT, "Abort"},
+ { NVME_AQ_OPC_SET_FEATURES, "Set Features"},
+ { NVME_AQ_OPC_GET_FEATURES, "Get Features"},
+ { NVME_AQ_OPC_ASYNC_EVE_REQ, "Async Event Request"},
+ { NVME_AQ_OPC_NS_MGMT, "Namespace Management"},
+ { NVME_AQ_OPC_FW_COMMIT, "Firmware Commit"},
+ { NVME_AQ_OPC_FW_IMG_DOWNLOAD, "Firmware Image Download"},
+ { NVME_AQ_OPC_NS_ATTACH, "Namespace attach"},
+    { NVME_AQ_OPC_KEEP_ALIVE, "Keep Alive"},
+ { 0, NULL}
+};
+
+static const value_string ioq_opc_tbl[] = {
+ { NVME_IOQ_OPC_FLUSH, "Flush"},
+ { NVME_IOQ_OPC_WRITE, "Write"},
+ { NVME_IOQ_OPC_READ, "Read"},
+ { NVME_IOQ_OPC_WRITE_UNCORRECTABLE, "Write Uncorrectable"},
+ { NVME_IOQ_OPC_COMPARE, "Compare"},
+    { NVME_IOQ_OPC_WRITE_ZEROS, "Write Zeroes"},
+ { NVME_IOQ_OPC_DATASET_MGMT, "Dataset Management"},
+    { NVME_IOQ_OPC_RESV_REG, "Reservation Register"},
+    { NVME_IOQ_OPC_RESV_REPORT, "Reservation Report"},
+    { NVME_IOQ_OPC_RESV_ACQUIRE, "Reservation Acquire"},
+    { NVME_IOQ_OPC_RESV_RELEASE, "Reservation Release"},
+ { 0, NULL}
+};
+
+#define NVME_CMD_SGL_DATA_DESC 0x0
+#define NVME_CMD_SGL_BIT_BUCKET_DESC 0x1
+#define NVME_CMD_SGL_SEGMENT_DESC 0x2
+#define NVME_CMD_SGL_LAST_SEGMENT_DESC 0x3
+#define NVME_CMD_SGL_KEYED_DATA_DESC 0x4
+#define NVME_CMD_SGL_VENDOR_DESC 0xf
+
+static const value_string sgl_type_tbl[] = {
+ { NVME_CMD_SGL_DATA_DESC, "Data Block"},
+ { NVME_CMD_SGL_BIT_BUCKET_DESC, "Bit Bucket"},
+ { NVME_CMD_SGL_SEGMENT_DESC, "Segment"},
+ { NVME_CMD_SGL_LAST_SEGMENT_DESC, "Last Segment"},
+ { NVME_CMD_SGL_KEYED_DATA_DESC, "Keyed Data Block"},
+ { NVME_CMD_SGL_VENDOR_DESC, "Vendor Specific"},
+ { 0, NULL}
+};
+
+#define NVME_CMD_SGL_SUB_DESC_ADDR 0x0
+#define NVME_CMD_SGL_SUB_DESC_OFFSET 0x1
+#define NVME_CMD_SGL_SUB_DESC_TRANSPORT 0xf
+
+static const value_string sgl_sub_type_tbl[] = {
+ { NVME_CMD_SGL_SUB_DESC_ADDR, "Address"},
+ { NVME_CMD_SGL_SUB_DESC_OFFSET, "Offset"},
+ { NVME_CMD_SGL_SUB_DESC_TRANSPORT, "Transport specific"},
+ { 0, NULL}
+};
+
+void
+nvme_publish_qid(proto_tree *tree, int field_index, guint16 qid)
+{
+ proto_item *cmd_ref_item;
+
+ cmd_ref_item = proto_tree_add_uint_format_value(tree, field_index, NULL,
+ 0, 0, qid,
+ qid ? "%d (IOQ)" : "%d (AQ)",
+ qid);
+
+ PROTO_ITEM_SET_GENERATED(cmd_ref_item);
+}
+
+static void nvme_build_pending_cmd_key(wmem_tree_key_t *cmd_key, guint32 *key)
+{
+ cmd_key[0].length = 1;
+ cmd_key[0].key = key;
+ cmd_key[1].length = 0;
+ cmd_key[1].key = NULL;
+}
+
+static void
+nvme_build_done_cmd_key(wmem_tree_key_t *cmd_key, guint32 *key, guint32 *frame_num)
+{
+ cmd_key[0].length = 1;
+ cmd_key[0].key = key;
+ cmd_key[1].length = 1;
+ cmd_key[1].key = frame_num;
+ cmd_key[2].length = 0;
+ cmd_key[2].key = NULL;
+}
+
+void
+nvme_add_cmd_to_pending_list(packet_info *pinfo, struct nvme_q_ctx *q_ctx,
+ struct nvme_cmd_ctx *cmd_ctx,
+ void *ctx, guint16 cmd_id)
+{
+ wmem_tree_key_t cmd_key[3];
+ guint32 key = cmd_id;
+
+ cmd_ctx->cmd_pkt_num = pinfo->num;
+ cmd_ctx->cqe_pkt_num = 0;
+ cmd_ctx->cmd_start_time = pinfo->abs_ts;
+ nstime_set_zero(&cmd_ctx->cmd_end_time);
+
+ /* this is a new cmd, create a new command context and map it to the
+ unmatched table
+ */
+ nvme_build_pending_cmd_key(cmd_key, &key);
+ wmem_tree_insert32_array(q_ctx->pending_cmds, cmd_key, (void *)ctx);
+}
+
+void* nvme_lookup_cmd_in_pending_list(struct nvme_q_ctx *q_ctx, guint16 cmd_id)
+{
+ wmem_tree_key_t cmd_key[3];
+ guint32 key = cmd_id;
+
+ nvme_build_pending_cmd_key(cmd_key, &key);
+ return wmem_tree_lookup32_array(q_ctx->pending_cmds, cmd_key);
+}
+
+void
+nvme_add_cmd_cqe_to_done_list(struct nvme_q_ctx *q_ctx,
+ struct nvme_cmd_ctx *cmd_ctx, guint16 cmd_id)
+{
+ wmem_tree_key_t cmd_key[3];
+ guint32 key = cmd_id;
+ guint32 frame_num;
+
+ nvme_build_done_cmd_key(cmd_key, &key, &frame_num);
+
+    /* found matching entry. Add entries to the matched table for both cmd and cqe.
+ */
+ frame_num = cmd_ctx->cqe_pkt_num;
+ wmem_tree_insert32_array(q_ctx->done_cmds, cmd_key, (void*)cmd_ctx);
+
+ frame_num = cmd_ctx->cmd_pkt_num;
+ wmem_tree_insert32_array(q_ctx->done_cmds, cmd_key, (void*)cmd_ctx);
+}
+
+void*
+nvme_lookup_cmd_in_done_list(packet_info *pinfo, struct nvme_q_ctx *q_ctx,
+ guint16 cmd_id)
+{
+ wmem_tree_key_t cmd_key[3];
+ guint32 key = cmd_id;
+ guint32 frame_num = pinfo->num;
+
+ nvme_build_done_cmd_key(cmd_key, &key, &frame_num);
+
+ return wmem_tree_lookup32_array(q_ctx->done_cmds, cmd_key);
+}
+
+void
+nvme_publish_cmd_latency(proto_tree *tree, struct nvme_cmd_ctx *cmd_ctx,
+ int field_index)
+{
+ proto_item *cmd_ref_item;
+ nstime_t ns;
+ double cmd_latency;
+
+ nstime_delta(&ns, &cmd_ctx->cmd_end_time, &cmd_ctx->cmd_start_time);
+ cmd_latency = nstime_to_msec(&ns);
+ cmd_ref_item = proto_tree_add_double_format_value(tree, field_index,
+ NULL, 0, 0, cmd_latency,
+ "%.3f ms", cmd_latency);
+ PROTO_ITEM_SET_GENERATED(cmd_ref_item);
+}
+
+void nvme_update_cmd_end_info(packet_info *pinfo, struct nvme_cmd_ctx *cmd_ctx)
+{
+ cmd_ctx->cmd_end_time = pinfo->abs_ts;
+ cmd_ctx->cqe_pkt_num = pinfo->num;
+}
+
+void
+nvme_publish_cqe_to_cmd_link(proto_tree *cqe_tree, tvbuff_t *nvme_tvb,
+ int hf_index, struct nvme_cmd_ctx *cmd_ctx)
+{
+ proto_item *cqe_ref_item;
+ cqe_ref_item = proto_tree_add_uint(cqe_tree, hf_index,
+ nvme_tvb, 0, 0, cmd_ctx->cmd_pkt_num);
+ PROTO_ITEM_SET_GENERATED(cqe_ref_item);
+}
+
+void
+nvme_publish_cmd_to_cqe_link(proto_tree *cmd_tree, tvbuff_t *cmd_tvb,
+ int hf_index, struct nvme_cmd_ctx *cmd_ctx)
+{
+ proto_item *cmd_ref_item;
+
+ if (cmd_ctx->cqe_pkt_num) {
+ cmd_ref_item = proto_tree_add_uint(cmd_tree, hf_index,
+ cmd_tvb, 0, 0, cmd_ctx->cqe_pkt_num);
+ PROTO_ITEM_SET_GENERATED(cmd_ref_item);
+ }
+}
+
+void dissect_nvme_cmd_sgl(tvbuff_t *cmd_tvb, proto_tree *cmd_tree,
+ int field_index)
+{
+ proto_item *ti, *sgl_tree, *type_item, *sub_type_item;
+ guint8 sgl_identifier, desc_type, desc_sub_type;
+ int offset = 24;
+
+ ti = proto_tree_add_item(cmd_tree, field_index, cmd_tvb, offset,
+ 16, ENC_NA);
+ sgl_tree = proto_item_add_subtree(ti, ett_data);
+
+ sgl_identifier = tvb_get_guint8(cmd_tvb, offset + 15);
+ desc_type = (sgl_identifier & 0xff) >> 4;
+ desc_sub_type = sgl_identifier & 0x0f;
+
+ type_item = proto_tree_add_item(sgl_tree, hf_nvme_cmd_sgl_desc_type,
+ cmd_tvb, offset + 15, 1, ENC_LITTLE_ENDIAN);
+ proto_item_append_text(type_item, " %s",
+ val_to_str(desc_type, sgl_type_tbl, "Reserved"));
+
+ sub_type_item = proto_tree_add_item(sgl_tree, hf_nvme_cmd_sgl_desc_sub_type,
+ cmd_tvb,
+ offset + 15, 1, ENC_LITTLE_ENDIAN);
+ proto_item_append_text(sub_type_item, " %s",
+ val_to_str(desc_sub_type, sgl_sub_type_tbl, "Reserved"));
+}
+
+void
+dissect_nvme_cmd(tvbuff_t *nvme_tvb, packet_info *pinfo, proto_tree *root_tree,
+ struct nvme_q_ctx *q_ctx, struct nvme_cmd_ctx *cmd_ctx)
+{
+ proto_tree *cmd_tree;
+ tvbuff_t *cmd_tvb;
+ proto_item *ti, *opc_item;
+ guint8 opcode;
+
+ col_set_str(pinfo->cinfo, COL_PROTOCOL, "NVMe");
+ ti = proto_tree_add_item(root_tree, proto_nvme, nvme_tvb, 0,
+ NVME_CMD_SIZE, ENC_NA);
+ proto_item_append_text(ti, " (Cmd)");
+ cmd_tree = proto_item_add_subtree(ti, ett_data);
+ cmd_tvb = tvb_new_subset_length(nvme_tvb, 0, NVME_CMD_SIZE);
+
+ opcode = tvb_get_guint8(cmd_tvb, 0);
+ opc_item = proto_tree_add_item(cmd_tree, hf_nvme_cmd_opc, cmd_tvb,
+ 0, 1, ENC_LITTLE_ENDIAN);
+ if (q_ctx->qid)
+ proto_item_append_text(opc_item, " %s",
+ val_to_str(opcode, ioq_opc_tbl, "Reserved"));
+ else
+ proto_item_append_text(opc_item, " %s",
+ val_to_str(opcode, aq_opc_tbl, "Reserved"));
+
+ nvme_publish_cmd_to_cqe_link(cmd_tree, cmd_tvb, hf_nvme_cqe_pkt, cmd_ctx);
+
+ proto_tree_add_item(cmd_tree, hf_nvme_cmd_fuse_op, cmd_tvb,
+ 1, 1, ENC_NA);
+ proto_tree_add_item(cmd_tree, hf_nvme_cmd_rsvd, cmd_tvb,
+ 1, 1, ENC_NA);
+ proto_tree_add_item(cmd_tree, hf_nvme_cmd_psdt, cmd_tvb,
+ 1, 1, ENC_NA);
+ proto_tree_add_item(cmd_tree, hf_nvme_cmd_cid, cmd_tvb,
+ 2, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cmd_tree, hf_nvme_cmd_nsid, cmd_tvb,
+ 4, 4, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cmd_tree, hf_nvme_cmd_rsvd1, cmd_tvb,
+ 8, 8, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cmd_tree, hf_nvme_cmd_mptr, cmd_tvb,
+ 16, 8, ENC_LITTLE_ENDIAN);
+
+ dissect_nvme_cmd_sgl(cmd_tvb, cmd_tree, hf_nvme_cmd_sgl);
+}
+
+void
+dissect_nvme_cqe(tvbuff_t *nvme_tvb, packet_info *pinfo, proto_tree *root_tree,
+ struct nvme_cmd_ctx *cmd_ctx)
+{
+ proto_tree *cqe_tree;
+ proto_item *ti;
+
+ col_set_str(pinfo->cinfo, COL_PROTOCOL, "NVMe");
+ ti = proto_tree_add_item(root_tree, proto_nvme, nvme_tvb, 0,
+ NVME_CQE_SIZE, ENC_NA);
+ proto_item_append_text(ti, " (Cqe)");
+ cqe_tree = proto_item_add_subtree(ti, ett_data);
+
+ nvme_publish_cqe_to_cmd_link(cqe_tree, nvme_tvb, hf_nvme_cmd_pkt, cmd_ctx);
+ nvme_publish_cmd_latency(cqe_tree, cmd_ctx, hf_nvme_cmd_latency);
+
+ proto_tree_add_item(cqe_tree, hf_nvme_cqe_sts, nvme_tvb,
+ 0, 8, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cqe_tree, hf_nvme_cqe_sqhd, nvme_tvb,
+ 8, 2, ENC_NA);
+ proto_tree_add_item(cqe_tree, hf_nvme_cqe_rsvd, nvme_tvb,
+ 10, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cqe_tree, hf_nvme_cqe_cid, nvme_tvb,
+ 12, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cqe_tree, hf_nvme_cqe_status, nvme_tvb,
+ 14, 2, ENC_LITTLE_ENDIAN);
+ proto_tree_add_item(cqe_tree, hf_nvme_cqe_status_rsvd, nvme_tvb,
+ 14, 2, ENC_LITTLE_ENDIAN);
+}
+
+void
+proto_register_nvme(void)
+{
+ static hf_register_info hf[] = {
+ /* NVMe Command fields */
+ { &hf_nvme_cmd_opc,
+ { "Opcode", "nvme.cmd.opc",
+ FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_cmd_fuse_op,
+ { "Fuse Operation", "nvme.cmd.fuse_op",
+ FT_UINT8, BASE_HEX, NULL, 0x3, NULL, HFILL}
+ },
+ { &hf_nvme_cmd_rsvd,
+ { "Reserved", "nvme.cmd.rsvd",
+ FT_UINT8, BASE_HEX, NULL, 0x3c, NULL, HFILL}
+ },
+ { &hf_nvme_cmd_psdt,
+ { "PRP Or SGL", "nvme.cmd.psdt",
+ FT_UINT8, BASE_HEX, NULL, 0xc0, NULL, HFILL}
+ },
+ { &hf_nvme_cmd_cid,
+ { "Command ID", "nvme.cmd.cid",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_cmd_nsid,
+ { "Namespace Id", "nvme.cmd.nsid",
+ FT_UINT32, BASE_HEX, NULL, 0, NULL, HFILL}
+ },
+ { &hf_nvme_cmd_rsvd1,
+ { "Reserved", "nvme.cmd.rsvd1",
+ FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL}
+ },
+ { &hf_nvme_cmd_mptr,
+ { "Metadata Pointer", "nvme.cmd.mptr",
+ FT_UINT64, BASE_HEX, NULL, 0, NULL, HFILL}
+ },
+ { &hf_nvme_cmd_sgl,
+ { "SGL1", "nvme.cmd.sgl1",
+ FT_NONE, BASE_NONE, NULL, 0, NULL, HFILL}
+ },
+ { &hf_nvme_cmd_sgl_desc_sub_type,
+ { "Descriptor Sub Type", "nvme.cmd.sgl.subtype",
+ FT_UINT8, BASE_HEX, NULL, 0x0f, NULL, HFILL}
+ },
+
+ { &hf_nvme_cmd_sgl_desc_type,
+ { "Descriptor Type", "nvme.cmd.sgl.type",
+ FT_UINT8, BASE_HEX, NULL, 0xf0, NULL, HFILL}
+ },
+
+ /* NVMe Response fields */
+ { &hf_nvme_cqe_sts,
+ { "Cmd specific Status", "nvme.cqe.sts",
+ FT_UINT64, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_cqe_sqhd,
+ { "SQ Head Pointer", "nvme.cqe.sqhd",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_cqe_rsvd,
+           { "Reserved", "nvme.cqe.rsvd",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_cqe_cid,
+ { "Command ID", "nvme.cqe.cid",
+ FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
+ },
+ { &hf_nvme_cqe_status,
+ { "Status", "nvme.cqe.status",
+ FT_UINT16, BASE_HEX, NULL, 0xfffe, NULL, HFILL}
+ },
+ { &hf_nvme_cqe_status_rsvd,
+ { "Reserved", "nvme.cqe.status.rsvd",
+ FT_UINT16, BASE_HEX, NULL, 0x1, NULL, HFILL}
+ },
+ { &hf_nvme_cmd_pkt,
+ { "Cmd in", "nvme.cmd_pkt",
+ FT_FRAMENUM, BASE_NONE, NULL, 0,
+ "The Cmd for this transaction is in this frame", HFILL }
+ },
+ { &hf_nvme_cqe_pkt,
+ { "Cqe in", "nvme.cqe_pkt",
+ FT_FRAMENUM, BASE_NONE, NULL, 0,
+ "The Cqe for this transaction is in this frame", HFILL }
+ },
+ { &hf_nvme_cmd_latency,
+ { "Cmd Latency", "nvme.cmd_latency",
+ FT_DOUBLE, BASE_NONE, NULL, 0x0,
+           "The time between the command and completion, in msec", HFILL }
+ },
+ };
+ static gint *ett[] = {
+ &ett_data,
+ };
+
+ proto_nvme = proto_register_protocol("NVM Express", "nvme", "nvme");
+
+ proto_register_field_array(proto_nvme, hf, array_length(hf));
+ proto_register_subtree_array(ett, array_length(ett));
+}
+
+/*
+ * Editor modelines - http://www.wireshark.org/tools/modelines.html
+ *
+ * Local variables:
+ * c-basic-offset: 4
+ * tab-width: 8
+ * indent-tabs-mode: nil
+ * End:
+ *
+ * vi: set shiftwidth=4 tabstop=8 expandtab:
+ * :indentSize=4:tabSize=8:noTabs=true:
+ */
diff --git a/epan/dissectors/packet-nvme.h b/epan/dissectors/packet-nvme.h
new file mode 100644
index 0000000000..7f2fcdd2d2
--- /dev/null
+++ b/epan/dissectors/packet-nvme.h
@@ -0,0 +1,95 @@
+/* packet-nvme.h
+ * data structures for NVMe Dissection
+ * Copyright 2016
+ * Code by Parav Pandit
+ *
+ * Wireshark - Network traffic analyzer
+ * By Gerald Combs <gerald@wireshark.org>
+ * Copyright 1998 Gerald Combs
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef _PACKET_NVME_H_
+#define _PACKET_NVME_H_
+
+#define NVME_CMD_SIZE 64
+#define NVME_CQE_SIZE 16
+
+struct nvme_q_ctx {
+ wmem_tree_t *pending_cmds;
+ wmem_tree_t *done_cmds;
+ guint16 qid;
+};
+
+struct nvme_cmd_ctx {
+ guint32 cmd_pkt_num; /* pkt number of the cmd */
+ guint32 cqe_pkt_num; /* pkt number of the cqe */
+
+ nstime_t cmd_start_time;
+ nstime_t cmd_end_time;
+ gboolean fabric; /* indicate whether cmd fabric type or not */
+};
+
+void
+nvme_publish_qid(proto_tree *tree, int field_index, guint16 qid);
+
+void
+nvme_publish_cmd_latency(proto_tree *tree, struct nvme_cmd_ctx *cmd_ctx,
+ int field_index);
+void
+nvme_publish_cqe_to_cmd_link(proto_tree *cqe_tree, tvbuff_t *cqe_tvb,
+ int hf_index, struct nvme_cmd_ctx *cmd_ctx);
+void
+nvme_publish_cmd_to_cqe_link(proto_tree *cmd_tree, tvbuff_t *cqe_tvb,
+ int hf_index, struct nvme_cmd_ctx *cmd_ctx);
+
+void nvme_update_cmd_end_info(packet_info *pinfo, struct nvme_cmd_ctx *cmd_ctx);
+
+void
+nvme_add_cmd_to_pending_list(packet_info *pinfo, struct nvme_q_ctx *q_ctx,
+ struct nvme_cmd_ctx *cmd_ctx,
+ void *ctx, guint16 cmd_id);
+void* nvme_lookup_cmd_in_pending_list(struct nvme_q_ctx *q_ctx, guint16 cmd_id);
+
+void
+nvme_add_cmd_cqe_to_done_list(struct nvme_q_ctx *q_ctx,
+ struct nvme_cmd_ctx *cmd_ctx, guint16 cmd_id);
+void*
+nvme_lookup_cmd_in_done_list(packet_info *pinfo, struct nvme_q_ctx *q_ctx,
+ guint16 cmd_id);
+
+void dissect_nvme_cmd_sgl(tvbuff_t *cmd_tvb, proto_tree *cmd_tree,
+ int field_index);
+void
+dissect_nvme_cmd(tvbuff_t *nvme_tvb, packet_info *pinfo, proto_tree *root_tree,
+ struct nvme_q_ctx *q_ctx, struct nvme_cmd_ctx *cmd_ctx);
+void
+dissect_nvme_cqe(tvbuff_t *nvme_tvb, packet_info *pinfo, proto_tree *root_tree,
+ struct nvme_cmd_ctx *cmd_ctx);
+
+#endif
+
+/*
+ * Editor modelines - http://www.wireshark.org/tools/modelines.html
+ *
+ * Local variables:
+ * c-basic-offset: 4
+ * tab-width: 8
+ * indent-tabs-mode: nil
+ * End:
+ *
+ * vi: set shiftwidth=4 tabstop=8 expandtab:
+ * :indentSize=4:tabSize=8:noTabs=true:
+ */