[
{
"BriefDescription": "Number of non data (control) flits transmitted . Derived from unc_q_txl_flits_g0.non_data",
"Counter": "0,1,2,3",
"EventName": "QPI_CTL_BANDWIDTH_TX",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of non-NULL non-data flits transmitted across QPI. This basically tracks the protocol overhead on the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This includes the header flits for data packets.",
"ScaleUnit": "8Bytes",
"UMask": "0x4",
"Unit": "QPI"
},
{
"BriefDescription": "Number of data flits transmitted . Derived from unc_q_txl_flits_g0.data",
"Counter": "0,1,2,3",
"EventName": "QPI_DATA_BANDWIDTH_TX",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of data flits transmitted over QPI. Each flit contains 64b of data. This includes both DRS and NCB data flits (coherent and non-coherent). This can be used to calculate the data bandwidth of the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This does not include the header flits that go in data packets.",
"ScaleUnit": "8Bytes",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Total Write Cache Occupancy; Any Source",
"Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks all requests from any source port.",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Total Write Cache Occupancy; Select Source",
"Counter": "0,1",
"EventCode": "0x12",
"EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks only those requests that come from the port specified in the IRP_PmonFilter.OrderingQ register. This register allows one to select one specific queue. It is not possible to monitor multiple queues at a time.",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Clocks in the IRP",
"Counter": "0,1",
"EventName": "UNC_I_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Number of clocks in the IRP.",
"Unit": "IRP"
},
{
"BriefDescription": "Coherent Ops; CLFlush",
"Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations servied by the IRP",
"UMask": "0x80",
"Unit": "IRP"
},
{
"BriefDescription": "Coherent Ops; CRd",
"Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.CRD",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations servied by the IRP",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Coherent Ops; DRd",
"Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.DRD",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations servied by the IRP",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Coherent Ops; PCIDCAHin5t",
"Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations servied by the IRP",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Coherent Ops; PCIRdCur",
"Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations servied by the IRP",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Coherent Ops; PCIItoM",
"Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.PCITOM",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations servied by the IRP",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Coherent Ops; RFO",
"Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.RFO",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations servied by the IRP",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Coherent Ops; WbMtoI",
"Counter": "0,1",
"EventCode": "0x13",
"EventName": "UNC_I_COHERENT_OPS.WBMTOI",
"PerPkg": "1",
"PublicDescription": "Counts the number of coherency related operations servied by the IRP",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
"Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
"PerPkg": "1",
"PublicDescription": "Counts Timeouts - Set 0 : Cache Inserts of Atomic Transactions as Secondary",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
"Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.2ND_RD_INSERT",
"PerPkg": "1",
"PublicDescription": "Counts Timeouts - Set 0 : Cache Inserts of Read Transactions as Secondary",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
"Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.2ND_WR_INSERT",
"PerPkg": "1",
"PublicDescription": "Counts Timeouts - Set 0 : Cache Inserts of Write Transactions as Secondary",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
"Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.FAST_REJ",
"PerPkg": "1",
"PublicDescription": "Counts Timeouts - Set 0 : Fastpath Rejects",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Requests",
"Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.FAST_REQ",
"PerPkg": "1",
"PublicDescription": "Counts Timeouts - Set 0 : Fastpath Requests",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
"Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.FAST_XFER",
"PerPkg": "1",
"PublicDescription": "Counts Timeouts - Set 0 : Fastpath Transfers From Primary to Secondary",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
"Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.PF_ACK_HINT",
"PerPkg": "1",
"PublicDescription": "Counts Timeouts - Set 0 : Prefetch Ack Hints From Primary to Secondary",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 0; Prefetch TimeOut",
"Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_I_MISC0.PF_TIMEOUT",
"PerPkg": "1",
"PublicDescription": "Indicates the fetch for a previous prefetch wasn't accepted by the prefetch. This happens in the case of a prefetch TimeOut",
"UMask": "0x80",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1; Data Throttled",
"Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.DATA_THROTTLE",
"PerPkg": "1",
"PublicDescription": "IRP throttled switch data",
"UMask": "0x80",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1",
"Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.LOST_FWD",
"PerPkg": "1",
"PublicDescription": "Misc Events - Set 1 : Lost Forward : Snoop pulled away ownership before a write was committed",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1; Received Invalid",
"Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
"PerPkg": "1",
"PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1; Received Valid",
"Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
"PerPkg": "1",
"PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
"Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_E",
"PerPkg": "1",
"PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
"Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_I",
"PerPkg": "1",
"PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
"Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_M",
"PerPkg": "1",
"PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
"Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_I_MISC1.SLOW_S",
"PerPkg": "1",
"PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "AK Ingress Occupancy",
"Counter": "0,1",
"EventCode": "0xA",
"EventName": "UNC_I_RxR_AK_INSERTS",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the AK Ingress. This queue is where the IRP receives responses from R2PCIe (the ring).",
"Unit": "IRP"
},
{
"BriefDescription": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
"Counter": "0,1",
"EventCode": "0x4",
"EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"BriefDescription": "BL Ingress Occupancy - DRS",
"Counter": "0,1",
"EventCode": "0x1",
"EventName": "UNC_I_RxR_BL_DRS_INSERTS",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"BriefDescription": "UNC_I_RxR_BL_DRS_OCCUPANCY",
"Counter": "0,1",
"EventCode": "0x7",
"EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"BriefDescription": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
"Counter": "0,1",
"EventCode": "0x5",
"EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"BriefDescription": "BL Ingress Occupancy - NCB",
"Counter": "0,1",
"EventCode": "0x2",
"EventName": "UNC_I_RxR_BL_NCB_INSERTS",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"BriefDescription": "UNC_I_RxR_BL_NCB_OCCUPANCY",
"Counter": "0,1",
"EventCode": "0x8",
"EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"BriefDescription": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
"Counter": "0,1",
"EventCode": "0x6",
"EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"BriefDescription": "BL Ingress Occupancy - NCS",
"Counter": "0,1",
"EventCode": "0x3",
"EventName": "UNC_I_RxR_BL_NCS_INSERTS",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"BriefDescription": "UNC_I_RxR_BL_NCS_OCCUPANCY",
"Counter": "0,1",
"EventCode": "0x9",
"EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requests as well as outbound MMIO writes.",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; Hit E or S",
"Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.HIT_ES",
"PerPkg": "1",
"PublicDescription": "Snoop Responses : Hit E or S",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; Hit I",
"Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.HIT_I",
"PerPkg": "1",
"PublicDescription": "Snoop Responses : Hit I",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; Hit M",
"Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.HIT_M",
"PerPkg": "1",
"PublicDescription": "Snoop Responses : Hit M",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; Miss",
"Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.MISS",
"PerPkg": "1",
"PublicDescription": "Snoop Responses : Miss",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; SnpCode",
"Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.SNPCODE",
"PerPkg": "1",
"PublicDescription": "Snoop Responses : SnpCode",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; SnpData",
"Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.SNPDATA",
"PerPkg": "1",
"PublicDescription": "Snoop Responses : SnpData",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Snoop Responses; SnpInv",
"Counter": "0,1",
"EventCode": "0x17",
"EventName": "UNC_I_SNOOP_RESP.SNPINV",
"PerPkg": "1",
"PublicDescription": "Snoop Responses : SnpInv",
"UMask": "0x40",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound Transaction Count; Atomic",
"Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.ATOMIC",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of atomic transactions",
"UMask": "0x10",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound Transaction Count; Other",
"Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.OTHER",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of 'other' kinds of transactions.",
"UMask": "0x20",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound Transaction Count; Read Prefetches",
"Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.RD_PREF",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of read prefetches.",
"UMask": "0x4",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound Transaction Count; Reads",
"Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.READS",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only read requests (not including read prefetches).",
"UMask": "0x1",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound Transaction Count; Writes",
"Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.WRITES",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Trackes only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.",
"UMask": "0x2",
"Unit": "IRP"
},
{
"BriefDescription": "Inbound Transaction Count; Write Prefetches",
"Counter": "0,1",
"EventCode": "0x16",
"EventName": "UNC_I_TRANSACTIONS.WR_PREF",
"PerPkg": "1",
"PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of write prefetches.",
"UMask": "0x8",
"Unit": "IRP"
},
{
"BriefDescription": "No AD Egress Credit Stalls",
"Counter": "0,1",
"EventCode": "0x18",
"EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
"PerPkg": "1",
"PublicDescription": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "No BL Egress Credit Stalls",
"Counter": "0,1",
"EventCode": "0x19",
"EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
"PerPkg": "1",
"PublicDescription": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Read Requests",
"Counter": "0,1",
"EventCode": "0xE",
"EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
"PerPkg": "1",
"PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Read Requests",
"Counter": "0,1",
"EventCode": "0xF",
"EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
"PerPkg": "1",
"PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
"Unit": "IRP"
},
{
"BriefDescription": "Outbound Request Queue Occupancy",
"Counter": "0,1",
"EventCode": "0xD",
"EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
"Unit": "IRP"
},
{
"BriefDescription": "Number of qfclks",
"Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Counts the number of clocks in the QPI LL. This clock runs at 1/4th the GT/s speed of the QPI link. For example, a 4GT/s link will have qfclk or 1GHz. HSX does not support dynamic link speeds, so this frequency is fixed.",
"Unit": "QPI"
},
{
"BriefDescription": "Count of CTO Events",
"Counter": "0,1,2,3",
"EventCode": "0x38",
"EventName": "UNC_Q_CTO_COUNT",
"PerPkg": "1",
"PublicDescription": "Counts the number of CTO (cluster trigger outs) events that were asserted across the two slots. If both slots trigger in a given cycle, the event will increment by 2. You can use edge detect to count the number of cases when both events triggered.",
"Unit": "QPI"
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because there were not enough Egress credits. Had there been enough credits, the spawn would have worked as the RBT bit was set and the RBT tag matched.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_MISS",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match and there weren't enough Egress credits. The valid bit was set.",
"UMask": "0x20",
"Unit": "QPI"
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Invalid",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because there were not enough Egress credits AND the RBT bit was not set, but the RBT tag matched.",
"UMask": "0x8",
"Unit": "QPI"
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss, Invalid",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT_MISS",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match, the valid bit was not set and there weren't enough Egress credits.",
"UMask": "0x80",
"Unit": "QPI"
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_MISS",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match although the valid bit was set and there were enough Egress credits.",
"UMask": "0x10",
"Unit": "QPI"
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Invalid",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_HIT",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the route-back table (RBT) specified that the transaction should not trigger a direct2core transaction. This is common for IO transactions. There were enough Egress credits and the RBT tag matched but the valid bit was not set.",
"UMask": "0x4",
"Unit": "QPI"
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss and Invalid",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_MISS",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match and the valid bit was not set although there were enough Egress credits.",
"UMask": "0x40",
"Unit": "QPI"
},
{
"BriefDescription": "Direct 2 Core Spawning; Spawn Success",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_DIRECT2CORE.SUCCESS_RBT_HIT",
"PerPkg": "1",
"PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn was successful. There were sufficient credits, the RBT valid bit was set and there was an RBT tag match. The message was marked to spawn direct2core.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "Cycles in L1",
"Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_Q_L1_POWER_CYCLES",
"PerPkg": "1",
"PublicDescription": "Number of QPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a QPI link. Use edge detect to count the number of instances when the QPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.",
"Unit": "QPI"
},
{
"BriefDescription": "Cycles in L0p",
"Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL0P_POWER_CYCLES",
"PerPkg": "1",
"PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
"Unit": "QPI"
},
{
"BriefDescription": "Cycles in L0",
"Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_Q_RxL0_POWER_CYCLES",
"PerPkg": "1",
"PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
"Unit": "QPI"
},
{
"BriefDescription": "Rx Flit Buffer Bypassed",
"Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_BYPASSED",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
"Unit": "QPI"
},
{
"BriefDescription": "CRC Errors Detected; LinkInit",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
"PerPkg": "1",
"PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).; CRC errors detected during link initialization.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "CRC Errors Detected; Normal Operations",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
"PerPkg": "1",
"PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).; CRC errors detected during normal operation.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "VN0 Credit Consumed; DRS",
"Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the DRS message class.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "VN0 Credit Consumed; HOM",
"Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the HOM message class.",
"UMask": "0x8",
"Unit": "QPI"
},
{
"BriefDescription": "VN0 Credit Consumed; NCB",
"Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the NCB message class.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "VN0 Credit Consumed; NCS",
"Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the NCS message class.",
"UMask": "0x4",
"Unit": "QPI"
},
{
"BriefDescription": "VN0 Credit Consumed; NDR",
"Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the NDR message class.",
"UMask": "0x20",
"Unit": "QPI"
},
{
"BriefDescription": "VN0 Credit Consumed; SNP",
"Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the SNP message class.",
"UMask": "0x10",
"Unit": "QPI"
},
{
"BriefDescription": "VN1 Credit Consumed; DRS",
"Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.DRS",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the DRS message class.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "VN1 Credit Consumed; HOM",
"Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.HOM",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the HOM message class.",
"UMask": "0x8",
"Unit": "QPI"
},
{
"BriefDescription": "VN1 Credit Consumed; NCB",
"Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCB",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the NCB message class.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "VN1 Credit Consumed; NCS",
"Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCS",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the NCS message class.",
"UMask": "0x4",
"Unit": "QPI"
},
{
"BriefDescription": "VN1 Credit Consumed; NDR",
"Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NDR",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the NDR message class.",
"UMask": "0x20",
"Unit": "QPI"
},
{
"BriefDescription": "VN1 Credit Consumed; SNP",
"Counter": "0,1,2,3",
"EventCode": "0x39",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.SNP",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the SNP message class.",
"UMask": "0x10",
"Unit": "QPI"
},
{
"BriefDescription": "VNA Credit Consumed",
"Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Cycles Not Empty",
"Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_Q_RxL_CYCLES_NE",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy.",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Cycles Not Empty - DRS; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN0",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors DRS flits only.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Cycles Not Empty - DRS; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0xF",
"EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors DRS flits only.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Cycles Not Empty - HOM; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN0",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors HOM flits only.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Cycles Not Empty - HOM; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors HOM flits only.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCB; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN0",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCB flits only.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCB; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCB flits only.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCS; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN0",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCS flits only.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Cycles Not Empty - NCS; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCS flits only.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Cycles Not Empty - NDR; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN0",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NDR flits only.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Cycles Not Empty - NDR; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NDR flits only.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Cycles Not Empty - SNP; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN0",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors SNP flits only.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Cycles Not Empty - SNP; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN1",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors SNP flits only.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of flits received over QPI that do not hold protocol payload. When QPI is not in a power saving state, it continuously transmits flits across the link. When there are no protocol flits to send, it will send IDLE and NULL flits across. These flits sometimes do carry a payload, such as credit returns, but are generally not considered part of the QPI bandwidth.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits received over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits received over the NCB channel which transmits non-coherent data.",
"UMask": "0x18",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Received - Group 1; DRS Data Flits",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of data flits received over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits received over the NCB channel which transmits non-coherent data. This includes only the data flits (not the header).",
"UMask": "0x8",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Received - Group 1; DRS Header Flits",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of protocol flits received over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits received over the NCB channel which transmits non-coherent data. This includes only the header flits (not the data). This includes extended headers.",
"UMask": "0x10",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Received - Group 1; HOM Flits",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of flits received over QPI on the home channel.",
"UMask": "0x6",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of non-request flits received over QPI on the home channel. These are most commonly snoop responses, and this event can be used as a proxy for that.",
"UMask": "0x4",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Received - Group 1; HOM Request Flits",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of data request received over QPI on the home channel. This basically counts the number of remote memory requests received over QPI. In conjunction with the local read count in the Home Agent, one can calculate the number of LLC Misses.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Received - Group 1; SNP Flits",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_RxL_FLITS_G1.SNP",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of snoop request flits received over QPI. These requests are contained in the snoop channel. This does not include snoop responses, which are received on the home channel.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass flits. These packets are generally used to transmit non-coherent data across QPI.",
"UMask": "0xc",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass data flits. These flits are generally used to transmit non-coherent data across QPI. This does not include a count of the DRS (coherent) data flits. This only counts the data flits, not the NCB headers.",
"UMask": "0x4",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass non-data flits. These packets are generally used to transmit non-coherent data across QPI, and the flits counted here are for headers and other non-data flits. This includes extended headers.",
"UMask": "0x8",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NCS",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of NCS (non-coherent standard) flits received over QPI. This includes extended headers.",
"UMask": "0x10",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits received over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets to the local socket which use the AK ring.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits received over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets destined for Route-thru to a remote socket.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Rx Flit Buffer Allocations",
"Counter": "0,1,2,3",
"EventCode": "0x8",
"EventName": "UNC_Q_RxL_INSERTS",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"Unit": "QPI"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_INSERTS_DRS.VN0",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_Q_RxL_INSERTS_DRS.VN1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_Q_RxL_INSERTS_HOM.VN0",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_Q_RxL_INSERTS_HOM.VN1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_Q_RxL_INSERTS_NCB.VN0",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_Q_RxL_INSERTS_NCB.VN1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "UNC_Q_RxL_INSERTS_NCS.VN0",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "UNC_Q_RxL_INSERTS_NCS.VN1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UNC_Q_RxL_INSERTS_NDR.VN0",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0xE",
"EventName": "UNC_Q_RxL_INSERTS_NDR.VN1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_Q_RxL_INSERTS_SNP.VN0",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_Q_RxL_INSERTS_SNP.VN1",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Occupancy - All Packets",
"Counter": "0,1,2,3",
"EventCode": "0xB",
"EventName": "UNC_Q_RxL_OCCUPANCY",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Occupancy - DRS; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN0",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Occupancy - DRS; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Occupancy - HOM; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN0",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Occupancy - HOM; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x18",
"EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Occupancy - NCB; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN0",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Occupancy - NCB; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x16",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Occupancy - NCS; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN0",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Occupancy - NCS; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x17",
"EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Occupancy - NDR; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN0",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Occupancy - NDR; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x1A",
"EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Occupancy - SNP; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN0",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "RxQ Occupancy - SNP; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x19",
"EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN1",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - HOM",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_DRS",
"PerPkg": "1",
"PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the HOM message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - DRS",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_HOM",
"PerPkg": "1",
"PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the DRS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
"UMask": "0x8",
"Unit": "QPI"
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - SNP",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCB",
"PerPkg": "1",
"PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the SNP message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NDR",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCS",
"PerPkg": "1",
"PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the NDR message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
"UMask": "0x4",
"Unit": "QPI"
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCS",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NDR",
"PerPkg": "1",
"PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the NCS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
"UMask": "0x20",
"Unit": "QPI"
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCB",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.BGF_SNP",
"PerPkg": "1",
"PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the NCB message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
"UMask": "0x10",
"Unit": "QPI"
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; Egress Credits",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.EGRESS_CREDITS",
"PerPkg": "1",
"PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet because there were insufficient BGF credits. For details on a message class granularity, use the Egress Credit Occupancy events.",
"UMask": "0x40",
"Unit": "QPI"
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN0; GV",
"Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "UNC_Q_RxL_STALLS_VN0.GV",
"PerPkg": "1",
"PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled because a GV transition (frequency transition) was taking place.",
"UMask": "0x80",
"Unit": "QPI"
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - HOM",
"Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_DRS",
"PerPkg": "1",
"PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the HOM message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - DRS",
"Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_HOM",
"PerPkg": "1",
"PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the DRS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
"UMask": "0x8",
"Unit": "QPI"
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - SNP",
"Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCB",
"PerPkg": "1",
"PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the SNP message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NDR",
"Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCS",
"PerPkg": "1",
"PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the NDR message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
"UMask": "0x4",
"Unit": "QPI"
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCS",
"Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NDR",
"PerPkg": "1",
"PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the NCS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
"UMask": "0x20",
"Unit": "QPI"
},
{
"BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCB",
"Counter": "0,1,2,3",
"EventCode": "0x3A",
"EventName": "UNC_Q_RxL_STALLS_VN1.BGF_SNP",
"PerPkg": "1",
"PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the NCB message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
"UMask": "0x10",
"Unit": "QPI"
},
{
"BriefDescription": "Cycles in L0p",
"Counter": "0,1,2,3",
"EventCode": "0xD",
"EventName": "UNC_Q_TxL0P_POWER_CYCLES",
"PerPkg": "1",
"PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
"Unit": "QPI"
},
{
"BriefDescription": "Cycles in L0",
"Counter": "0,1,2,3",
"EventCode": "0xC",
"EventName": "UNC_Q_TxL0_POWER_CYCLES",
"PerPkg": "1",
"PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
"Unit": "QPI"
},
{
"BriefDescription": "Tx Flit Buffer Bypassed",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_Q_TxL_BYPASSED",
"PerPkg": "1",
"PublicDescription": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the QPI Link. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
"Unit": "QPI"
},
{
"BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
"PerPkg": "1",
"PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.; When LLR is almost full, we block some but not all packets.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
"PerPkg": "1",
"PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.; When LLR is totally full, we are not allowed to send any packets.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "Tx Flit Buffer Cycles not Empty",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_Q_TxL_CYCLES_NE",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the TxQ is not empty. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Transferred - Group 0; Data Tx Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of data flits transmitted over QPI. Each flit contains 64b of data. This includes both DRS and NCB data flits (coherent and non-coherent). This can be used to calculate the data bandwidth of the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This does not include the header flits that go in data packets.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Transferred - Group 0; Non-Data protocol Tx Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of non-NULL non-data flits transmitted across QPI. This basically tracks the protocol overhead on the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This includes the header flits for data packets.",
"UMask": "0x4",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency.",
"UMask": "0x18",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of data flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the data flits (not the header).",
"UMask": "0x8",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of protocol flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the header flits (not the data). This includes extended headers.",
"UMask": "0x10",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of flits transmitted over QPI on the home channel.",
"UMask": "0x6",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of non-request flits transmitted over QPI on the home channel. These are most commonly snoop responses, and this event can be used as a proxy for that.",
"UMask": "0x4",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of data request transmitted over QPI on the home channel. This basically counts the number of remote memory requests transmitted over QPI. In conjunction with the local read count in the Home Agent, one can calculate the number of LLC Misses.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Transferred - Group 1; SNP Flits",
"Counter": "0,1,2,3",
"EventName": "UNC_Q_TxL_FLITS_G1.SNP",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of snoop request flits transmitted over QPI. These requests are contained in the snoop channel. This does not include snoop responses, which are transmitted on the home channel.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass flits. These packets are generally used to transmit non-coherent data across QPI.",
"UMask": "0xc",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass data flits. These flits are generally used to transmit non-coherent data across QPI. This does not include a count of the DRS (coherent) data flits. This only counts the data flits, not the NCB headers.",
"UMask": "0x4",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass non-data flits. These packets are generally used to transmit non-coherent data across QPI, and the flits counted here are for headers and other non-data flits. This includes extended headers.",
"UMask": "0x8",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NCS",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of NCS (non-coherent standard) flits transmitted over QPI. This includes extended headers.",
"UMask": "0x10",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets to the local socket which use the AK ring.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
"PerPkg": "1",
"PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets destined for Route-thru to a remote socket.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "Tx Flit Buffer Allocations",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_Q_TxL_INSERTS",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the QPI Tx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
"Unit": "QPI"
},
{
"BriefDescription": "Tx Flit Buffer Occupancy",
"Counter": "0,1,2,3",
"EventCode": "0x7",
"EventName": "UNC_Q_TxL_OCCUPANCY",
"PerPkg": "1",
"PublicDescription": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
"PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x26",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
"PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
"PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x22",
"EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
"PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
"PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x28",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
"PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
"PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x24",
"EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
"PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
"PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x27",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
"PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
"PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for Snoop messages on AD.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x23",
"EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
"PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for Snoop messages on AD.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
"Counter": "0,1,2,3",
"EventCode": "0x29",
"EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED",
"PerPkg": "1",
"PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. Local NDR message class to AK Egress.",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
"Counter": "0,1,2,3",
"EventCode": "0x25",
"EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY",
"PerPkg": "1",
"PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. Local NDR message class to AK Egress.",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
"PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
"PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for Shared VN",
"Counter": "0,1,2,3",
"EventCode": "0x2A",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN_SHR",
"PerPkg": "1",
"PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.",
"UMask": "0x4",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
"PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
"PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for Shared VN",
"Counter": "0,1,2,3",
"EventCode": "0x1F",
"EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN_SHR",
"PerPkg": "1",
"PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.",
"UMask": "0x4",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
"PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x2B",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
"PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
"PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
"PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN0",
"PerPkg": "1",
"PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x2C",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN1",
"PerPkg": "1",
"PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN0",
"Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN0",
"PerPkg": "1",
"PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.",
"UMask": "0x1",
"Unit": "QPI"
},
{
"BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN1",
"Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN1",
"PerPkg": "1",
"PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.",
"UMask": "0x2",
"Unit": "QPI"
},
{
"BriefDescription": "VNA Credits Returned",
"Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_Q_VNA_CREDIT_RETURNS",
"PerPkg": "1",
"PublicDescription": "Number of VNA credits returned.",
"Unit": "QPI"
},
{
"BriefDescription": "VNA Credits Pending Return - Occupancy",
"Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
"PerPkg": "1",
"PublicDescription": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
"Unit": "QPI"
},
{
"BriefDescription": "Number of uclks in domain",
"Counter": "0,1,2",
"EventCode": "0x1",
"EventName": "UNC_R3_CLOCKTICKS",
"PerPkg": "1",
"PublicDescription": "Counts the number of uclks in the QPI uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the QPI Agent is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO10",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 10",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO11",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 11",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO12",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 12",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO13",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 13",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO14_16",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 14&16",
"UMask": "0x40",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO8",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 8",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO9",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 9",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x1F",
"EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO_15_17",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 15&17",
"UMask": "0x80",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO0",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 0",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 1",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO2",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 2",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO3",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 3",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO4",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 4",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO5",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 5",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO6",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 6",
"UMask": "0x40",
"Unit": "R3QPI"
},
{
"BriefDescription": "CBox AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x22",
"EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO7",
"PerPkg": "1",
"PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 7",
"UMask": "0x80",
"Unit": "R3QPI"
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA0",
"PerPkg": "1",
"PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; HA0",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA1",
"PerPkg": "1",
"PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; HA1",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCB",
"PerPkg": "1",
"PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; R2 NCB Messages",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "HA/R2 AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x2D",
"EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCS",
"PerPkg": "1",
"PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; R2 NCS Messages",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "IOT Backpressure",
"Counter": "0,1,2",
"EventCode": "0xB",
"EventName": "UNC_R3_IOT_BACKPRESSURE.HUB",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "IOT Backpressure",
"Counter": "0,1,2",
"EventCode": "0xB",
"EventName": "UNC_R3_IOT_BACKPRESSURE.SAT",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Hi",
"Counter": "0,1,2",
"EventCode": "0xD",
"EventName": "UNC_R3_IOT_CTS_HI.CTS2",
"PerPkg": "1",
"PublicDescription": "Debug Mask/Match Tie-Ins",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Hi",
"Counter": "0,1,2",
"EventCode": "0xD",
"EventName": "UNC_R3_IOT_CTS_HI.CTS3",
"PerPkg": "1",
"PublicDescription": "Debug Mask/Match Tie-Ins",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
"Counter": "0,1,2",
"EventCode": "0xC",
"EventName": "UNC_R3_IOT_CTS_LO.CTS0",
"PerPkg": "1",
"PublicDescription": "Debug Mask/Match Tie-Ins",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "IOT Common Trigger Sequencer - Lo",
"Counter": "0,1,2",
"EventCode": "0xC",
"EventName": "UNC_R3_IOT_CTS_LO.CTS1",
"PerPkg": "1",
"PublicDescription": "Debug Mask/Match Tie-Ins",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI0 AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_HOM",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN0 HOM Messages",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI0 AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_NDR",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN0 NDR Messages",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI0 AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_SNP",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN0 SNP Messages",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI0 AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN1 HOM Messages",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI0 AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN1 NDR Messages",
"UMask": "0x40",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI0 AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN1 SNP Messages",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI0 AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x20",
"EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VNA",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI0 on the AD Ring; VNA",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI0 BL Credits Empty",
"Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN1 HOM Messages",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI0 BL Credits Empty",
"Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN1 NDR Messages",
"UMask": "0x40",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI0 BL Credits Empty",
"Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN1 SNP Messages",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI0 BL Credits Empty",
"Counter": "0,1",
"EventCode": "0x21",
"EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VNA",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI0 on the BL Ring; VNA",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI1 AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x2E",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN1 HOM Messages",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI1 AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x2E",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN1 NDR Messages",
"UMask": "0x40",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI1 AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x2E",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN1 SNP Messages",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI1 AD Credits Empty",
"Counter": "0,1",
"EventCode": "0x2E",
"EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VNA",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI1 on the AD Ring; VNA",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI1 BL Credits Empty",
"Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_HOM",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN0 HOM Messages",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI1 BL Credits Empty",
"Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_NDR",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN0 NDR Messages",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI1 BL Credits Empty",
"Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_SNP",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN0 SNP Messages",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI1 BL Credits Empty",
"Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_HOM",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN1 HOM Messages",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI1 BL Credits Empty",
"Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_NDR",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN1 NDR Messages",
"UMask": "0x40",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI1 BL Credits Empty",
"Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_SNP",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN1 SNP Messages",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "QPI1 BL Credits Empty",
"Counter": "0,1",
"EventCode": "0x2F",
"EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VNA",
"PerPkg": "1",
"PublicDescription": "No credits available to send to QPI1 on the BL Ring; VNA",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 AD Ring in Use; Counterclockwise",
"Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CCW",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
"UMask": "0xc",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 AD Ring in Use; Counterclockwise and Even",
"Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CCW_EVEN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 AD Ring in Use; Counterclockwise and Odd",
"Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CCW_ODD",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 AD Ring in Use; Clockwise",
"Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CW",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
"UMask": "0x3",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 AD Ring in Use; Clockwise and Even",
"Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CW_EVEN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 AD Ring in Use; Clockwise and Odd",
"Counter": "0,1,2",
"EventCode": "0x7",
"EventName": "UNC_R3_RING_AD_USED.CW_ODD",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 AK Ring in Use; Counterclockwise",
"Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CCW",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
"UMask": "0xc",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 AK Ring in Use; Counterclockwise and Even",
"Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CCW_EVEN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 AK Ring in Use; Counterclockwise and Odd",
"Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CCW_ODD",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 AK Ring in Use; Clockwise",
"Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CW",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
"UMask": "0x3",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 AK Ring in Use; Clockwise and Even",
"Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CW_EVEN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 AK Ring in Use; Clockwise and Odd",
"Counter": "0,1,2",
"EventCode": "0x8",
"EventName": "UNC_R3_RING_AK_USED.CW_ODD",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 BL Ring in Use; Counterclockwise",
"Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CCW",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
"UMask": "0xc",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 BL Ring in Use; Counterclockwise and Even",
"Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CCW_EVEN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 BL Ring in Use; Counterclockwise and Odd",
"Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CCW_ODD",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 BL Ring in Use; Clockwise",
"Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CW",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
"UMask": "0x3",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 BL Ring in Use; Clockwise and Even",
"Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CW_EVEN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 BL Ring in Use; Clockwise and Odd",
"Counter": "0,1,2",
"EventCode": "0x9",
"EventName": "UNC_R3_RING_BL_USED.CW_ODD",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 IV Ring in Use; Any",
"Counter": "0,1,2",
"EventCode": "0xA",
"EventName": "UNC_R3_RING_IV_USED.ANY",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
"UMask": "0xf",
"Unit": "R3QPI"
},
{
"BriefDescription": "R3 IV Ring in Use; Clockwise",
"Counter": "0,1,2",
"EventCode": "0xA",
"EventName": "UNC_R3_RING_IV_USED.CW",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
"UMask": "0x3",
"Unit": "R3QPI"
},
{
"BriefDescription": "Ring Stop Starved; AK",
"Counter": "0,1,2",
"EventCode": "0xE",
"EventName": "UNC_R3_RING_SINK_STARVED.AK",
"PerPkg": "1",
"PublicDescription": "Number of cycles the ringstop is in starvation (per ring)",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "Ingress Cycles Not Empty; HOM",
"Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.HOM",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "Ingress Cycles Not Empty; NDR",
"Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.NDR",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "Ingress Cycles Not Empty; SNP",
"Counter": "0,1",
"EventCode": "0x10",
"EventName": "UNC_R3_RxR_CYCLES_NE.SNP",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; DRS",
"Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.DRS",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; DRS Ingress Queue",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; HOM",
"Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.HOM",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; NCB",
"Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NCB",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; NCS",
"Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NCS",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; NDR",
"Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NDR",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Cycles Not Empty; SNP",
"Counter": "0,1",
"EventCode": "0x14",
"EventName": "UNC_R3_RxR_CYCLES_NE_VN1.SNP",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "Ingress Allocations; DRS",
"Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.DRS",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; DRS Ingress Queue",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "Ingress Allocations; HOM",
"Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.HOM",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "Ingress Allocations; NCB",
"Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.NCB",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "Ingress Allocations; NCS",
"Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.NCS",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "Ingress Allocations; NDR",
"Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.NDR",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "Ingress Allocations; SNP",
"Counter": "0,1",
"EventCode": "0x11",
"EventName": "UNC_R3_RxR_INSERTS.SNP",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Allocations; DRS",
"Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.DRS",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; DRS Ingress Queue",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Allocations; HOM",
"Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.HOM",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Allocations; NCB",
"Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.NCB",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Allocations; NCS",
"Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.NCS",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Allocations; NDR",
"Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.NDR",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Allocations; SNP",
"Counter": "0,1",
"EventCode": "0x15",
"EventName": "UNC_R3_RxR_INSERTS_VN1.SNP",
"PerPkg": "1",
"PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; DRS",
"Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.DRS",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; DRS Ingress Queue",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; HOM",
"Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.HOM",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; HOM Ingress Queue",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; NCB",
"Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NCB",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; NCB Ingress Queue",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; NCS",
"Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NCS",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; NCS Ingress Queue",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; NDR",
"Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NDR",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; NDR Ingress Queue",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Ingress Occupancy Accumulator; SNP",
"Counter": "0",
"EventCode": "0x13",
"EventName": "UNC_R3_RxR_OCCUPANCY_VN1.SNP",
"PerPkg": "1",
"PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; SNP Ingress Queue",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "SBo0 Credits Acquired; For AD Ring",
"Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R3_SBO0_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
"PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "SBo0 Credits Acquired; For BL Ring",
"Counter": "0,1",
"EventCode": "0x28",
"EventName": "UNC_R3_SBO0_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
"PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
"Counter": "0",
"EventCode": "0x2A",
"EventName": "UNC_R3_SBO0_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
"PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
"Counter": "0",
"EventCode": "0x2A",
"EventName": "UNC_R3_SBO0_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
"PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "SBo1 Credits Acquired; For AD Ring",
"Counter": "0,1",
"EventCode": "0x29",
"EventName": "UNC_R3_SBO1_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
"PublicDescription": "Number of Sbo 1 credits acquired in a given cycle, per ring.",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "SBo1 Credits Acquired; For BL Ring",
"Counter": "0,1",
"EventCode": "0x29",
"EventName": "UNC_R3_SBO1_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
"PublicDescription": "Number of Sbo 1 credits acquired in a given cycle, per ring.",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
"Counter": "0",
"EventCode": "0x2B",
"EventName": "UNC_R3_SBO1_CREDIT_OCCUPANCY.AD",
"PerPkg": "1",
"PublicDescription": "Number of Sbo 1 credits in use in a given cycle, per ring.",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
"Counter": "0",
"EventCode": "0x2B",
"EventName": "UNC_R3_SBO1_CREDIT_OCCUPANCY.BL",
"PerPkg": "1",
"PublicDescription": "Number of Sbo 1 credits in use in a given cycle, per ring.",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
"Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO0_AD",
"PerPkg": "1",
"PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
"Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO0_BL",
"PerPkg": "1",
"PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
"Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO1_AD",
"PerPkg": "1",
"PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
"Counter": "0,1",
"EventCode": "0x2C",
"EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO1_BL",
"PerPkg": "1",
"PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "Egress CCW NACK; AD CCW",
"Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.DN_AD",
"PerPkg": "1",
"PublicDescription": "AD CounterClockwise Egress Queue",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "Egress CCW NACK; AK CCW",
"Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.DN_AK",
"PerPkg": "1",
"PublicDescription": "AK CounterClockwise Egress Queue",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "Egress CCW NACK; BL CCW",
"Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.DN_BL",
"PerPkg": "1",
"PublicDescription": "BL CounterClockwise Egress Queue",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "Egress CCW NACK; AK CCW",
"Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.UP_AD",
"PerPkg": "1",
"PublicDescription": "BL CounterClockwise Egress Queue",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "Egress CCW NACK; BL CW",
"Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.UP_AK",
"PerPkg": "1",
"PublicDescription": "AD Clockwise Egress Queue",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "Egress CCW NACK; BL CCW",
"Counter": "0,1",
"EventCode": "0x26",
"EventName": "UNC_R3_TxR_NACK.UP_BL",
"PerPkg": "1",
"PublicDescription": "AD CounterClockwise Egress Queue",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; DRS Message Class",
"Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.DRS",
"PerPkg": "1",
"PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; HOM Message Class",
"Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.HOM",
"PerPkg": "1",
"PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCB Message Class",
"Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.NCB",
"PerPkg": "1",
"PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCS Message Class",
"Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.NCS",
"PerPkg": "1",
"PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; NDR Message Class",
"Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.NDR",
"PerPkg": "1",
"PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN0 Credit Acquisition Failed on DRS; SNP Message Class",
"Counter": "0,1",
"EventCode": "0x37",
"EventName": "UNC_R3_VN0_CREDITS_REJECT.SNP",
"PerPkg": "1",
"PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN0 Credit Used; DRS Message Class",
"Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.DRS",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN0 Credit Used; HOM Message Class",
"Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.HOM",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN0 Credit Used; NCB Message Class",
"Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.NCB",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN0 Credit Used; NCS Message Class",
"Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.NCS",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN0 Credit Used; NDR Message Class",
"Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.NDR",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN0 Credit Used; SNP Message Class",
"Counter": "0,1",
"EventCode": "0x36",
"EventName": "UNC_R3_VN0_CREDITS_USED.SNP",
"PerPkg": "1",
"PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; DRS Message Class",
"Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.DRS",
"PerPkg": "1",
"PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; HOM Message Class",
"Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.HOM",
"PerPkg": "1",
"PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCB Message Class",
"Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.NCB",
"PerPkg": "1",
"PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCS Message Class",
"Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.NCS",
"PerPkg": "1",
"PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; NDR Message Class",
"Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.NDR",
"PerPkg": "1",
"PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Credit Acquisition Failed on DRS; SNP Message Class",
"Counter": "0,1",
"EventCode": "0x39",
"EventName": "UNC_R3_VN1_CREDITS_REJECT.SNP",
"PerPkg": "1",
"PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Credit Used; DRS Message Class",
"Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.DRS",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Credit Used; HOM Message Class",
"Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.HOM",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Credit Used; NCB Message Class",
"Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.NCB",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Credit Used; NCS Message Class",
"Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.NCS",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Credit Used; NDR Message Class",
"Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.NDR",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "VN1 Credit Used; SNP Message Class",
"Counter": "0,1",
"EventCode": "0x38",
"EventName": "UNC_R3_VN1_CREDITS_USED.SNP",
"PerPkg": "1",
"PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "VNA credit Acquisitions; HOM Message Class",
"Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.AD",
"PerPkg": "1",
"PublicDescription": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credits from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transferred). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transferred in a given message class using an qfclk event.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "VNA credit Acquisitions; HOM Message Class",
"Counter": "0,1",
"EventCode": "0x33",
"EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.BL",
"PerPkg": "1",
"PublicDescription": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credits from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transferred). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transferred in a given message class using an qfclk event.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "VNA Credit Reject; DRS Message Class",
"Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.DRS",
"PerPkg": "1",
"PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
"UMask": "0x8",
"Unit": "R3QPI"
},
{
"BriefDescription": "VNA Credit Reject; HOM Message Class",
"Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.HOM",
"PerPkg": "1",
"PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
"UMask": "0x1",
"Unit": "R3QPI"
},
{
"BriefDescription": "VNA Credit Reject; NCB Message Class",
"Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.NCB",
"PerPkg": "1",
"PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
"UMask": "0x10",
"Unit": "R3QPI"
},
{
"BriefDescription": "VNA Credit Reject; NCS Message Class",
"Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.NCS",
"PerPkg": "1",
"PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Non-Coherent Standard (NCS).",
"UMask": "0x20",
"Unit": "R3QPI"
},
{
"BriefDescription": "VNA Credit Reject; NDR Message Class",
"Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.NDR",
"PerPkg": "1",
"PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
"UMask": "0x4",
"Unit": "R3QPI"
},
{
"BriefDescription": "VNA Credit Reject; SNP Message Class",
"Counter": "0,1",
"EventCode": "0x34",
"EventName": "UNC_R3_VNA_CREDITS_REJECT.SNP",
"PerPkg": "1",
"PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
"UMask": "0x2",
"Unit": "R3QPI"
},
{
"BriefDescription": "Bounce Control",
"Counter": "0,1,2,3",
"EventCode": "0xA",
"EventName": "UNC_S_BOUNCE_CONTROL",
"PerPkg": "1",
"Unit": "SBOX"
},
{
"BriefDescription": "Uncore Clocks",
"Counter": "0,1,2,3",
"EventName": "UNC_S_CLOCKTICKS",
"PerPkg": "1",
"Unit": "SBOX"
},
{
"BriefDescription": "FaST wire asserted",
"Counter": "0,1,2,3",
"EventCode": "0x9",
"EventName": "UNC_S_FAST_ASSERTED",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
"Unit": "SBOX"
},
{
"BriefDescription": "AD Ring In Use; Down",
"Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.DOWN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0xc",
"Unit": "SBOX"
},
{
"BriefDescription": "AD Ring In Use; Down and Event",
"Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.DOWN_EVEN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Event ring polarity.",
"UMask": "0x4",
"Unit": "SBOX"
},
{
"BriefDescription": "AD Ring In Use; Down and Odd",
"Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.DOWN_ODD",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
"UMask": "0x8",
"Unit": "SBOX"
},
{
"BriefDescription": "AD Ring In Use; Up",
"Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.UP",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x3",
"Unit": "SBOX"
},
{
"BriefDescription": "AD Ring In Use; Up and Even",
"Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.UP_EVEN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
"UMask": "0x1",
"Unit": "SBOX"
},
{
"BriefDescription": "AD Ring In Use; Up and Odd",
"Counter": "0,1,2,3",
"EventCode": "0x1B",
"EventName": "UNC_S_RING_AD_USED.UP_ODD",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
"UMask": "0x2",
"Unit": "SBOX"
},
{
"BriefDescription": "AK Ring In Use; Down",
"Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.DOWN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0xc",
"Unit": "SBOX"
},
{
"BriefDescription": "AK Ring In Use; Down and Event",
"Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.DOWN_EVEN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Event ring polarity.",
"UMask": "0x4",
"Unit": "SBOX"
},
{
"BriefDescription": "AK Ring In Use; Down and Odd",
"Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.DOWN_ODD",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
"UMask": "0x8",
"Unit": "SBOX"
},
{
"BriefDescription": "AK Ring In Use; Up",
"Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.UP",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x3",
"Unit": "SBOX"
},
{
"BriefDescription": "AK Ring In Use; Up and Even",
"Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.UP_EVEN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
"UMask": "0x1",
"Unit": "SBOX"
},
{
"BriefDescription": "AK Ring In Use; Up and Odd",
"Counter": "0,1,2,3",
"EventCode": "0x1C",
"EventName": "UNC_S_RING_AK_USED.UP_ODD",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
"UMask": "0x2",
"Unit": "SBOX"
},
{
"BriefDescription": "BL Ring in Use; Down",
"Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.DOWN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0xc",
"Unit": "SBOX"
},
{
"BriefDescription": "BL Ring in Use; Down and Event",
"Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.DOWN_EVEN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Event ring polarity.",
"UMask": "0x4",
"Unit": "SBOX"
},
{
"BriefDescription": "BL Ring in Use; Down and Odd",
"Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.DOWN_ODD",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
"UMask": "0x8",
"Unit": "SBOX"
},
{
"BriefDescription": "BL Ring in Use; Up",
"Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.UP",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x3",
"Unit": "SBOX"
},
{
"BriefDescription": "BL Ring in Use; Up and Even",
"Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.UP_EVEN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
"UMask": "0x1",
"Unit": "SBOX"
},
{
"BriefDescription": "BL Ring in Use; Up and Odd",
"Counter": "0,1,2,3",
"EventCode": "0x1D",
"EventName": "UNC_S_RING_BL_USED.UP_ODD",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
"UMask": "0x2",
"Unit": "SBOX"
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_S_RING_BOUNCES.AD_CACHE",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "SBOX"
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Acknowledgements to core",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_S_RING_BOUNCES.AK_CORE",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "SBOX"
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Data Responses to core",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_S_RING_BOUNCES.BL_CORE",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "SBOX"
},
{
"BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
"Counter": "0,1,2,3",
"EventCode": "0x5",
"EventName": "UNC_S_RING_BOUNCES.IV_CORE",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "SBOX"
},
{
"BriefDescription": "BL Ring in Use; Any",
"Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_S_RING_IV_USED.DN",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. There is only 1 IV ring in HSX. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
"UMask": "0xc",
"Unit": "SBOX"
},
{
"BriefDescription": "BL Ring in Use; Any",
"Counter": "0,1,2,3",
"EventCode": "0x1E",
"EventName": "UNC_S_RING_IV_USED.UP",
"PerPkg": "1",
"PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. There is only 1 IV ring in HSX. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
"UMask": "0x3",
"Unit": "SBOX"
},
{
"BriefDescription": "UNC_S_RING_SINK_STARVED.AD_CACHE",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_S_RING_SINK_STARVED.AD_CACHE",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "SBOX"
},
{
"BriefDescription": "UNC_S_RING_SINK_STARVED.AK_CORE",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_S_RING_SINK_STARVED.AK_CORE",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "SBOX"
},
{
"BriefDescription": "UNC_S_RING_SINK_STARVED.BL_CORE",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_S_RING_SINK_STARVED.BL_CORE",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "SBOX"
},
{
"BriefDescription": "UNC_S_RING_SINK_STARVED.IV_CORE",
"Counter": "0,1,2,3",
"EventCode": "0x6",
"EventName": "UNC_S_RING_SINK_STARVED.IV_CORE",
"PerPkg": "1",
"UMask": "0x8",
"Unit": "SBOX"
},
{
"BriefDescription": "Injection Starvation; AD - Bounces",
"Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_S_RxR_BUSY_STARVED.AD_BNC",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress because a message (credited/bounceable) is being sent.",
"UMask": "0x2",
"Unit": "SBOX"
},
{
"BriefDescription": "Injection Starvation; AD - Credits",
"Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_S_RxR_BUSY_STARVED.AD_CRD",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress because a message (credited/bounceable) is being sent.",
"UMask": "0x1",
"Unit": "SBOX"
},
{
"BriefDescription": "Injection Starvation; BL - Bounces",
"Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_S_RxR_BUSY_STARVED.BL_BNC",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress because a message (credited/bounceable) is being sent.",
"UMask": "0x8",
"Unit": "SBOX"
},
{
"BriefDescription": "Injection Starvation; BL - Credits",
"Counter": "0,1,2,3",
"EventCode": "0x15",
"EventName": "UNC_S_RxR_BUSY_STARVED.BL_CRD",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress because a message (credited/bounceable) is being sent.",
"UMask": "0x4",
"Unit": "SBOX"
},
{
"BriefDescription": "Bypass; AD - Bounces",
"Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.AD_BNC",
"PerPkg": "1",
"PublicDescription": "Bypass the Sbo Ingress.",
"UMask": "0x2",
"Unit": "SBOX"
},
{
"BriefDescription": "Bypass; AD - Credits",
"Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.AD_CRD",
"PerPkg": "1",
"PublicDescription": "Bypass the Sbo Ingress.",
"UMask": "0x1",
"Unit": "SBOX"
},
{
"BriefDescription": "Bypass; AK",
"Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.AK",
"PerPkg": "1",
"PublicDescription": "Bypass the Sbo Ingress.",
"UMask": "0x10",
"Unit": "SBOX"
},
{
"BriefDescription": "Bypass; BL - Bounces",
"Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.BL_BNC",
"PerPkg": "1",
"PublicDescription": "Bypass the Sbo Ingress.",
"UMask": "0x8",
"Unit": "SBOX"
},
{
"BriefDescription": "Bypass; BL - Credits",
"Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.BL_CRD",
"PerPkg": "1",
"PublicDescription": "Bypass the Sbo Ingress.",
"UMask": "0x4",
"Unit": "SBOX"
},
{
"BriefDescription": "Bypass; IV",
"Counter": "0,1,2,3",
"EventCode": "0x12",
"EventName": "UNC_S_RxR_BYPASS.IV",
"PerPkg": "1",
"PublicDescription": "Bypass the Sbo Ingress.",
"UMask": "0x20",
"Unit": "SBOX"
},
{
"BriefDescription": "Injection Starvation; AD - Bounces",
"Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.AD_BNC",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
"UMask": "0x2",
"Unit": "SBOX"
},
{
"BriefDescription": "Injection Starvation; AD - Credits",
"Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.AD_CRD",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
"UMask": "0x1",
"Unit": "SBOX"
},
{
"BriefDescription": "Injection Starvation; AK",
"Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.AK",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
"UMask": "0x10",
"Unit": "SBOX"
},
{
"BriefDescription": "Injection Starvation; BL - Bounces",
"Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.BL_BNC",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
"UMask": "0x8",
"Unit": "SBOX"
},
{
"BriefDescription": "Injection Starvation; BL - Credits",
"Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.BL_CRD",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
"UMask": "0x4",
"Unit": "SBOX"
},
{
"BriefDescription": "Injection Starvation; IVF Credit",
"Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.IFV",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
"UMask": "0x40",
"Unit": "SBOX"
},
{
"BriefDescription": "Injection Starvation; IV",
"Counter": "0,1,2,3",
"EventCode": "0x14",
"EventName": "UNC_S_RxR_CRD_STARVED.IV",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
"UMask": "0x20",
"Unit": "SBOX"
},
{
"BriefDescription": "Ingress Allocations; AD - Bounces",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.AD_BNC",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
"UMask": "0x2",
"Unit": "SBOX"
},
{
"BriefDescription": "Ingress Allocations; AD - Credits",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.AD_CRD",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
"UMask": "0x1",
"Unit": "SBOX"
},
{
"BriefDescription": "Ingress Allocations; AK",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.AK",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
"UMask": "0x10",
"Unit": "SBOX"
},
{
"BriefDescription": "Ingress Allocations; BL - Bounces",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.BL_BNC",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
"UMask": "0x8",
"Unit": "SBOX"
},
{
"BriefDescription": "Ingress Allocations; BL - Credits",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.BL_CRD",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
"UMask": "0x4",
"Unit": "SBOX"
},
{
"BriefDescription": "Ingress Allocations; IV",
"Counter": "0,1,2,3",
"EventCode": "0x13",
"EventName": "UNC_S_RxR_INSERTS.IV",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
"UMask": "0x20",
"Unit": "SBOX"
},
{
"BriefDescription": "Ingress Occupancy; AD - Bounces",
"Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.AD_BNC",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
"UMask": "0x2",
"Unit": "SBOX"
},
{
"BriefDescription": "Ingress Occupancy; AD - Credits",
"Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.AD_CRD",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
"UMask": "0x1",
"Unit": "SBOX"
},
{
"BriefDescription": "Ingress Occupancy; AK",
"Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.AK",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
"UMask": "0x10",
"Unit": "SBOX"
},
{
"BriefDescription": "Ingress Occupancy; BL - Bounces",
"Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.BL_BNC",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
"UMask": "0x8",
"Unit": "SBOX"
},
{
"BriefDescription": "Ingress Occupancy; BL - Credits",
"Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.BL_CRD",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
"UMask": "0x4",
"Unit": "SBOX"
},
{
"BriefDescription": "Ingress Occupancy; IV",
"Counter": "0,1,2,3",
"EventCode": "0x11",
"EventName": "UNC_S_RxR_OCCUPANCY.IV",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
"UMask": "0x20",
"Unit": "SBOX"
},
{
"BriefDescription": "UNC_S_TxR_ADS_USED.AD",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_S_TxR_ADS_USED.AD",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "SBOX"
},
{
"BriefDescription": "UNC_S_TxR_ADS_USED.AK",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_S_TxR_ADS_USED.AK",
"PerPkg": "1",
"UMask": "0x2",
"Unit": "SBOX"
},
{
"BriefDescription": "UNC_S_TxR_ADS_USED.BL",
"Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "UNC_S_TxR_ADS_USED.BL",
"PerPkg": "1",
"UMask": "0x4",
"Unit": "SBOX"
},
{
"BriefDescription": "Egress Allocations; AD - Bounces",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.AD_BNC",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
"UMask": "0x2",
"Unit": "SBOX"
},
{
"BriefDescription": "Egress Allocations; AD - Credits",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.AD_CRD",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
"UMask": "0x1",
"Unit": "SBOX"
},
{
"BriefDescription": "Egress Allocations; AK",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.AK",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
"UMask": "0x10",
"Unit": "SBOX"
},
{
"BriefDescription": "Egress Allocations; BL - Bounces",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.BL_BNC",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
"UMask": "0x8",
"Unit": "SBOX"
},
{
"BriefDescription": "Egress Allocations; BL - Credits",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.BL_CRD",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
"UMask": "0x4",
"Unit": "SBOX"
},
{
"BriefDescription": "Egress Allocations; IV",
"Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_S_TxR_INSERTS.IV",
"PerPkg": "1",
"PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
"UMask": "0x20",
"Unit": "SBOX"
},
{
"BriefDescription": "Egress Occupancy; AD - Bounces",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.AD_BNC",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
"UMask": "0x2",
"Unit": "SBOX"
},
{
"BriefDescription": "Egress Occupancy; AD - Credits",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.AD_CRD",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
"UMask": "0x1",
"Unit": "SBOX"
},
{
"BriefDescription": "Egress Occupancy; AK",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.AK",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
"UMask": "0x10",
"Unit": "SBOX"
},
{
"BriefDescription": "Egress Occupancy; BL - Bounces",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.BL_BNC",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
"UMask": "0x8",
"Unit": "SBOX"
},
{
"BriefDescription": "Egress Occupancy; BL - Credits",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.BL_CRD",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
"UMask": "0x4",
"Unit": "SBOX"
},
{
"BriefDescription": "Egress Occupancy; IV",
"Counter": "0,1,2,3",
"EventCode": "0x1",
"EventName": "UNC_S_TxR_OCCUPANCY.IV",
"PerPkg": "1",
"PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
"UMask": "0x20",
"Unit": "SBOX"
},
{
"BriefDescription": "Injection Starvation; Onto AD Ring",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_S_TxR_STARVED.AD",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
"UMask": "0x1",
"Unit": "SBOX"
},
{
"BriefDescription": "Injection Starvation; Onto AK Ring",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_S_TxR_STARVED.AK",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
"UMask": "0x2",
"Unit": "SBOX"
},
{
"BriefDescription": "Injection Starvation; Onto BL Ring",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_S_TxR_STARVED.BL",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
"UMask": "0x4",
"Unit": "SBOX"
},
{
"BriefDescription": "Injection Starvation; Onto IV Ring",
"Counter": "0,1,2,3",
"EventCode": "0x3",
"EventName": "UNC_S_TxR_STARVED.IV",
"PerPkg": "1",
"PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
"UMask": "0x8",
"Unit": "SBOX"
},
{
"BriefDescription": "UNC_U_CLOCKTICKS",
"Counter": "0,1",
"EventName": "UNC_U_CLOCKTICKS",
"PerPkg": "1",
"Unit": "UBOX"
},
{
"BriefDescription": "VLW Received",
"Counter": "0,1",
"EventCode": "0x42",
"EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
"PerPkg": "1",
"PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
"UMask": "0x8",
"Unit": "UBOX"
},
{
"BriefDescription": "Filter Match",
"Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.DISABLE",
"PerPkg": "1",
"PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "Filter Match",
"Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.ENABLE",
"PerPkg": "1",
"PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "Filter Match",
"Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
"PerPkg": "1",
"PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
"UMask": "0x8",
"Unit": "UBOX"
},
{
"BriefDescription": "Filter Match",
"Counter": "0,1",
"EventCode": "0x41",
"EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
"PerPkg": "1",
"PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
"UMask": "0x4",
"Unit": "UBOX"
},
{
"BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
"Counter": "0,1",
"EventCode": "0x45",
"EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
"PerPkg": "1",
"PublicDescription": "PHOLD cycles. Filter from source CoreID.",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "RACU Request",
"Counter": "0,1",
"EventCode": "0x46",
"EventName": "UNC_U_RACU_REQUESTS",
"PerPkg": "1",
"PublicDescription": "Number outstanding register requests within message channel tracker",
"Unit": "UBOX"
},
{
"BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
"Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.CMC",
"PerPkg": "1",
"PublicDescription": "Events coming from Uncore can be sent to one or all cores",
"UMask": "0x10",
"Unit": "UBOX"
},
{
"BriefDescription": "Monitor Sent to T0; Livelock",
"Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
"PerPkg": "1",
"PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
"UMask": "0x4",
"Unit": "UBOX"
},
{
"BriefDescription": "Monitor Sent to T0; LTError",
"Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.LTERROR",
"PerPkg": "1",
"PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
"UMask": "0x8",
"Unit": "UBOX"
},
{
"BriefDescription": "Monitor Sent to T0; Monitor T0",
"Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
"PerPkg": "1",
"PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
"UMask": "0x1",
"Unit": "UBOX"
},
{
"BriefDescription": "Monitor Sent to T0; Monitor T1",
"Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
"PerPkg": "1",
"PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
"UMask": "0x2",
"Unit": "UBOX"
},
{
"BriefDescription": "Monitor Sent to T0; Other",
"Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.OTHER",
"PerPkg": "1",
"PublicDescription": "Events coming from Uncore can be sent to one or all cores; PREQ, PSMI, P2U, Thermal, PCUSMI, PMI",
"UMask": "0x80",
"Unit": "UBOX"
},
{
"BriefDescription": "Monitor Sent to T0; Trap",
"Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.TRAP",
"PerPkg": "1",
"PublicDescription": "Events coming from Uncore can be sent to one or all cores",
"UMask": "0x40",
"Unit": "UBOX"
},
{
"BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
"Counter": "0,1",
"EventCode": "0x43",
"EventName": "UNC_U_U2C_EVENTS.UMC",
"PerPkg": "1",
"PublicDescription": "Events coming from Uncore can be sent to one or all cores",
"UMask": "0x20",
"Unit": "UBOX"
}
]