/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Macros for SMC statistics
 *
 * Copyright IBM Corp. 2021
 *
 * Author(s): Guvenc Gulce
 */

#ifndef NET_SMC_SMC_STATS_H_
#define NET_SMC_SMC_STATS_H_
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/smc.h>

#include "smc_clc.h"

#define SMC_MAX_FBACK_RSN_CNT 30

enum {
	SMC_BUF_8K,
	SMC_BUF_16K,
	SMC_BUF_32K,
	SMC_BUF_64K,
	SMC_BUF_128K,
	SMC_BUF_256K,
	SMC_BUF_512K,
	SMC_BUF_1024K,
	SMC_BUF_G_1024K,
	SMC_BUF_MAX,
};

struct smc_stats_fback {
	int	fback_code;
	u16	count;
};

struct smc_stats_rsn {
	struct smc_stats_fback srv[SMC_MAX_FBACK_RSN_CNT];
	struct smc_stats_fback clnt[SMC_MAX_FBACK_RSN_CNT];
	u64	srv_fback_cnt;
	u64	clnt_fback_cnt;
};

struct smc_stats_rmbcnt {
	u64	buf_size_small_peer_cnt;
	u64	buf_size_small_cnt;
	u64	buf_full_peer_cnt;
	u64	buf_full_cnt;
	u64	reuse_cnt;
	u64	alloc_cnt;
	u64	dgrade_cnt;
};

struct smc_stats_memsize {
	u64	buf[SMC_BUF_MAX];
};

struct smc_stats_tech {
	struct smc_stats_memsize tx_rmbsize;
	struct smc_stats_memsize rx_rmbsize;
	struct smc_stats_memsize tx_pd;
	struct smc_stats_memsize rx_pd;
	struct smc_stats_rmbcnt rmb_tx;
	struct smc_stats_rmbcnt rmb_rx;
	u64	clnt_v1_succ_cnt;
	u64	clnt_v2_succ_cnt;
	u64	srv_v1_succ_cnt;
	u64	srv_v2_succ_cnt;
	u64	urg_data_cnt;
	u64	splice_cnt;
	u64	cork_cnt;
	u64	ndly_cnt;
	u64	rx_bytes;
	u64	tx_bytes;
	u64	rx_cnt;
	u64	tx_cnt;
};

struct smc_stats {
	struct smc_stats_tech	smc[2];
	u64	clnt_hshake_err_cnt;
	u64	srv_hshake_err_cnt;
};

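/*
 * Bucket the payload length histogram: with l = payload length and
 * m = SMC_BUF_MAX - 1, bucket 0 covers 0 < l <= 2^13, bucket n
 * (1 <= n <= m - 1) covers 2^(n + 12) < l <= 2^(n + 13), and bucket m
 * takes everything above 2^(m + 12). fls64((l - 1) >> 13) yields this
 * directly, e.g. l = 8192 -> bucket 0 (SMC_BUF_8K), l = 8193 -> bucket 1.
 */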
#define SMC_STAT_PAYLOAD_SUB(_smc_stats, _tech, key, _len, _rc) \
do { \
	typeof(_smc_stats) stats = (_smc_stats); \
	typeof(_tech) t = (_tech); \
	typeof(_len) l = (_len); \
	int _pos; \
	typeof(_rc) r = (_rc); \
	int m = SMC_BUF_MAX - 1; \
	this_cpu_inc((*stats).smc[t].key ## _cnt); \
	if (r <= 0 || l <= 0) \
		break; \
	_pos = fls64((l - 1) >> 13); \
	_pos = (_pos <= m) ? _pos : m; \
	this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \
	this_cpu_add((*stats).smc[t].key ## _bytes, r); \
} \
while (0)

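/*
 * Per-payload accounting wrappers: SMC_STAT_TX_PAYLOAD()/SMC_STAT_RX_PAYLOAD()
 * pick the SMC-D or SMC-R counter set based on whether the connection has an
 * RDMA link (is_smcd = !conn.lnk) and feed length and return code into
 * SMC_STAT_PAYLOAD_SUB().
 */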
#define SMC_STAT_TX_PAYLOAD(_smc, length, rcode) \
do { \
	typeof(_smc) __smc = _smc; \
	struct net *_net = sock_net(&__smc->sk); \
	struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
	typeof(length) _len = (length); \
	typeof(rcode) _rc = (rcode); \
	bool is_smcd = !__smc->conn.lnk; \
	if (is_smcd) \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, tx, _len, _rc); \
	else \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, tx, _len, _rc); \
} \
while (0)

#define SMC_STAT_RX_PAYLOAD(_smc, length, rcode) \
do { \
	typeof(_smc) __smc = _smc; \
	struct net *_net = sock_net(&__smc->sk); \
	struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
	typeof(length) _len = (length); \
	typeof(rcode) _rc = (rcode); \
	bool is_smcd = !__smc->conn.lnk; \
	if (is_smcd) \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, rx, _len, _rc); \
	else \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, rx, _len, _rc); \
} \
while (0)

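/*
 * Account an RMB size in the same power-of-two histogram as the payload
 * lengths above (fls() on (len - 1) >> 13); anything larger than 1024K
 * falls into the SMC_BUF_G_1024K bucket.
 */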
#define SMC_STAT_RMB_SIZE_SUB(_smc_stats, _tech, k, _len) \
do { \
	typeof(_len) _l = (_len); \
	typeof(_tech) t = (_tech); \
	int _pos; \
	int m = SMC_BUF_MAX - 1; \
	if (_l <= 0) \
		break; \
	_pos = fls((_l - 1) >> 13); \
	_pos = (_pos <= m) ? _pos : m; \
	this_cpu_inc((*(_smc_stats)).smc[t].k ## _rmbsize.buf[_pos]); \
} \
while (0)

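/*
 * RMB event counters: SMC_STAT_RMB_SUB() bumps one field of
 * struct smc_stats_rmbcnt for the given technology and direction;
 * SMC_STAT_RMB() and the wrappers below select the counter set from
 * is_smcd/is_rx.
 */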
#define SMC_STAT_RMB_SUB(_smc_stats, type, t, key) \
	this_cpu_inc((*(_smc_stats)).smc[t].rmb ## _ ## key.type ## _cnt)

#define SMC_STAT_RMB_SIZE(_smc, _is_smcd, _is_rx, _len) \
do { \
	struct net *_net = sock_net(&(_smc)->sk); \
	struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
	typeof(_is_smcd) is_d = (_is_smcd); \
	typeof(_is_rx) is_r = (_is_rx); \
	typeof(_len) l = (_len); \
	if ((is_d) && (is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, rx, l); \
	if ((is_d) && !(is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, tx, l); \
	if (!(is_d) && (is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, rx, l); \
	if (!(is_d) && !(is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, tx, l); \
} \
while (0)

#define SMC_STAT_RMB(_smc, type, _is_smcd, _is_rx) \
do { \
	struct net *net = sock_net(&(_smc)->sk); \
	struct smc_stats __percpu *_smc_stats = net->smc.smc_stats; \
	typeof(_is_smcd) is_d = (_is_smcd); \
	typeof(_is_rx) is_r = (_is_rx); \
	if ((is_d) && (is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, rx); \
	if ((is_d) && !(is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, tx); \
	if (!(is_d) && (is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, rx); \
	if (!(is_d) && !(is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, tx); \
} \
while (0)

#define SMC_STAT_BUF_REUSE(smc, is_smcd, is_rx) \
	SMC_STAT_RMB(smc, reuse, is_smcd, is_rx)

#define SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rx) \
	SMC_STAT_RMB(smc, alloc, is_smcd, is_rx)

#define SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rx) \
	SMC_STAT_RMB(smc, dgrade, is_smcd, is_rx)

#define SMC_STAT_RMB_TX_PEER_FULL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_full_peer, is_smcd, false)

#define SMC_STAT_RMB_TX_FULL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_full, is_smcd, false)

#define SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_size_small_peer, is_smcd, false)

#define SMC_STAT_RMB_TX_SIZE_SMALL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_size_small, is_smcd, false)

#define SMC_STAT_RMB_RX_SIZE_SMALL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_size_small, is_smcd, true)

#define SMC_STAT_RMB_RX_FULL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_full, is_smcd, true)

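/*
 * Increment a single per-technology counter of struct smc_stats_tech,
 * e.g. SMC_STAT_INC(smc, cork_cnt); the technology is derived from the
 * connection (SMC-D if it has no RDMA link).
 */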
#define SMC_STAT_INC(_smc, type) \
do { \
	typeof(_smc) __smc = _smc; \
	bool is_smcd = !(__smc)->conn.lnk; \
	struct net *net = sock_net(&(__smc)->sk); \
	struct smc_stats __percpu *smc_stats = net->smc.smc_stats; \
	if ((is_smcd)) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].type); \
	else \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].type); \
} \
while (0)

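/*
 * Count a successful client-side handshake, split by SMC version and
 * technology as advertised in the received CLC accept message (_aclc).
 */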
#define SMC_STAT_CLNT_SUCC_INC(net, _aclc) \
do { \
	typeof(_aclc) acl = (_aclc); \
	bool is_v2 = (acl->hdr.version == SMC_V2); \
	bool is_smcd = (acl->hdr.typev1 == SMC_TYPE_D); \
	struct smc_stats __percpu *smc_stats = (net)->smc.smc_stats; \
	if (is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v2_succ_cnt); \
	else if (is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v2_succ_cnt); \
	else if (!is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v1_succ_cnt); \
	else if (!is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v1_succ_cnt); \
} \
while (0)

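/*
 * Count a successful server-side handshake; version and technology are
 * taken from the init info passed as _ini (is_smcd, smcd_version/
 * smcr_version).
 */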
#define SMC_STAT_SERV_SUCC_INC(net, _ini) \
do { \
	typeof(_ini) i = (_ini); \
	bool is_smcd = (i->is_smcd); \
	u8 version = is_smcd ? i->smcd_version : i->smcr_version; \
	bool is_v2 = (version & SMC_V2); \
	typeof(net->smc.smc_stats) smc_stats = (net)->smc.smc_stats; \
	if (is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \
	else if (is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v2_succ_cnt); \
	else if (!is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v1_succ_cnt); \
	else if (!is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v1_succ_cnt); \
} \
while (0)

int smc_nl_get_stats(struct sk_buff *skb, struct netlink_callback *cb);
int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb);
int smc_stats_init(struct net *net);
void smc_stats_exit(struct net *net);

#endif /* NET_SMC_SMC_STATS_H_ */