net/smc: Add SMC statistics support
Add the ability to collect SMC statistics information. Per-CPU
variables are used to collect the statistics for better performance
and to reduce concurrency pitfalls. The code that collects the
statistics data is implemented in macros to increase code reuse and
readability.

Signed-off-by: Guvenc Gulce <[email protected]>
Signed-off-by: Karsten Graul <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
guvenc authored and davem330 committed Jun 16, 2021
1 parent fb0a1da commit e0e4b8f
Showing 7 changed files with 395 additions and 21 deletions.
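
The per-CPU counter and macro pattern described in the commit message can be sketched as follows. This is a minimal, hypothetical illustration (the real structure and macros live in net/smc/smc_stats.h, which is not part of this excerpt); it only shows how alloc_percpu(), this_cpu_inc() and a thin wrapper macro combine into lock-free statistics counting.

```c
/* Illustrative sketch only, not the smc_stats.h definitions from this
 * commit: a per-cpu counter structure plus an increment macro, the
 * pattern the commit message describes.
 */
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/errno.h>

struct demo_smc_stats {			/* hypothetical counter set */
	u64	clnt_hshake_err_cnt;
	u64	srv_hshake_err_cnt;
	u64	sendpage_cnt;
	u64	splice_cnt;
};

static struct demo_smc_stats __percpu *demo_smc_stats;

/* each CPU increments its own copy, so no lock or atomic is needed */
#define DEMO_SMC_STAT_INC(field)	this_cpu_inc(demo_smc_stats->field)

static int __init demo_smc_stats_init(void)
{
	demo_smc_stats = alloc_percpu(struct demo_smc_stats);
	return demo_smc_stats ? 0 : -ENOMEM;
}

static void demo_smc_stats_exit(void)
{
	free_percpu(demo_smc_stats);
}
```

With such a macro, a call site shrinks to a single line, much like the SMC_STAT_INC(..., sendpage_cnt) and SMC_STAT_INC(..., splice_cnt) calls added in the af_smc.c hunks below; that is the readability and reuse gain the commit message refers to.
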
2 changes: 1 addition & 1 deletion net/smc/Makefile
@@ -2,4 +2,4 @@
obj-$(CONFIG_SMC) += smc.o
obj-$(CONFIG_SMC_DIAG) += smc_diag.o
smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o
smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o smc_netlink.o
smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o smc_netlink.o smc_stats.o
89 changes: 71 additions & 18 deletions net/smc/af_smc.c
@@ -49,6 +49,7 @@
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
#include "smc_stats.h"

static DEFINE_MUTEX(smc_server_lgr_pending); /* serialize link group
* creation on server
@@ -508,9 +509,42 @@ static void smc_link_save_peer_info(struct smc_link *link,
link->peer_mtu = clc->r0.qp_mtu;
}

static void smc_switch_to_fallback(struct smc_sock *smc)
static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
struct smc_stats_fback *fback_arr)
{
int cnt;

for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) {
if (fback_arr[cnt].fback_code == smc->fallback_rsn) {
fback_arr[cnt].count++;
break;
}
if (!fback_arr[cnt].fback_code) {
fback_arr[cnt].fback_code = smc->fallback_rsn;
fback_arr[cnt].count++;
break;
}
}
}

static void smc_stat_fallback(struct smc_sock *smc)
{
mutex_lock(&smc_stat_fback_rsn);
if (smc->listen_smc) {
smc_stat_inc_fback_rsn_cnt(smc, fback_rsn.srv);
fback_rsn.srv_fback_cnt++;
} else {
smc_stat_inc_fback_rsn_cnt(smc, fback_rsn.clnt);
fback_rsn.clnt_fback_cnt++;
}
mutex_unlock(&smc_stat_fback_rsn);
}

static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
{
smc->use_fallback = true;
smc->fallback_rsn = reason_code;
smc_stat_fallback(smc);
if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
smc->clcsock->file = smc->sk.sk_socket->file;
smc->clcsock->file->private_data = smc->clcsock;
@@ -522,8 +556,7 @@ static void smc_switch_to_fallback(struct smc_sock *smc)
/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
smc_switch_to_fallback(smc);
smc->fallback_rsn = reason_code;
smc_switch_to_fallback(smc, reason_code);
smc_copy_sock_settings_to_clc(smc);
smc->connect_nonblock = 0;
if (smc->sk.sk_state == SMC_INIT)
@@ -538,13 +571,15 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
int rc;

if (reason_code < 0) { /* error, fallback is not possible */
this_cpu_inc(smc_stats->clnt_hshake_err_cnt);
if (smc->sk.sk_state == SMC_INIT)
sock_put(&smc->sk); /* passive closing */
return reason_code;
}
if (reason_code != SMC_CLC_DECL_PEERDECL) {
rc = smc_clc_send_decline(smc, reason_code, version);
if (rc < 0) {
this_cpu_inc(smc_stats->clnt_hshake_err_cnt);
if (smc->sk.sk_state == SMC_INIT)
sock_put(&smc->sk); /* passive closing */
return rc;
@@ -992,6 +1027,7 @@ static int __smc_connect(struct smc_sock *smc)
if (rc)
goto vlan_cleanup;

SMC_STAT_CLNT_SUCC_INC(aclc);
smc_connect_ism_vlan_cleanup(smc, ini);
kfree(buf);
kfree(ini);
@@ -1308,6 +1344,7 @@ static void smc_listen_out_err(struct smc_sock *new_smc)
{
struct sock *newsmcsk = &new_smc->sk;

this_cpu_inc(smc_stats->srv_hshake_err_cnt);
if (newsmcsk->sk_state == SMC_INIT)
sock_put(&new_smc->sk); /* passive closing */
newsmcsk->sk_state = SMC_CLOSED;
@@ -1325,8 +1362,7 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
smc_listen_out_err(new_smc);
return;
}
smc_switch_to_fallback(new_smc);
new_smc->fallback_rsn = reason_code;
smc_switch_to_fallback(new_smc, reason_code);
if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
smc_listen_out_err(new_smc);
@@ -1699,8 +1735,7 @@ static void smc_listen_work(struct work_struct *work)

/* check if peer is smc capable */
if (!tcp_sk(newclcsock->sk)->syn_smc) {
smc_switch_to_fallback(new_smc);
new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC);
smc_listen_out_connected(new_smc);
return;
}
@@ -1778,6 +1813,7 @@ static void smc_listen_work(struct work_struct *work)
}
smc_conn_save_peer_info(new_smc, cclc);
smc_listen_out_connected(new_smc);
SMC_STAT_SERV_SUCC_INC(ini);
goto out_free;

out_unlock:
@@ -1984,18 +2020,19 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)

if (msg->msg_flags & MSG_FASTOPEN) {
if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
smc_switch_to_fallback(smc);
smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
} else {
rc = -EINVAL;
goto out;
}
}

if (smc->use_fallback)
if (smc->use_fallback) {
rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
else
} else {
rc = smc_tx_sendmsg(smc, msg, len);
SMC_STAT_TX_PAYLOAD(smc, len, rc);
}
out:
release_sock(sk);
return rc;
@@ -2030,6 +2067,7 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
} else {
msg->msg_namelen = 0;
rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
SMC_STAT_RX_PAYLOAD(smc, rc, rc);
}

out:
@@ -2194,8 +2232,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
case TCP_FASTOPEN_NO_COOKIE:
/* option not supported by SMC */
if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
smc_switch_to_fallback(smc);
smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
} else {
rc = -EINVAL;
}
@@ -2204,18 +2241,22 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
if (sk->sk_state != SMC_INIT &&
sk->sk_state != SMC_LISTEN &&
sk->sk_state != SMC_CLOSED) {
if (val)
if (val) {
SMC_STAT_INC(!smc->conn.lnk, ndly_cnt);
mod_delayed_work(smc->conn.lgr->tx_wq,
&smc->conn.tx_work, 0);
}
}
break;
case TCP_CORK:
if (sk->sk_state != SMC_INIT &&
sk->sk_state != SMC_LISTEN &&
sk->sk_state != SMC_CLOSED) {
if (!val)
if (!val) {
SMC_STAT_INC(!smc->conn.lnk, cork_cnt);
mod_delayed_work(smc->conn.lgr->tx_wq,
&smc->conn.tx_work, 0);
}
}
break;
case TCP_DEFER_ACCEPT:
@@ -2338,11 +2379,13 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
goto out;
}
release_sock(sk);
if (smc->use_fallback)
if (smc->use_fallback) {
rc = kernel_sendpage(smc->clcsock, page, offset,
size, flags);
else
} else {
SMC_STAT_INC(!smc->conn.lnk, sendpage_cnt);
rc = sock_no_sendpage(sock, page, offset, size, flags);
}

out:
return rc;
@@ -2391,6 +2434,7 @@ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
flags = MSG_DONTWAIT;
else
flags = 0;
SMC_STAT_INC(!smc->conn.lnk, splice_cnt);
rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
}
out:
@@ -2514,10 +2558,16 @@ static int __init smc_init(void)
if (!smc_close_wq)
goto out_alloc_hs_wq;

rc = smc_stats_init();
if (rc) {
pr_err("%s: smc_stats_init fails with %d\n", __func__, rc);
goto out_alloc_wqs;
}

rc = smc_core_init();
if (rc) {
pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
goto out_alloc_wqs;
goto out_smc_stat;
}

rc = smc_llc_init();
@@ -2569,6 +2619,8 @@ static int __init smc_init(void)
proto_unregister(&smc_proto);
out_core:
smc_core_exit();
out_smc_stat:
smc_stats_exit();
out_alloc_wqs:
destroy_workqueue(smc_close_wq);
out_alloc_hs_wq:
@@ -2591,6 +2643,7 @@ static void __exit smc_exit(void)
smc_ib_unregister_client();
destroy_workqueue(smc_close_wq);
destroy_workqueue(smc_hs_wq);
smc_stats_exit();
proto_unregister(&smc_proto6);
proto_unregister(&smc_proto);
smc_pnet_exit();
13 changes: 12 additions & 1 deletion net/smc/smc_core.c
@@ -33,6 +33,7 @@
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_netlink.h"
#include "smc_stats.h"

#define SMC_LGR_NUM_INCR 256
#define SMC_LGR_FREE_DELAY_SERV (600 * HZ)
@@ -2029,6 +2030,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
struct smc_link_group *lgr = conn->lgr;
struct list_head *buf_list;
int bufsize, bufsize_short;
bool is_dgraded = false;
struct mutex *lock; /* lock buffer list */
int sk_buf_size;

@@ -2056,6 +2058,8 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
/* check for reusable slot in the link group */
buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
if (buf_desc) {
SMC_STAT_RMB_SIZE(is_smcd, is_rmb, bufsize);
SMC_STAT_BUF_REUSE(is_smcd, is_rmb);
memset(buf_desc->cpu_addr, 0, bufsize);
break; /* found reusable slot */
}
@@ -2067,9 +2071,16 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)

if (PTR_ERR(buf_desc) == -ENOMEM)
break;
if (IS_ERR(buf_desc))
if (IS_ERR(buf_desc)) {
if (!is_dgraded) {
is_dgraded = true;
SMC_STAT_RMB_DOWNGRADED(is_smcd, is_rmb);
}
continue;
}

SMC_STAT_RMB_ALLOC(is_smcd, is_rmb);
SMC_STAT_RMB_SIZE(is_smcd, is_rmb, bufsize);
buf_desc->used = 1;
mutex_lock(lock);
list_add(&buf_desc->list, buf_list);
8 changes: 8 additions & 0 deletions net/smc/smc_rx.c
@@ -21,6 +21,7 @@
#include "smc_cdc.h"
#include "smc_tx.h" /* smc_tx_consumer_update() */
#include "smc_rx.h"
#include "smc_stats.h"

/* callback implementation to wakeup consumers blocked with smc_rx_wait().
* indirectly called by smc_cdc_msg_recv_action().
@@ -227,6 +228,7 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
conn->urg_state == SMC_URG_READ)
return -EINVAL;

SMC_STAT_INC(!conn->lnk, urg_data_cnt);
if (conn->urg_state == SMC_URG_VALID) {
if (!(flags & MSG_PEEK))
smc->conn.urg_state = SMC_URG_READ;
@@ -303,6 +305,12 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

readable = atomic_read(&conn->bytes_to_rcv);
if (readable >= conn->rmb_desc->len)
SMC_STAT_RMB_RX_FULL(!conn->lnk);

if (len < readable)
SMC_STAT_RMB_RX_SIZE_SMALL(!conn->lnk);
/* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */
rcvbuf_base = conn->rx_off + conn->rmb_desc->cpu_addr;

35 changes: 35 additions & 0 deletions net/smc/smc_stats.c
@@ -0,0 +1,35 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Shared Memory Communications over RDMA (SMC-R) and RoCE
*
* SMC statistics netlink routines
*
* Copyright IBM Corp. 2021
*
* Author(s): Guvenc Gulce
*/
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include "smc_stats.h"

/* serialize fallback reason statistic gathering */
DEFINE_MUTEX(smc_stat_fback_rsn);
struct smc_stats __percpu *smc_stats; /* per cpu counters for SMC */
struct smc_stats_reason fback_rsn;

int __init smc_stats_init(void)
{
memset(&fback_rsn, 0, sizeof(fback_rsn));
smc_stats = alloc_percpu(struct smc_stats);
if (!smc_stats)
return -ENOMEM;

return 0;
}

void smc_stats_exit(void)
{
free_percpu(smc_stats);
}
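
Since each CPU only ever updates its own copy, a reader of these counters has to sum all per-CPU copies when reporting a total, for example in the netlink dump code this file's header comment refers to. The helper below is a hypothetical sketch of that aggregation step and is not part of this commit's diff; it assumes only the smc_stats pointer and the clnt_hshake_err_cnt field that appear elsewhere in the patch.

```c
/* Hypothetical aggregation sketch, not taken from this commit:
 * sum one u64 counter over the per-cpu copies allocated in
 * smc_stats_init().
 */
#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/stddef.h>

static u64 demo_smc_stats_sum(struct smc_stats __percpu *stats,
			      size_t field_offset)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		/* address of the requested counter on this CPU */
		u64 *cnt = (u64 *)((char *)per_cpu_ptr(stats, cpu) +
				   field_offset);

		sum += *cnt;
	}
	return sum;
}

/* example: total client handshake errors seen on all CPUs */
static u64 demo_clnt_hshake_errors(void)
{
	return demo_smc_stats_sum(smc_stats,
				  offsetof(struct smc_stats,
					   clnt_hshake_err_cnt));
}
```
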