The implementation of sockmap based on strp_init

sockmap attaches a stream parser (strparser) to each socket it manages; smap_init_sock() registers the sockmap callbacks with strp_init():
static int smap_init_sock(struct smap_psock *psock,
                          struct sock *sk)
{
        static const struct strp_callbacks cb = {
                .rcv_msg = smap_read_sock_strparser,
                .parse_msg = smap_parse_func_strparser,
                .read_sock_done = smap_read_sock_done,
        };

        return strp_init(&psock->strp, sk, &cb);
}
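For reference, these callbacks follow the generic strparser contract: parse_msg returns the total length of the message at the head of the stream (0 if more data is needed, negative to abort the parser), and rcv_msg is handed each fully assembled message. As an illustration only (example_parse_msg is hypothetical, not part of the kernel sources), a parser for a 4-byte big-endian length-prefixed framing could look like this:

/* Hypothetical parse_msg callback: each message is a 4-byte big-endian
 * length header followed by that many bytes of payload.
 */
static int example_parse_msg(struct strparser *strp, struct sk_buff *skb)
{
        __be32 hdr;

        /* Header not fully received yet: ask strparser for more data */
        if (skb_copy_bits(skb, 0, &hdr, sizeof(hdr)) < 0)
                return 0;

        /* Total message length = header + payload */
        return sizeof(hdr) + be32_to_cpu(hdr);
}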
The rcv_msg callback is invoked for each complete message the strparser assembles; sockmap uses it to run the verdict on the skb:

static void smap_read_sock_strparser(struct strparser *strp,
                                     struct sk_buff *skb)
{
        struct smap_psock *psock;

        rcu_read_lock();
        psock = container_of(strp, struct smap_psock, strp);
        smap_do_verdict(psock, skb);
        rcu_read_unlock();
}
strp_init() wires the caller's callbacks into the strparser and sets up the message-assembly timer and the receive work item:

int strp_init(struct strparser *strp, struct sock *csk,
              struct strp_callbacks *cb)
{
        struct socket *sock = csk->sk_socket;

        if (!cb || !cb->rcv_msg || !cb->parse_msg)
                return -EINVAL;

        /* The underlying transport must support read_sock and peek_len */
        if (!sock->ops->read_sock || !sock->ops->peek_len)
                return -EAFNOSUPPORT;

        memset(strp, 0, sizeof(*strp));

        strp->sk = csk;

        setup_timer(&strp->rx_msg_timer, strp_rx_msg_timeout,
                    (unsigned long)strp);

        INIT_WORK(&strp->rx_work, strp_rx_work);

        strp->cb.rcv_msg = cb->rcv_msg;
        strp->cb.parse_msg = cb->parse_msg;
        strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done;
        strp->cb.abort_parser = cb->abort_parser ? : strp_abort_rx_strp;

        return 0;
}
The receive path: sk_data_ready --> strp_data_ready --> strp_read_sock --> sock->ops->read_sock(strp->sk, &desc, strp_recv)
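On the sockmap side, sk_data_ready is replaced with a hook that forwards into the strparser. A simplified sketch of that hook, loosely following the 4.14-era kernel/bpf/sockmap.c (names and locking details vary across kernel versions):

static void smap_data_ready(struct sock *sk)
{
        struct smap_psock *psock;

        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (likely(psock)) {
                /* Hand the socket's new data to the attached strparser */
                write_lock_bh(&sk->sk_callback_lock);
                strp_data_ready(&psock->strp);
                write_unlock_bh(&sk->sk_callback_lock);
        }
        rcu_read_unlock();
}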
strp_data_ready() runs from the socket's data-ready callback in BH context. It either reads the stream immediately or, if the socket is locked by a process context or memory is tight, defers the work to the strp_wq workqueue:

void strp_data_ready(struct strparser *strp)
{
        if (unlikely(strp->rx_stopped))
                return;

        /* This check is needed to synchronize with do_strp_rx_work.
         * do_strp_rx_work acquires a process lock (lock_sock) whereas
         * the lock held here is bh_lock_sock. The two locks can be
         * held by different threads at the same time, but bh_lock_sock
         * allows a thread in BH context to safely check if the process
         * lock is held. In this case, if the lock is held, queue work.
         */
        if (sock_owned_by_user(strp->sk)) {
                queue_work(strp_wq, &strp->rx_work);
                return;
        }

        if (strp->rx_paused)
                return;

        if (strp->rx_need_bytes) {
                if (strp_peek_len(strp) >= strp->rx_need_bytes)
                        strp->rx_need_bytes = 0;
                else
                        return;
        }

        if (strp_read_sock(strp) == -ENOMEM)
                queue_work(strp_wq, &strp->rx_work);
}
static int strp_read_sock(struct strparser *strp)
{
        struct socket *sock = strp->sk->sk_socket;
        read_descriptor_t desc;

        desc.arg.data = strp;
        desc.error = 0;
        desc.count = 1; /* give more than one skb per call */

        /* sk should be locked here, so okay to do read_sock */
        sock->ops->read_sock(strp->sk, &desc, strp_recv);

        desc.error = strp->cb.read_sock_done(strp, desc.error);

        return desc.error;
}
strp_recv() is the actor passed to read_sock(); it is invoked for each skb on the receive queue and forwards into __strp_recv():

static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                     unsigned int orig_offset, size_t orig_len)
{
        struct strparser *strp = (struct strparser *)desc->arg.data;

        return __strp_recv(desc, orig_skb, orig_offset, orig_len,
                           strp->sk->sk_rcvbuf, strp->sk->sk_rcvtimeo);
}
__strp_recv() does the actual framing. Only the two key callback invocations are shown here: parse_msg determines the length of the message at the head of the stream, and once a full message has been assembled, rcv_msg hands it to the upper layer (sockmap's verdict path above):

static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                       unsigned int orig_offset, size_t orig_len,
                       size_t max_msg_size, long timeo)
{
        ...
        len = (*strp->cb.parse_msg)(strp, head);
        ...
        /* Give skb to upper layer */
        strp->cb.rcv_msg(strp, head);
        ...
}
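The parse_msg callback that sockmap registered above, smap_parse_func_strparser, is essentially a thin wrapper that runs the attached BPF SK_SKB parser program and returns its result as the message length. A simplified sketch, again loosely following the 4.14-era kernel/bpf/sockmap.c (the real function also temporarily sets skb->sk for the program, and field and helper names differ between versions):

static int smap_parse_func_strparser(struct strparser *strp,
                                     struct sk_buff *skb)
{
        struct smap_psock *psock;
        struct bpf_prog *prog;
        int rc;

        rcu_read_lock();
        psock = container_of(strp, struct smap_psock, strp);
        prog = READ_ONCE(psock->bpf_parse);
        if (unlikely(!prog)) {
                /* No parser program attached: treat the whole skb as one message */
                rcu_read_unlock();
                return skb->len;
        }

        /* The program's return value becomes the message length used by
         * __strp_recv() to frame the stream.
         */
        rc = BPF_PROG_RUN(prog, skb);
        rcu_read_unlock();
        return rc;
}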
If a partially received message is not completed before rx_msg_timer expires, the parser is aborted:

static void strp_rx_msg_timeout(unsigned long arg)
{
        struct strparser *strp = (struct strparser *)arg;

        /* Message assembly timed out */
        STRP_STATS_INCR(strp->stats.rx_msg_timeouts);

        lock_sock(strp->sk);
        strp->cb.abort_parser(strp, ETIMEDOUT);
        release_sock(strp->sk);
}