DPDK  18.11.11
examples/quota_watermark/qw/main.c
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
*/
#include <rte_eal.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_ring.h>
#include <rte_byteorder.h>
#include "args.h"
#include "main.h"
#include "init.h"
#include "../include/conf.h"
#ifdef QW_SOFTWARE_FC
/* Software flow control enabled: emit a real 802.3x PAUSE frame. */
#define SEND_PAUSE_FRAME(port_id, duration) send_pause_frame(port_id, duration)
#else
/* Software flow control disabled: compiles to a no-op. */
#define SEND_PAUSE_FRAME(port_id, duration) do { } while(0)
#endif

/* EtherType of IEEE 802.3x MAC control (flow control) frames. */
#define ETHER_TYPE_FLOW_CONTROL 0x8808

/*
 * Payload of an 802.3x flow-control frame: opcode (0x0001 = PAUSE)
 * followed by the pause duration, both big-endian on the wire.
 */
struct ether_fc_frame {
	uint16_t opcode;
	uint16_t param;
} __attribute__((__packed__));

/*
 * Tunables read through pointers — presumably placed in shared memory by
 * setup_shared_variables() so a control process can adjust them at
 * runtime (TODO confirm against init.c).
 */
int *quota;                  /* max packets moved per burst */
unsigned int *low_watermark; /* ring level below which an overloaded ring recovers */
unsigned int *high_watermark; /* ring level above which flow control kicks in */

uint16_t port_pairs[RTE_MAX_ETHPORTS]; /* RX port -> paired TX port */
struct rte_ring *rings[RTE_MAX_LCORE][RTE_MAX_ETHPORTS]; /* per-core, per-port pipeline rings */
struct rte_mempool *mbuf_pool; /* packet buffer pool shared by all ports */
static void send_pause_frame(uint16_t port_id, uint16_t duration)
{
struct rte_mbuf *mbuf;
struct ether_fc_frame *pause_frame;
struct ether_hdr *hdr;
struct ether_addr mac_addr;
RTE_LOG_DP(DEBUG, USER1,
"Sending PAUSE frame (duration=%d) on port %d\n",
duration, port_id);
/* Get a mbuf from the pool */
mbuf = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(mbuf == NULL))
return;
/* Prepare a PAUSE frame */
hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
pause_frame = (struct ether_fc_frame *) &hdr[1];
rte_eth_macaddr_get(port_id, &mac_addr);
ether_addr_copy(&mac_addr, &hdr->s_addr);
void *tmp = &hdr->d_addr.addr_bytes[0];
*((uint64_t *)tmp) = 0x010000C28001ULL;
hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_FLOW_CONTROL);
pause_frame->opcode = rte_cpu_to_be_16(0x0001);
pause_frame->param = rte_cpu_to_be_16(duration);
mbuf->pkt_len = 60;
mbuf->data_len = 60;
rte_eth_tx_burst(port_id, 0, &mbuf, 1);
}
/*
 * Return the highest *enabled* lcore ID strictly below @lcore_id, or
 * (unsigned int)-1 if there is none.  Used to locate the upstream stage
 * in the core pipeline.
 *
 * Fix: the loop returned its very first index unconditionally, i.e.
 * lcore_id - 1 even when that core is disabled — downstream stages would
 * then poll rings belonging to cores that never run.  Restore the
 * rte_lcore_is_enabled() filter.
 */
static unsigned int
get_previous_lcore_id(unsigned int lcore_id)
{
	int i;

	for (i = lcore_id - 1; i >= 0; i--)
		if (rte_lcore_is_enabled(i))
			return i;

	return -1;
}
/*
 * Return the highest enabled lcore ID, or 0 if none is found.  The last
 * enabled core runs send_stage().
 *
 * Fix: the loop returned its very first index unconditionally, i.e.
 * RTE_MAX_LCORE — an out-of-range lcore ID — regardless of which cores
 * are enabled.  Restore the rte_lcore_is_enabled() filter.
 */
static unsigned int
get_last_lcore_id(void)
{
	int i;

	for (i = RTE_MAX_LCORE; i >= 0; i--)
		if (rte_lcore_is_enabled(i))
			return i;

	return 0;
}
/*
 * RX stage: runs forever on the calling (master) core.  For every port
 * selected by portmask it pulls packets from the NIC and enqueues them
 * on this core's per-port ring, applying watermark-based flow control:
 * once a ring fills past *high_watermark the port is marked overloaded
 * and a PAUSE frame is sent; RX resumes when the ring drains below
 * *low_watermark.
 */
static void
receive_stage(__attribute__((unused)) void *args)
{
	int i, ret;
	uint16_t port_id;
	uint16_t nb_rx_pkts;
	unsigned int lcore_id;
	unsigned int free;
	/* NOTE(review): assumes *quota <= MAX_PKT_QUOTA — TODO confirm
	 * this is enforced by the argument parser. */
	struct rte_mbuf *pkts[MAX_PKT_QUOTA];
	struct rte_ring *ring;
	/* Only element 0 is explicit; presumably RING_READY == 0 so the
	 * implicit zero-init covers the rest — verify in conf.h. */
	enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };

	lcore_id = rte_lcore_id();

	RTE_LOG(INFO, USER1,
		"%s() started on core %u\n", __func__, lcore_id);

	while (1) {
		/* Process each port round robin style */
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {

			if (!is_bit_set(port_id, portmask))
				continue;

			ring = rings[lcore_id][port_id];

			/* Overloaded port: skip RX until the ring drains
			 * below the low watermark. */
			if (ring_state[port_id] != RING_READY) {
				if (rte_ring_count(ring) > *low_watermark)
					continue;
				else
					ring_state[port_id] = RING_READY;
			}

			/* Enqueue received packets on the RX ring */
			nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
					(uint16_t) *quota);
			ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
					nb_rx_pkts, &free);
			/* Ring occupancy = RING_SIZE - free slots. */
			if (RING_SIZE - free > *high_watermark) {
				ring_state[port_id] = RING_OVERLOADED;
				send_pause_frame(port_id, 1337);
			}

			/* Bulk enqueue is all-or-nothing: ret == 0 means
			 * nothing was enqueued. */
			if (ret == 0) {

				/*
				 * Return  mbufs to the pool,
				 * effectively dropping packets
				 */
				for (i = 0; i < nb_rx_pkts; i++)
					rte_pktmbuf_free(pkts[i]);
			}
		}
	}
}
/*
 * Intermediate pipeline stage: runs forever on a worker core.  For every
 * enabled port it moves packets from the upstream core's ring to this
 * core's ring, propagating the same watermark-based back-pressure as the
 * RX stage (minus the PAUSE frame).
 */
static int
pipeline_stage(__attribute__((unused)) void *args)
{
	int i, ret;
	int nb_pkts;
	uint16_t port_id;
	unsigned int lcore_id, prev_lcore_id;
	unsigned int free_slots;
	void *burst[MAX_PKT_QUOTA];
	struct rte_ring *in_ring, *out_ring;
	enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };

	lcore_id = rte_lcore_id();
	prev_lcore_id = get_previous_lcore_id(lcore_id);

	RTE_LOG(INFO, USER1,
		"%s() started on core %u - processing packets from core %u\n",
		__func__, lcore_id, prev_lcore_id);

	for (;;) {
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {

			if (!is_bit_set(port_id, portmask))
				continue;

			out_ring = rings[lcore_id][port_id];
			in_ring = rings[prev_lcore_id][port_id];

			/* While overloaded, wait until the downstream ring
			 * drains below the low watermark before resuming. */
			if (ring_state[port_id] != RING_READY &&
			    rte_ring_count(out_ring) > *low_watermark)
				continue;
			ring_state[port_id] = RING_READY;

			/* Pull up to *quota packets from upstream. */
			nb_pkts = rte_ring_dequeue_burst(in_ring, burst,
					*quota, NULL);
			if (unlikely(nb_pkts < 0))
				continue;

			/* All-or-nothing enqueue onto the downstream ring. */
			ret = rte_ring_enqueue_bulk(out_ring, burst,
					nb_pkts, &free_slots);
			/* Occupancy = RING_SIZE - free slots. */
			if (RING_SIZE - free_slots > *high_watermark)
				ring_state[port_id] = RING_OVERLOADED;

			if (ret == 0) {
				/* Nothing was enqueued: return the mbufs to
				 * the pool, effectively dropping the burst. */
				for (i = 0; i < nb_pkts; i++)
					rte_pktmbuf_free(burst[i]);
			}
		}
	}

	return 0;
}
static int
send_stage(__attribute__((unused)) void *args)
{
uint16_t nb_dq_pkts;
uint16_t port_id;
uint16_t dest_port_id;
unsigned int lcore_id, previous_lcore_id;
struct rte_ring *tx;
struct rte_mbuf *tx_pkts[MAX_PKT_QUOTA];
lcore_id = rte_lcore_id();
previous_lcore_id = get_previous_lcore_id(lcore_id);
RTE_LOG(INFO, USER1,
"%s() started on core %u - processing packets from core %u\n",
__func__, lcore_id, previous_lcore_id);
while (1) {
/* Process each ring round robin style */
for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
if (!is_bit_set(port_id, portmask))
continue;
dest_port_id = port_pairs[port_id];
tx = rings[previous_lcore_id][port_id];
if (rte_ring_empty(tx))
continue;
/* Dequeue packets from tx and send them */
nb_dq_pkts = (uint16_t) rte_ring_dequeue_burst(tx,
(void *) tx_pkts, *quota, NULL);
rte_eth_tx_burst(dest_port_id, 0, tx_pkts, nb_dq_pkts);
/* TODO: Check if nb_dq_pkts == nb_tx_pkts? */
}
}
return 0;
}
int
main(int argc, char **argv)
{
int ret;
unsigned int lcore_id, master_lcore_id, last_lcore_id;
uint16_t port_id;
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");
argc -= ret;
argv += ret;
init_dpdk();
setup_shared_variables();
*quota = 32;
*low_watermark = 60 * RING_SIZE / 100;
last_lcore_id = get_last_lcore_id();
master_lcore_id = rte_get_master_lcore();
/* Parse the application's arguments */
ret = parse_qw_args(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid quota/watermark argument(s)\n");
/* Create a pool of mbuf to store packets */
mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL, 32, 0,
MBUF_DATA_SIZE, rte_socket_id());
if (mbuf_pool == NULL)
for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
if (is_bit_set(port_id, portmask)) {
configure_eth_port(port_id);
init_ring(master_lcore_id, port_id);
}
pair_ports();
/*
* Start pipeline_connect() on all the available slave lcores
* but the last
*/
for (lcore_id = 0 ; lcore_id < last_lcore_id; lcore_id++) {
if (rte_lcore_is_enabled(lcore_id) &&
lcore_id != master_lcore_id) {
for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
if (is_bit_set(port_id, portmask))
init_ring(lcore_id, port_id);
rte_eal_remote_launch(pipeline_stage,
NULL, lcore_id);
}
}
/* Start send_stage() on the last slave core */
rte_eal_remote_launch(send_stage, NULL, last_lcore_id);
/* Start receive_stage() on the master core */
receive_stage(NULL);
return 0;
}
rte_cpu_to_be_16
static rte_be16_t rte_cpu_to_be_16(uint16_t x)
unlikely
#define unlikely(x)
Definition: rte_branch_prediction.h:38
rte_lcore.h
rte_log.h
rte_log_set_global_level
void rte_log_set_global_level(uint32_t level)
rte_socket_id
unsigned rte_socket_id(void)
rte_strerror
const char * rte_strerror(int errnum)
ether_hdr::s_addr
struct ether_addr s_addr
Definition: rte_ether.h:273
rte_ring_count
static unsigned rte_ring_count(const struct rte_ring *r)
Definition: rte_ring.h:682
rte_ring.h
rte_ring_enqueue_bulk
static __rte_always_inline unsigned int rte_ring_enqueue_bulk(struct rte_ring *r, void *const *obj_table, unsigned int n, unsigned int *free_space)
Definition: rte_ring.h:470
rte_mbuf::data_len
uint16_t data_len
Definition: rte_mbuf.h:559
rte_ring_empty
static int rte_ring_empty(const struct rte_ring *r)
Definition: rte_ring.h:729
rte_get_master_lcore
static unsigned rte_get_master_lcore(void)
Definition: rte_lcore.h:121
rte_eal.h
rte_eth_rx_burst
static uint16_t rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
Definition: rte_ethdev.h:3874
rte_mbuf
Definition: rte_mbuf.h:478
ether_addr
Definition: rte_ether.h:57
rte_eth_macaddr_get
void rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
rte_ring_dequeue_burst
static __rte_always_inline unsigned rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available)
Definition: rte_ring.h:933
rte_errno.h
rte_pktmbuf_mtod
#define rte_pktmbuf_mtod(m, t)
Definition: rte_mbuf.h:1909
ether_addr_copy
static void ether_addr_copy(const struct ether_addr *ea_from, struct ether_addr *ea_to)
Definition: rte_ether.h:225
rte_eth_tx_burst
static uint16_t rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
Definition: rte_ethdev.h:4149
rte_lcore_id
static unsigned rte_lcore_id(void)
Definition: rte_lcore.h:109
rte_ring
Definition: rte_ring.h:83
rte_exit
void rte_exit(int exit_code, const char *format,...)
rte_common.h
ether_hdr::ether_type
uint16_t ether_type
Definition: rte_ether.h:274
ether_addr::addr_bytes
uint8_t addr_bytes[ETHER_ADDR_LEN]
Definition: rte_ether.h:58
rte_panic
#define rte_panic(...)
Definition: rte_debug.h:50
rte_debug.h
RTE_LOG_INFO
#define RTE_LOG_INFO
Definition: rte_log.h:85
rte_eal_init
int rte_eal_init(int argc, char **argv)
rte_mempool
Definition: rte_mempool.h:213
rte_pktmbuf_pool_create
struct rte_mempool * rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id)
rte_mbuf::pkt_len
uint32_t pkt_len
Definition: rte_mbuf.h:558
rte_pktmbuf_free
static void rte_pktmbuf_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:1747
rte_ethdev.h
RTE_LOG
#define RTE_LOG(l, t,...)
Definition: rte_log.h:320
rte_mbuf.h
rte_launch.h
rte_eal_remote_launch
int rte_eal_remote_launch(lcore_function_t *f, void *arg, unsigned slave_id)
ether_hdr::d_addr
struct ether_addr d_addr
Definition: rte_ether.h:272
RTE_LOG_DP
#define RTE_LOG_DP(l, t,...)
Definition: rte_log.h:344
ether_hdr
Definition: rte_ether.h:271
rte_lcore_is_enabled
static int rte_lcore_is_enabled(unsigned lcore_id)
Definition: rte_lcore.h:225
rte_byteorder.h
rte_errno
#define rte_errno
Definition: rte_errno.h:29
rte_pktmbuf_alloc
static struct rte_mbuf * rte_pktmbuf_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:1328