Skip to content

Commit

Permalink
Merge branch 'quick_tx_module' of github.com:appneta/tcpreplay into quick_tx_module
Browse files Browse the repository at this point in the history
  • Loading branch information
fklassen committed Oct 7, 2014
2 parents 93e0614 + 517d724 commit 3905371
Show file tree
Hide file tree
Showing 4 changed files with 49 additions and 47 deletions.
29 changes: 10 additions & 19 deletions kernel/linux/include/linux/quick_tx.h
Original file line number Diff line number Diff line change
Expand Up @@ -194,7 +194,6 @@ struct quick_tx_dev {

ktime_t time_start_tx;
ktime_t time_end_tx;

};

struct quick_tx_ops {
Expand Down Expand Up @@ -320,8 +319,8 @@ struct quick_tx {

/*
* Maps a single DMA block for device
* @param dev quick_tx structure returned from a quick_tx_open call
* @return 0 on success mapping, otherwise -1
* @param dev quick_tx structure returned from a quick_tx_open call
* @return 0 on success mapping, otherwise -1
*/
static inline int quick_tx_mmap_mem_block(struct quick_tx* dev) {
if (dev->data->num_mem_blocks < MEM_BLOCK_TABLE_SIZE) {
Expand Down Expand Up @@ -352,18 +351,16 @@ static inline int quick_tx_mmap_mem_block(struct quick_tx* dev) {
* @dev quick_tx device pointer
* @bytes number of bytes the application plans to transmit
*
* @return will return the number of blocks that actually allocated in the kernel
* the number may be below or above the passed in value
* a return of 0 means that there is no more room for further
* allocations
* @return will return the number of blocks that was actually allocated in the kernel
* module. If the return is 0 then there is definitely no more space for allocation
*/
static inline int quick_tx_alloc_mem_space(struct quick_tx* dev, int64_t bytes) {
if (dev && dev->data) {
int num = 0;
int64_t num_pages = bytes / 256; // TODO should this be named 'num_blocks' ??
while (num_pages > 0 && dev->data->num_mem_blocks < MEM_BLOCK_TABLE_SIZE) {
int64_t num_blocks = 1 + (bytes / (PAGE_SIZE * dev->data->num_pages_per_block));
while (num_blocks > 0 && dev->data->num_mem_blocks < MEM_BLOCK_TABLE_SIZE) {
if (quick_tx_mmap_mem_block(dev) == 0) {
num_pages -= dev->data->num_pages_per_block;
num_blocks--;
num++;
} else {
fprintf(stderr, "MAP_FAILED for index %d\n", dev->data->num_mem_blocks);
Expand Down Expand Up @@ -484,13 +481,6 @@ static inline bool __get_write_offset_and_inc(struct quick_tx* dev, int length,
return false;
}

/* Sanity check */
// TODO fix so user doesn't have to know about padding
if (length > next_mem_block->length) {
printf("Fatal error: Size of padded packet cannot surpass the size of a DMA block!\n");
exit(1);
}

/* Increment the offset counters and dma block index */
data->mem_producer_index = new_mem_producer_index;
data->mem_producer_offset = length;
Expand Down Expand Up @@ -624,6 +614,8 @@ static inline int quick_tx_send_packet(struct quick_tx* dev, const void* buffer,
num_lookup_sleeps++;
}
}

return length;
}


Expand All @@ -634,8 +626,7 @@ static inline void quick_tx_wait_for_tx_complete(struct quick_tx* dev) {

/*
* Call this function to close the QuickTX device
* @param qtx pointer to a quick_tx structure
* @return quick_tx object
* @param qtx pointer to a quick_tx structure
*/
static inline void quick_tx_close(struct quick_tx* dev) {
if (dev) {
Expand Down
19 changes: 7 additions & 12 deletions kernel/linux/quick_tx/quick_tx_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@

struct kmem_cache *qtx_skbuff_head_cache __read_mostly;
struct quick_tx_dev quick_tx_devs[MAX_QUICK_TX_DEV];
u32 num_quick_tx_devs;
DEFINE_MUTEX(init_mutex);

#define VIRTIO_NET_NAME "virtio_net"
Expand All @@ -47,20 +46,16 @@ static void quick_tx_set_ops(struct quick_tx_dev *dev)
{
if (!strncmp(quick_tx_netdev_drivername(dev->netdev), VIRTIO_NET_NAME, strlen(VIRTIO_NET_NAME))) {
dev->ops = &quick_tx_virtio_net_ops;
qtx_error("Set %s operations", VIRTIO_NET_NAME);
return;
} else if (!strncmp(quick_tx_netdev_drivername(dev->netdev), E1000E_NAME, strlen(E1000E_NAME))) {
dev->ops = &quick_tx_default_ops;
qtx_error("Set %s operations", "default");
return;
} else if (!strncmp(quick_tx_netdev_drivername(dev->netdev), E1000_NAME, strlen(E1000_NAME))) {
dev->ops = &quick_tx_e1000_ops;
qtx_error("Set %s operations", E1000_NAME);
return;
}

dev->ops = &quick_tx_default_ops;
qtx_error("Set default operations");
return;
}

Expand All @@ -75,7 +70,8 @@ void quick_tx_calc_mbps(struct quick_tx_dev *dev)
}
}

void quick_tx_print_stats(struct quick_tx_dev *dev) {
void quick_tx_print_stats(struct quick_tx_dev *dev)
{
qtx_info("Run complete, printing TX statistics for %s:", dev->quick_tx_misc.name);
qtx_info("\t TX Queue was frozen of stopped: \t%llu", dev->num_tq_frozen_or_stopped);
qtx_info("\t TX returned locked: \t\t\t%llu", dev->num_tx_locked);
Expand Down Expand Up @@ -140,7 +136,8 @@ static int quick_tx_release (struct inode * inodp, struct file * file)
return 0;
}

static int quick_tx_init_name(struct quick_tx_dev* dev) {
static int quick_tx_init_name(struct quick_tx_dev* dev)
{
int ret;

dev->quick_tx_misc.name =
Expand Down Expand Up @@ -171,7 +168,8 @@ static int quick_tx_init_name(struct quick_tx_dev* dev) {
return ret;
}

static void quick_tx_remove_device(struct quick_tx_dev* dev) {
static void quick_tx_remove_device(struct quick_tx_dev* dev)
{
if (dev->registered == true) {
qtx_info("Removing QuickTx device %s", dev->quick_tx_misc.nodename);
kfree(dev->quick_tx_misc.name);
Expand Down Expand Up @@ -246,16 +244,13 @@ static int quick_tx_init(void)
dev->using_mem_coherent = false;
#endif

qtx_error("set using_mem_coherent to %d", dev->using_mem_coherent);

quick_tx_set_ops(dev);

i++;
}
}
read_unlock(&dev_base_lock);

num_quick_tx_devs = i;
qtx_skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
sizeof(struct quick_tx_skb),
0,
Expand All @@ -275,7 +270,7 @@ static int quick_tx_init(void)
quick_tx_remove_device(&quick_tx_devs[i]);
}

qtx_error("Error occurred while initializing, quick_tx is exiting");
qtx_error("An error occurred while initializing, quick_tx is exiting");

mutex_unlock(&init_mutex);

Expand Down
32 changes: 22 additions & 10 deletions kernel/linux/quick_tx/quick_tx_mmap.c
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,8 @@

#include <linux/quick_tx.h>

bool quick_tx_is_netdev_exist(struct quick_tx_dev *dev) {
bool quick_tx_is_netdev_exist(struct quick_tx_dev *dev)
{
struct net_device *netdev;
bool netdev_exists = false;

Expand Down Expand Up @@ -102,7 +103,7 @@ void quick_tx_vm_mem_close(struct vm_area_struct *vma)

dev->shared_data->num_mem_blocks--;
} else {
qtx_error("Cannot unmap a DMA block! no more blocks to unmap");
qtx_error("Cannot unmap a DMA block! there are no more blocks to unmap for this quick_tx device");
}
}

Expand All @@ -114,7 +115,8 @@ static const struct vm_operations_struct quick_tx_vma_ops_dma = {
.close = quick_tx_vm_mem_close
};

int quick_tx_mmap_master(struct file * file, struct vm_area_struct * vma) {
int quick_tx_mmap_master(struct file * file, struct vm_area_struct * vma)
{
int ret = 0;
struct miscdevice* miscdev = file->private_data;
struct quick_tx_dev* dev = container_of(miscdev, struct quick_tx_dev, quick_tx_misc);
Expand Down Expand Up @@ -165,12 +167,15 @@ int quick_tx_mmap_master(struct file * file, struct vm_area_struct * vma) {
dev->shared_data->postfix_len = sizeof(struct skb_shared_info);
dev->shared_data->producer_wait_done_flag = 1;

dev->shared_data->num_pages_per_block = 2 * (PAGE_ALIGN(dev->netdev->mtu) >> PAGE_SHIFT);
dev->shared_data->num_pages_per_block = (PAGE_ALIGN(
SKB_DATA_ALIGN(SKB_DATA_ALIGN(
dev->shared_data->prefix_len + dev->netdev->mtu)
+ dev->shared_data->postfix_len))
>> PAGE_SHIFT);

dev->quit_work = false;
smp_wmb();

qtx_error("pages per DMA block set to %d", dev->shared_data->num_pages_per_block);

INIT_WORK(&dev->tx_work, quick_tx_worker);
dev->tx_workqueue = alloc_workqueue(QUICK_TX_WORKQUEUE, WQ_UNBOUND | WQ_CPU_INTENSIVE | WQ_HIGHPRI, 1);
queue_work(dev->tx_workqueue, &dev->tx_work);
Expand Down Expand Up @@ -218,9 +223,12 @@ int quick_tx_mmap_mem_block(struct file * file, struct vm_area_struct * vma)

if (!mem_block_p)
{
qtx_error("Could not allocate memory for device, exiting");
qtx_error("Dma mappping errors: %d", dma_mapping_error(dev->netdev->dev.parent,
(dma_addr_t)&entry->mem_handle));
qtx_error("Could not allocate memory block for device %s", miscdev->name);
#if DMA_COHERENT
if (dev->using_mem_coherent)
qtx_error("DMA mappping errors: %d", dma_mapping_error(dev->netdev->dev.parent,
(dma_addr_t)&entry->mem_handle));
#endif
ret = -ENOMEM;
goto error;

Expand Down Expand Up @@ -273,7 +281,11 @@ int quick_tx_mmap(struct file * file, struct vm_area_struct * vma)
} else if ((dev->shared_data) && num_pages == dev->shared_data->num_pages_per_block) {
return quick_tx_mmap_mem_block(file, vma);
} else {
qtx_error("Invalid map size!");
if (dev->shared_data)
qtx_error("Passed in invalid size as parameter. Master mmap should be %lu pages and memory blocks should be %d pages",
QTX_MASTER_PAGE_NUM, dev->shared_data->num_pages_per_block);
else
qtx_error("Passed in an invalid size as a parameter, master mmap should be %lu pages long", QTX_MASTER_PAGE_NUM);
return -EINVAL;
}
}
16 changes: 10 additions & 6 deletions kernel/linux/quick_tx/quick_tx_worker.c
Original file line number Diff line number Diff line change
Expand Up @@ -265,16 +265,22 @@ static inline void poll_napi(struct net_device *dev)

list_for_each_entry(napi, &dev->napi_list, dev_list) {
napi_disable(napi);
#ifdef CONFIG_NETPOLL
if (napi->poll_owner != smp_processor_id() &&
spin_trylock(&napi->poll_lock)) {
#endif
budget = poll_one_napi(napi, budget);
#ifdef CONFIG_NETPOLL
spin_unlock(&napi->poll_lock);

}
#endif
napi_enable(napi);
}
}

inline void quick_tx_wait_free_skb(struct quick_tx_dev *dev) {
/*
 * Called by the TX worker when it needs the driver to release skbs:
 * runs one pass over the device's NAPI contexts via poll_napi()
 * (which disables, polls once, then re-enables each context).
 * NOTE(review): presumably this reclaims completed TX buffers so the
 * worker can make forward progress — confirm against quick_tx_worker.
 */
inline void quick_tx_wait_free_skb(struct quick_tx_dev *dev)
{
	poll_napi(dev->netdev);
}

Expand Down Expand Up @@ -312,8 +318,6 @@ void quick_tx_worker(struct work_struct *work)
u32 aligned_length = 0;
u32 full_size = 0;

qtx_error("Starting quick_tx_worker");

if (!netif_device_present(dev->netdev) || !netif_running(dev->netdev)) {
qtx_error("Device cannot currently transmit, it is not running.");
qtx_error("Force stopping transmit..");
Expand Down Expand Up @@ -347,7 +351,7 @@ void quick_tx_worker(struct work_struct *work)
0, NUMA_NO_NODE, mem_block->kernel_addr + entry->block_offset, full_size);
if (unlikely(!qtx_skb)) {
atomic_dec(&mem_block->users);
qtx_error("ALLOC_ERROR: Decrement on %d. Users at = %d",
qtx_error("Error allocating skb, decrement users on %d block to %d",
entry->mem_block_index, atomic_read(&mem_block->users));
continue;
}
Expand All @@ -363,7 +367,7 @@ void quick_tx_worker(struct work_struct *work)
quick_tx_do_xmit(qtx_skb, txq, dev, 512, false);

#ifdef QUICK_TX_DEBUG
qtx_error("Consumed entry at index = %d, mem_block_index = %d, offset = %d, len = %d",
qtx_info("Consumed entry at index = %d, mem_block_index = %d, offset = %d, len = %d",
data->lookup_consumer_index, entry->mem_block_index, entry->block_offset, entry->length);
#endif

Expand All @@ -387,7 +391,7 @@ void quick_tx_worker(struct work_struct *work)
break;
}
#ifdef QUICK_TX_DEBUG
qtx_error("No packets to process, sleeping (index = %d), entry->consumed = %d", data->lookup_consumer_index,
qtx_info("No packets to process, sleeping (index = %d), entry->consumed = %d", data->lookup_consumer_index,
entry->consumed);
#endif

Expand Down

0 comments on commit 3905371

Please sign in to comment.