From: http://xenbits.xensource.com/linux-2.6.18-xen.hg
# HG changeset 421 patch
# User Keir Fraser
# Date 1203330569 0
# Node ID e4dd072db2595c420bb21d9e835416f4fd543526
# Parent fc90e9b2c12b316b5460ece28f013e6de881af1a
Subject: Solarflare: Resource driver (FATE#303479).
Signed-off-by: Kieran Mansley
Not-yet: Acked-by: jbeulich@novell.com

Index: head-2008-03-17/drivers/net/sfc/Kconfig
===================================================================
--- head-2008-03-17.orig/drivers/net/sfc/Kconfig	2008-03-17 14:26:32.000000000 +0100
+++ head-2008-03-17/drivers/net/sfc/Kconfig	2008-03-17 14:27:58.000000000 +0100
@@ -27,3 +27,10 @@ config SFC_MTD
 	  This module exposes the on-board flash and/or EEPROM memory as
 	  MTD devices (e.g. /dev/mtd1). This makes it possible to upload
 	  a new boot ROM to the NIC.
+
+config SFC_RESOURCE
+	depends on SFC && X86
+	tristate "Solarflare Solarstorm SFC4000 resource driver"
+	help
+	  This module provides the SFC resource manager driver.
+
Index: head-2008-03-17/drivers/net/sfc/Makefile
===================================================================
--- head-2008-03-17.orig/drivers/net/sfc/Makefile	2008-03-17 14:26:32.000000000 +0100
+++ head-2008-03-17/drivers/net/sfc/Makefile	2008-03-17 14:27:58.000000000 +0100
@@ -39,3 +39,4 @@ obj-$(CONFIG_SFC_MTD)	+= $(sfc_mtd_o)
 
 sfc-objs = $(sfc_elements_o)
 sfc_mtd-objs = $(sfc_mtd_elements_o)
+obj-$(CONFIG_SFC_RESOURCE)	+= sfc_resource/
Index: head-2008-03-17/drivers/net/sfc/sfc_resource/Makefile
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ head-2008-03-17/drivers/net/sfc/sfc_resource/Makefile	2008-03-17 14:28:21.000000000 +0100
@@ -0,0 +1,15 @@
+obj-$(CONFIG_SFC_RESOURCE) := sfc_resource.o
+
+EXTRA_CFLAGS += -D__CI_HARDWARE_CONFIG_FALCON__
+EXTRA_CFLAGS += -D__ci_driver__
+EXTRA_CFLAGS += -Werror
+EXTRA_CFLAGS += -Idrivers/net/sfc -Idrivers/net/sfc/sfc_resource
+
+sfc_resource-objs := resource_driver.o iopage.o efx_vi_shm.o \
+	driverlink_new.o kernel_proc.o kfifo.o \
+	nic.o eventq.o falcon.o falcon_mac.o falcon_hash.o \
+	assert_valid.o buddy.o buffer_table.o filter_resource.o \
+	iobufset_resource.o resource_manager.o resources.o \
+	vi_resource_alloc.o vi_resource_event.o vi_resource_flush.o \
+	vi_resource_manager.o driver_object.o kernel_compat.o
+
Index: head-2008-03-17/drivers/net/sfc/sfc_resource/assert_valid.c
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ head-2008-03-17/drivers/net/sfc/sfc_resource/assert_valid.c	2008-03-17 14:27:58.000000000 +0100
@@ -0,0 +1,95 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers -
+ * resource management for Xen backend, OpenOnload, etc
+ * (including support for SFE4001 10GBT NIC)
+ *
+ * This file contains functions to assert validness of resources and
+ * resource manager in DEBUG build of the resource driver.
+ *
+ * Copyright 2005-2007: Solarflare Communications Inc,
+ * 9501 Jeronimo Road, Suite 250,
+ * Irvine, CA 92618, USA
+ *
+ * Developed and maintained by Solarflare Communications:
+ *
+ *
+ *
+ * Certain parts of the driver were implemented by
+ * Alexandra Kossovsky
+ * OKTET Labs Ltd, Russia,
+ * http://oktetlabs.ru,
+ * by request of Solarflare Communications
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ ****************************************************************************
+ */
+
+#include
+
+#ifndef NDEBUG
+#include
+#include
+#include
+
+void
+efrm_resource_manager_assert_valid(struct efrm_resource_manager *rm,
+				   const char *file, int line)
+{
+	_EFRM_ASSERT(rm, file, line);
+	_EFRM_ASSERT(rm->rm_name, file, line);
+	_EFRM_ASSERT(rm->rm_type < EFRM_RESOURCE_NUM, file, line);
+	_EFRM_ASSERT(rm->rm_table, file, line);
+	_EFRM_ASSERT(rm->rm_table_size > 0, file, line);
+	_EFRM_ASSERT(rm->rm_dtor, file, line);
+}
+EXPORT_SYMBOL(efrm_resource_manager_assert_valid);
+
+/*
+ * \param rs resource to validate
+ * \param ref_count_is_zero One of 3 values
+ *   > 0 - check ref count is zero
+ *   = 0 - check ref count is non-zero
+ *   < 0 - ref count could be any value
+ */
+void
+efrm_resource_assert_valid(struct efrm_resource *rs, int ref_count_is_zero,
+			   const char *file, int line)
+{
+	struct efrm_resource_manager *rm;
+
+	_EFRM_ASSERT(rs, file, line);
+
+	if (ref_count_is_zero >= 0) {
+		if (!(ref_count_is_zero || atomic_read(&rs->rs_ref_count) > 0)
+		    || !(!ref_count_is_zero
+			 || atomic_read(&rs->rs_ref_count) == 0))
+			EFRM_WARN("%s: check %szero ref=%d " EFRM_RESOURCE_FMT,
+				  __FUNCTION__,
+				  ref_count_is_zero == 0 ? "non-" : "",
+				  atomic_read(&rs->rs_ref_count),
+				  EFRM_RESOURCE_PRI_ARG(rs->rs_handle));
+
+		_EFRM_ASSERT(!(ref_count_is_zero == 0) ||
+			     atomic_read(&rs->rs_ref_count) != 0, file, line);
+		_EFRM_ASSERT(!(ref_count_is_zero > 0) ||
+			     atomic_read(&rs->rs_ref_count) == 0, file, line);
+	}
+
+	rm = efrm_rm_table[EFRM_RESOURCE_TYPE(rs->rs_handle)];
+	efrm_resource_manager_assert_valid(rm, file, line);
+}
+EXPORT_SYMBOL(efrm_resource_assert_valid);
+
+#endif
Index: head-2008-03-17/drivers/net/sfc/sfc_resource/buddy.c
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ head-2008-03-17/drivers/net/sfc/sfc_resource/buddy.c	2008-03-17 14:27:58.000000000 +0100
@@ -0,0 +1,307 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers -
+ * resource management for Xen backend, OpenOnload, etc
+ * (including support for SFE4001 10GBT NIC)
+ *
+ * This file contains implementation of a buddy allocator.
+ * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include /* get uintXX types on win32 */ +#include +#include +#include + +#if 1 +#define DEBUG_ALLOC(x) +#else +#define DEBUG_ALLOC(x) x + +static inline void efrm_buddy_dump(struct efrm_buddy_allocator *b) +{ + unsigned o; + + EFRM_NOTICE("%s: dump allocator with order %u", + __FUNCTION__, b->order); + for (o = 0; o <= b->order; o++) { + struct list_head *l = &b->free_lists[o]; + while (l->next != &b->free_lists[o]) { + l = l->next; + EFRM_NOTICE("%s: order %x: %zx", __FUNCTION__, o, + l - b->links); + } + } +} +#endif + +/* + * The purpose of the following inline functions is to give the + * understandable names to the simple actions. 
+ */ +static inline void +efrm_buddy_free_list_add(struct efrm_buddy_allocator *b, + unsigned order, unsigned addr) +{ + list_add(&b->links[addr], &b->free_lists[order]); + b->orders[addr] = (uint8_t) b->order; +} +static inline void +efrm_buddy_free_list_del(struct efrm_buddy_allocator *b, unsigned addr) +{ + list_del(&b->links[addr]); + b->links[addr].next = NULL; +} +static inline int +efrm_buddy_free_list_empty(struct efrm_buddy_allocator *b, unsigned order) +{ + return list_empty(&b->free_lists[order]); +} +static inline unsigned +efrm_buddy_free_list_pop(struct efrm_buddy_allocator *b, unsigned order) +{ + struct list_head *l = list_pop(&b->free_lists[order]); + l->next = NULL; + return (unsigned)(l - b->links); +} +static inline int +efrm_buddy_addr_in_free_list(struct efrm_buddy_allocator *b, unsigned addr) +{ + return b->links[addr].next != NULL; +} +static inline unsigned +efrm_buddy_free_list_first(struct efrm_buddy_allocator *b, unsigned order) +{ + return (unsigned)(b->free_lists[order].next - b->links); +} + +int efrm_buddy_ctor(struct efrm_buddy_allocator *b, unsigned order) +{ + unsigned o; + unsigned size = 1 << order; + + DEBUG_ALLOC(EFRM_NOTICE("%s(%u)", __FUNCTION__, order)); + EFRM_ASSERT(b); + EFRM_ASSERT(order <= sizeof(unsigned) * 8 - 1); + + b->order = order; + b->free_lists = vmalloc((order + 1) * sizeof(struct list_head)); + if (b->free_lists == NULL) + goto fail1; + + b->links = vmalloc(size * sizeof(struct list_head)); + if (b->links == NULL) + goto fail2; + + b->orders = vmalloc(size); + if (b->orders == NULL) + goto fail3; + + memset(b->links, 0, size * sizeof(struct list_head)); + + for (o = 0; o <= b->order; ++o) + INIT_LIST_HEAD(b->free_lists + o); + + efrm_buddy_free_list_add(b, b->order, 0); + + return 0; + +fail3: + vfree(b->links); +fail2: + vfree(b->free_lists); +fail1: + return -ENOMEM; +} + +void efrm_buddy_dtor(struct efrm_buddy_allocator *b) +{ + EFRM_ASSERT(b); + + vfree(b->free_lists); + vfree(b->links); + vfree(b->orders); +} + +int efrm_buddy_alloc(struct efrm_buddy_allocator *b, unsigned order) +{ + unsigned smallest; + unsigned addr; + + DEBUG_ALLOC(EFRM_NOTICE("%s(%u)", __FUNCTION__, order)); + EFRM_ASSERT(b); + + /* Find smallest chunk that is big enough. ?? Can optimise this by + ** keeping array of pointers to smallest chunk for each order. + */ + smallest = order; + while (smallest <= b->order && + efrm_buddy_free_list_empty(b, smallest)) + ++smallest; + + if (smallest > b->order) { + DEBUG_ALLOC(EFRM_NOTICE + ("buddy - alloc order %d failed - max order %d", + order, b->order);); + return -ENOMEM; + } + + /* Split blocks until we get one of the correct size. 
*/ + addr = efrm_buddy_free_list_pop(b, smallest); + + DEBUG_ALLOC(EFRM_NOTICE("buddy - alloc %x order %d cut from order %d", + addr, order, smallest);); + while (smallest-- > order) + efrm_buddy_free_list_add(b, smallest, addr + (1 << smallest)); + + EFRM_DO_DEBUG(b->orders[addr] = (uint8_t) order); + + EFRM_ASSERT(addr < 1u << b->order); + return addr; +} + +void +efrm_buddy_free(struct efrm_buddy_allocator *b, unsigned addr, + unsigned order) +{ + unsigned buddy_addr; + + DEBUG_ALLOC(EFRM_NOTICE("%s(%u, %u)", __FUNCTION__, addr, order)); + EFRM_ASSERT(b); + EFRM_ASSERT(order <= b->order); + EFRM_ASSERT((unsigned long)addr + ((unsigned long)1 << order) <= + (unsigned long)1 << b->order); + EFRM_ASSERT(!efrm_buddy_addr_in_free_list(b, addr)); + EFRM_ASSERT(b->orders[addr] == order); + + /* merge free blocks */ + while (order < b->order) { + buddy_addr = addr ^ (1 << order); + if (!efrm_buddy_addr_in_free_list(b, buddy_addr) || + b->orders[buddy_addr] != order) + break; + efrm_buddy_free_list_del(b, addr); + if (buddy_addr < addr) + addr = buddy_addr; + ++order; + } + + DEBUG_ALLOC(EFRM_NOTICE + ("buddy - free %x merged into order %d", addr, order);); + efrm_buddy_free_list_add(b, order, addr); +} + +void efrm_buddy_reserve_at_start(struct efrm_buddy_allocator *b, unsigned n) +{ + int addr; + unsigned o; + EFRM_DO_DEBUG(int n_save = n); + + DEBUG_ALLOC(EFRM_NOTICE("%s(%u)", __FUNCTION__, n)); + EFRM_ASSERT(b); + EFRM_ASSERT(n <= 1u << b->order && n > 0); + /* Whole space must be free. */ + EFRM_ASSERT(!efrm_buddy_free_list_empty(b, b->order)); + + o = fls(n); + + while (n) { + while (((unsigned)1 << o) > n) + --o; + EFRM_ASSERT(((unsigned)1 << o) <= n); + addr = efrm_buddy_alloc(b, o); + EFRM_ASSERT(addr + (1 << o) <= n_save); + n -= 1 << o; + } +} + +static int +__efrm_buddy_reserve_at_end(struct efrm_buddy_allocator *b, unsigned order, + int threshold) +{ + unsigned o, addr; + + DEBUG_ALLOC(EFRM_NOTICE("%s(%u, %d)", __FUNCTION__, order, threshold)); + EFRM_ASSERT(b); + + /* Find largest block; there must be one big enough (or caller has + ** goofed). + */ + for (o = b->order;; --o) { + if (efrm_buddy_free_list_empty(b, o)) + continue; + addr = efrm_buddy_free_list_first(b, o); + if (addr + (1 << o) <= (unsigned)threshold) + continue; + break; + } + EFRM_ASSERT(o >= order); + + /* Split down (keeping second half) until we reach + * the requested size. 
*/ + addr = efrm_buddy_free_list_pop(b, o); + + while (o-- > order) { + efrm_buddy_free_list_add(b, o, addr); + addr += 1 << o; + } + + EFRM_DO_DEBUG(b->orders[addr] = (uint8_t) order); + + return addr; +} + +void efrm_buddy_reserve_at_end(struct efrm_buddy_allocator *b, unsigned n) +{ + int addr, threshold; + unsigned o; + EFRM_DO_DEBUG(int n_save = n); + + DEBUG_ALLOC(EFRM_NOTICE("%s(%u)", __FUNCTION__, n)); + DEBUG_ALLOC(efrm_buddy_dump(b)); + EFRM_ASSERT(b); + EFRM_ASSERT(n <= 1u << b->order); + + if (!n) + return; + + threshold = (1 << b->order) - n; + o = fls(n); + + while (n) { + while (((unsigned)1 << o) > n) + --o; + EFRM_ASSERT(((unsigned)1 << o) <= n); + addr = __efrm_buddy_reserve_at_end(b, o, threshold); + EFRM_ASSERT(addr >= (1 << b->order) - n_save); + n -= 1 << o; + } + DEBUG_ALLOC(efrm_buddy_dump(b)); +} Index: head-2008-03-17/drivers/net/sfc/sfc_resource/buffer_table.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/buffer_table.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,210 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains abstraction of the buffer table on the NIC. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +/* +** Might be worth keeping a bitmap of which entries are clear. Then we +** wouldn't need to clear them all again when we free an allocation. +*/ + +#include +#include +#include +#include +#include + +/*! Comment? */ +struct efrm_buffer_table { + spinlock_t lock; + struct efrm_buddy_allocator buddy; +}; + +/* Efab buffer state. 
*/ +static struct efrm_buffer_table efrm_buffers; + +int efrm_buffer_table_ctor(unsigned low, unsigned high) +{ + int log2_n_entries, rc; + + EFRM_ASSERT(high > 0); + EFRM_ASSERT(low < high); + + EFRM_TRACE("efrm_buffer_table_ctor: low=%u high=%u", low, high); + EFRM_NOTICE("efrm_buffer_table_ctor: low=%u high=%u", low, high); + + log2_n_entries = fls(high - 1); + + rc = efrm_buddy_ctor(&efrm_buffers.buddy, log2_n_entries); + if (rc < 0) { + EFRM_ERR("efrm_buffer_table_ctor: efrm_buddy_ctor(%d) " + "failed (%d)", log2_n_entries, rc); + return rc; + } + + spin_lock_init(&efrm_buffers.lock); + + efrm_buddy_reserve_at_start(&efrm_buffers.buddy, low); + efrm_buddy_reserve_at_end(&efrm_buffers.buddy, + (1 << log2_n_entries) - high); + + EFRM_TRACE("efrm_buffer_table_ctor: done"); + + return 0; +} + +void efrm_buffer_table_dtor(void) +{ + /* ?? debug check that all allocations have been freed? */ + + spin_lock_destroy(&efrm_buffers.lock); + efrm_buddy_dtor(&efrm_buffers.buddy); + + EFRM_TRACE("efrm_buffer_table_dtor: done"); +} + +/**********************************************************************/ + +int +efrm_buffer_table_alloc(unsigned order, + struct efhw_buffer_table_allocation *a) +{ + irq_flags_t lock_flags; + int rc; + + EFRM_ASSERT(&efrm_buffers.buddy); + EFRM_ASSERT(a); + + /* Round up to multiple of two, as the buffer clear logic works in + * pairs when not in "full" mode. */ + order = max_t(unsigned, order, 1); + + spin_lock_irqsave(&efrm_buffers.lock, lock_flags); + rc = efrm_buddy_alloc(&efrm_buffers.buddy, order); + spin_unlock_irqrestore(&efrm_buffers.lock, lock_flags); + + if (rc < 0) { + EFRM_ERR("efrm_buffer_table_alloc: failed (n=%ld) rc %d", + 1ul << order, rc); + return rc; + } + + EFRM_TRACE("efrm_buffer_table_alloc: base=%d n=%ld", + rc, 1ul << order); + a->order = order; + a->base = (unsigned)rc; + return 0; +} + +void efrm_buffer_table_free(struct efhw_buffer_table_allocation *a) +{ + irq_flags_t lock_flags; + struct efhw_nic *nic; + int nic_i; + + EFRM_ASSERT(&efrm_buffers.buddy); + EFRM_ASSERT(a); + EFRM_ASSERT(a->base != -1); + EFRM_ASSERT((unsigned long)a->base + (1ul << a->order) <= + efrm_buddy_size(&efrm_buffers.buddy)); + + EFRM_TRACE("efrm_buffer_table_free: base=%d n=%ld", + a->base, (1ul << a->order)); + + EFRM_FOR_EACH_NIC(nic_i, nic) + efhw_nic_buffer_table_clear(nic, a->base, 1ul << a->order); + + spin_lock_irqsave(&efrm_buffers.lock, lock_flags); + efrm_buddy_free(&efrm_buffers.buddy, a->base, a->order); + spin_unlock_irqrestore(&efrm_buffers.lock, lock_flags); + + EFRM_DO_DEBUG(a->base = a->order = -1); +} + +/**********************************************************************/ + +void +efrm_buffer_table_set(struct efhw_buffer_table_allocation *a, + unsigned i, dma_addr_t dma_addr, int owner) +{ + struct efhw_nic *nic; + int nic_i; + + EFRM_ASSERT(a); + EFRM_ASSERT(i < (unsigned)1 << a->order); + EFRM_FOR_EACH_NIC(nic_i, nic) + efhw_nic_buffer_table_set(nic, dma_addr, EFHW_NIC_PAGE_SIZE, + 0, owner, a->base + i); + /* NB. No commit Caller should call efrm_buffer_table_commit. There + are underlying hardware constraints regarding the number of + buffer table entries which can be pushed before commiting. 
*/ +} + +unsigned long efrm_buffer_table_size(void) +{ + return efrm_buddy_size(&efrm_buffers.buddy); +} + +/**********************************************************************/ + +int +efrm_page_register(dma_addr_t dma_addr, int owner, + efhw_buffer_addr_t *buf_addr_out) +{ + struct efhw_buffer_table_allocation alloc; + int rc; + + rc = efrm_buffer_table_alloc(0, &alloc); + if (rc == 0) { + efrm_buffer_table_set(&alloc, 0, dma_addr, owner); + efrm_buffer_table_commit(); + *buf_addr_out = EFHW_BUFFER_ADDR(alloc.base, 0); + } + return rc; +} +EXPORT_SYMBOL(efrm_page_register); + +void efrm_page_unregister(efhw_buffer_addr_t buf_addr) +{ + struct efhw_buffer_table_allocation alloc; + + alloc.order = 0; + alloc.base = EFHW_BUFFER_PAGE(buf_addr); + efrm_buffer_table_free(&alloc); +} +EXPORT_SYMBOL(efrm_page_unregister); + +void efrm_buffer_table_commit(void) +{ + struct efhw_nic *nic; + int nic_i; + + EFRM_FOR_EACH_NIC(nic_i, nic) + efhw_nic_buffer_table_commit(nic); +} Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,199 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides EtherFabric NIC hardware interface. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_DRIVER_EFAB_HARDWARE_H__ +#define __CI_DRIVER_EFAB_HARDWARE_H__ + +#include "ci/driver/efab/hardware/workarounds.h" +#include + + +/*---------------------------------------------------------------------------- + * + * Common EtherFabric definitions + * + *---------------------------------------------------------------------------*/ + +#include +#include +#include + +/*---------------------------------------------------------------------------- + * + * EtherFabric varients + * + *---------------------------------------------------------------------------*/ + +#include + +/*---------------------------------------------------------------------------- + * + * EtherFabric Portable Hardware Layer defines + * + *---------------------------------------------------------------------------*/ + + /*-------------- Initialisation ------------ */ +#define efhw_nic_close_hardware(nic) \ + ((nic)->efhw_func->close_hardware(nic)) + +#define efhw_nic_init_hardware(nic, ev_handlers, mac_addr) \ + ((nic)->efhw_func->init_hardware((nic), (ev_handlers), (mac_addr))) + +/*-------------- Interrupt support ------------ */ +/** Handle interrupt. Return 0 if not handled, 1 if handled. */ +#define efhw_nic_interrupt(nic) \ + ((nic)->efhw_func->interrupt(nic)) + +#define efhw_nic_interrupt_enable(nic, index) \ + ((nic)->efhw_func->interrupt_enable(nic, index)) + +#define efhw_nic_interrupt_disable(nic, index) \ + ((nic)->efhw_func->interrupt_disable(nic, index)) + +#define efhw_nic_set_interrupt_moderation(nic, index, val) \ + ((nic)->efhw_func->set_interrupt_moderation(nic, index, val)) + +/*-------------- Event support ------------ */ + +#define efhw_nic_event_queue_enable(nic, evq, size, q_base, buf_base) \ + ((nic)->efhw_func->event_queue_enable(nic, evq, size, q_base, \ + buf_base)) + +#define efhw_nic_event_queue_disable(nic, evq, timer_only) \ + ((nic)->efhw_func->event_queue_disable(nic, evq, timer_only)) + +#define efhw_nic_wakeup_request(nic, q_base, index, evq) \ + ((nic)->efhw_func->wakeup_request(nic, q_base, index, evq)) + +#define efhw_nic_sw_event(nic, data, ev) \ + ((nic)->efhw_func->sw_event(nic, data, ev)) + +/*-------------- Filter support ------------ */ +#define efhw_nic_ipfilter_set(nic, type, index, dmaq, \ + saddr, sport, daddr, dport) \ + ((nic)->efhw_func->ipfilter_set(nic, type, index, dmaq, \ + saddr, sport, daddr, dport)) + +#define efhw_nic_ipfilter_attach(nic, index, dmaq) \ + ((nic)->efhw_func->ipfilter_attach(nic, index, dmaq)) + +#define efhw_nic_ipfilter_detach(nic, index) \ + ((nic)->efhw_func->ipfilter_detach(nic, index)) + +#define efhw_nic_ipfilter_clear(nic, index) \ + ((nic)->efhw_func->ipfilter_clear(nic, index)) + +/*-------------- DMA support ------------ */ +#define efhw_nic_dmaq_tx_q_init(nic, dmaq, evq, owner, tag, \ + dmaq_size, index, flags) \ + ((nic)->efhw_func->dmaq_tx_q_init(nic, dmaq, evq, owner, tag, \ + dmaq_size, index, flags)) + +#define efhw_nic_dmaq_rx_q_init(nic, dmaq, evq, owner, tag, \ + dmaq_size, index, flags) \ + ((nic)->efhw_func->dmaq_rx_q_init(nic, dmaq, evq, owner, tag, \ + dmaq_size, index, flags)) + +#define efhw_nic_dmaq_tx_q_disable(nic, dmaq) \ + ((nic)->efhw_func->dmaq_tx_q_disable(nic, dmaq)) + +#define 
efhw_nic_dmaq_rx_q_disable(nic, dmaq) \ + ((nic)->efhw_func->dmaq_rx_q_disable(nic, dmaq)) + +#define efhw_nic_flush_tx_dma_channel(nic, dmaq) \ + ((nic)->efhw_func->flush_tx_dma_channel(nic, dmaq)) + +#define efhw_nic_flush_rx_dma_channel(nic, dmaq) \ + ((nic)->efhw_func->flush_rx_dma_channel(nic, dmaq)) + +/*-------------- MAC Low level interface ---- */ +#define efhw_gmac_get_mac_addr(nic) \ + ((nic)->gmac->get_mac_addr((nic)->gmac)) + +/*-------------- Buffer table -------------- */ +#define efhw_nic_buffer_table_set(nic, addr, bufsz, region, \ + own_id, buf_id) \ + ((nic)->efhw_func->buffer_table_set(nic, addr, bufsz, region, \ + own_id, buf_id)) + +#define efhw_nic_buffer_table_set_n(nic, buf_id, addr, bufsz, \ + region, n_pages, own_id) \ + ((nic)->efhw_func->buffer_table_set_n(nic, buf_id, addr, bufsz, \ + region, n_pages, own_id)) + +#define efhw_nic_buffer_table_clear(nic, id, num) \ + ((nic)->efhw_func->buffer_table_clear(nic, id, num)) + +#define efhw_nic_buffer_table_commit(nic) \ + ((nic)->efhw_func->buffer_table_commit(nic)) + +/*---------------------------------------------------------------------------- + * Hardware specific portability macros for performance critical code. + * + * Warning: and driver code which is using these defines is not + * capable of supporting multiple NIC varients and should be built and + * marked appropriately + * + *---------------------------------------------------------------------------*/ + +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + +/* --- DMA --- */ +#define EFHW_DMA_ADDRMASK (0xffffffffffffffffULL) + +/* --- Buffers --- */ +#define EFHW_BUFFER_ADDR FALCON_BUFFER_4K_ADDR +#define EFHW_BUFFER_PAGE FALCON_BUFFER_4K_PAGE +#define EFHW_BUFFER_OFF FALCON_BUFFER_4K_OFF + +/* --- Filters --- */ +#define EFHW_IP_FILTER_NUM FALCON_FILTER_TBL_NUM + +#define EFHW_MAX_PAGE_SIZE FALCON_MAX_PAGE_SIZE + +#else +# error no hardware definition found +#endif + +#if PAGE_SIZE <= EFHW_MAX_PAGE_SIZE +#define EFHW_NIC_PAGE_SIZE PAGE_SIZE +#else +#define EFHW_NIC_PAGE_SIZE EFHW_MAX_PAGE_SIZE +#endif +#define EFHW_NIC_PAGE_MASK (~(EFHW_NIC_PAGE_SIZE-1)) + +#endif /* __CI_DRIVER_EFAB_HARDWARE_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/common.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/common.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,68 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides EtherFabric NIC hardware interface common + * definitions. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_DRIVER_EFAB_HARDWARE_COMMON_H__ +#define __CI_DRIVER_EFAB_HARDWARE_COMMON_H__ + +/*---------------------------------------------------------------------------- + * + * EtherFabric constants + * + *---------------------------------------------------------------------------*/ + +#define EFHW_1K 0x00000400u +#define EFHW_2K 0x00000800u +#define EFHW_4K 0x00001000u +#define EFHW_8K 0x00002000u +#define EFHW_16K 0x00004000u +#define EFHW_32K 0x00008000u +#define EFHW_64K 0x00010000u +#define EFHW_128K 0x00020000u +#define EFHW_256K 0x00040000u +#define EFHW_512K 0x00080000u +#define EFHW_1M 0x00100000u +#define EFHW_2M 0x00200000u +#define EFHW_4M 0x00400000u +#define EFHW_8M 0x00800000u +#define EFHW_16M 0x01000000u +#define EFHW_32M 0x02000000u +#define EFHW_48M 0x03000000u +#define EFHW_64M 0x04000000u +#define EFHW_128M 0x08000000u +#define EFHW_256M 0x10000000u +#define EFHW_512M 0x20000000u +#define EFHW_1G 0x40000000u +#define EFHW_2G 0x80000000u +#define EFHW_4G 0x100000000ULL +#define EFHW_8G 0x200000000ULL + +#endif /* __CI_DRIVER_EFAB_HARDWARE_COMMON_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,420 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides EtherFabric NIC - EFXXXX (aka Falcon) specific + * definitions. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_DRIVER_EFAB_HARDWARE_FALCON_H__ +#define __CI_DRIVER_EFAB_HARDWARE_FALCON_H__ + +/*---------------------------------------------------------------------------- + * Compile options + *---------------------------------------------------------------------------*/ + +/* Falcon has an 8K maximum page size. 
*/ +#define FALCON_MAX_PAGE_SIZE EFHW_8K + +/* include the register definitions */ +#include +#include +#include +#include +#include +#include +#include + +#define FALCON_DMA_TX_DESC_BYTES 8 +#define FALCON_DMA_RX_PHYS_DESC_BYTES 8 +#define FALCON_DMA_RX_BUF_DESC_BYTES 4 + + +/* ---- efhw_event_t helpers --- */ + +#ifndef EFHW_IS_LITTLE_ENDIAN +#error This needs lots of cpu_to_le64s() in +#endif + +/*!\ TODO look at whether there is an efficiency gain to be had by + treating the event codes to 32bit masks as is done for EF1 + + These masks apply to the full 64 bits of the event to extract the + event code - followed by the common event codes to expect + */ +#define __FALCON_OPEN_MASK(WIDTH) ((((uint64_t)1) << (WIDTH)) - 1) +#define FALCON_EVENT_CODE_MASK \ + (__FALCON_OPEN_MASK(EV_CODE_WIDTH) << EV_CODE_LBN) +#define FALCON_EVENT_EV_Q_ID_MASK \ + (__FALCON_OPEN_MASK(DRIVER_EV_EVQ_ID_WIDTH) << DRIVER_EV_EVQ_ID_LBN) +#define FALCON_EVENT_TX_FLUSH_Q_ID_MASK \ + (__FALCON_OPEN_MASK(DRIVER_EV_TX_DESCQ_ID_WIDTH) << \ + DRIVER_EV_TX_DESCQ_ID_LBN) +#define FALCON_EVENT_RX_FLUSH_Q_ID_MASK \ + (__FALCON_OPEN_MASK(DRIVER_EV_RX_DESCQ_ID_WIDTH) << \ + DRIVER_EV_RX_DESCQ_ID_LBN) +#define FALCON_EVENT_DRV_SUBCODE_MASK \ + (__FALCON_OPEN_MASK(DRIVER_EV_SUB_CODE_WIDTH) << \ + DRIVER_EV_SUB_CODE_LBN) + +#define FALCON_EVENT_FMT "[ev:%x:%08x:%08x]" +#define FALCON_EVENT_PRI_ARG(e) \ + ((unsigned)(((e).u64 & FALCON_EVENT_CODE_MASK) >> EV_CODE_LBN)), \ + ((unsigned)((e).u64 >> 32)), ((unsigned)((e).u64 & 0xFFFFFFFF)) + +#define FALCON_EVENT_CODE(evp) ((evp)->u64 & FALCON_EVENT_CODE_MASK) +#define FALCON_EVENT_WAKE_EVQ_ID(evp) \ + (((evp)->u64 & FALCON_EVENT_EV_Q_ID_MASK) >> DRIVER_EV_EVQ_ID_LBN) +#define FALCON_EVENT_TX_FLUSH_Q_ID(evp) \ + (((evp)->u64 & FALCON_EVENT_TX_FLUSH_Q_ID_MASK) >> \ + DRIVER_EV_TX_DESCQ_ID_LBN) +#define FALCON_EVENT_RX_FLUSH_Q_ID(evp) \ + (((evp)->u64 & FALCON_EVENT_RX_FLUSH_Q_ID_MASK) >> \ + DRIVER_EV_RX_DESCQ_ID_LBN) +#define FALCON_EVENT_DRIVER_SUBCODE(evp) \ + (((evp)->u64 & FALCON_EVENT_DRV_SUBCODE_MASK) >> \ + DRIVER_EV_SUB_CODE_LBN) + +#define FALCON_EVENT_CODE_CHAR ((uint64_t)DRIVER_EV_DECODE << EV_CODE_LBN) +#define FALCON_EVENT_CODE_SW ((uint64_t)DRV_GEN_EV_DECODE << EV_CODE_LBN) + + +/* so this is the size in bytes of an awful lot of things */ +#define FALCON_REGISTER128 (16) + +/* we define some unique dummy values as a debug aid */ +#ifdef _WIN32 +#define FALCON_ATOMIC_BASE 0xdeadbeef00000000ui64 +#else +#define FALCON_ATOMIC_BASE 0xdeadbeef00000000ULL +#endif +#define FALCON_ATOMIC_UPD_REG (FALCON_ATOMIC_BASE | 0x1) +#define FALCON_ATOMIC_PTR_TBL_REG (FALCON_ATOMIC_BASE | 0x2) +#define FALCON_ATOMIC_SRPM_UDP_EVQ_REG (FALCON_ATOMIC_BASE | 0x3) +#define FALCON_ATOMIC_RX_FLUSH_DESCQ (FALCON_ATOMIC_BASE | 0x4) +#define FALCON_ATOMIC_TX_FLUSH_DESCQ (FALCON_ATOMIC_BASE | 0x5) +#define FALCON_ATOMIC_INT_EN_REG (FALCON_ATOMIC_BASE | 0x6) +#define FALCON_ATOMIC_TIMER_CMD_REG (FALCON_ATOMIC_BASE | 0x7) +#define FALCON_ATOMIC_PACE_REG (FALCON_ATOMIC_BASE | 0x8) +#define FALCON_ATOMIC_INT_ACK_REG (FALCON_ATOMIC_BASE | 0x9) +/* XXX It crashed with odd value in FALCON_ATOMIC_INT_ADR_REG */ +#define FALCON_ATOMIC_INT_ADR_REG (FALCON_ATOMIC_BASE | 0xa) + +/*---------------------------------------------------------------------------- + * + * PCI control blocks for Falcon - + * (P) primary is for NET + * (S) secondary is for CHAR + * + *---------------------------------------------------------------------------*/ + +#define FALCON_P_CTR_AP_BAR 2 +#define FALCON_S_CTR_AP_BAR 0 +#define 
FALCON_S_DEVID 0x6703 + + +/*---------------------------------------------------------------------------- + * + * Falcon constants + * + *---------------------------------------------------------------------------*/ + +#define FALCON_DMAQ_NUM (EFHW_4K) +#define FALCON_EVQ_TBL_NUM (EFHW_4K) +#define FALCON_TIMERS_NUM (EFHW_4K) + +/* This value is an upper limit on the total number of filter table + * entries, including odd and even banks. The actual size of filter table + * is determined at runtime, as it can vary. + */ +#define FALCON_FILTER_TBL_NUM (EFHW_16K) + +/* max number of buffers which can be pushed before commiting */ +#define FALCON_BUFFER_UPD_MAX (128) + +/* We can tell falcon to write its RX buffers in 32 byte quantums, + and since we pad packets 2 bytes to the right we can't use + a full page (not unless we use jumbo mode for all queues) + + NOTE: tests/nic/dma.c assumes that the value here is the real NIC + value, so we explicitly round it down to the nearest 32 bytes */ + +/* #define FALCON_RX_USR_BUF_SIZE round_down(4096-2,32) */ +#define FALCON_RX_USR_BUF_SIZE 4064 + +#define FALCON_EVQ_RPTR_REG_P0 0x400 + +/*---------------------------------------------------------------------------- + * + * Falcon requires user-space descriptor pushes to be: + * dword[0-2]; wiob(); dword[3] + * + * Driver register access must be locked against other threads from + * the same driver but can be in any order: i.e dword[0-3]; wiob() + * + * The following helpers ensure that valid dword orderings are exercised + * + *---------------------------------------------------------------------------*/ + +/* A union to allow writting 64bit values as 32bit values, without + * hitting the compilers aliasing rules. We hope the compiler optimises + * away the copy's anyway */ +union __u64to32 { + uint64_t u64; + struct { +#ifdef EFHW_IS_LITTLE_ENDIAN + uint32_t a; + uint32_t b; +#else + uint32_t b; + uint32_t a; +#endif + } s; +}; + +static inline void +falcon_write_ddd_d(efhw_ioaddr_t kva, + uint32_t d0, uint32_t d1, uint32_t d2, uint32_t d3) +{ + writel(d0, kva + 0); + writel(d1, kva + 4); + writel(d2, kva + 8); + mmiowb(); + writel(d3, kva + 12); +} + +static inline void falcon_write_q(efhw_ioaddr_t kva, uint64_t q) +{ + union __u64to32 u; + u.u64 = q; + + writel(u.s.a, kva); + mmiowb(); + writel(u.s.b, kva + 4); +} + +static inline void falcon_read_q(efhw_ioaddr_t addr, uint64_t *q0) +{ + /* It is essential that we read dword0 first, so that + * the shadow register is updated with the latest value + * and we get a self consistent value. + */ + union __u64to32 u; + u.s.a = readl(addr); + rmb(); + u.s.b = readl(addr + 4); + + *q0 = u.u64; +} + +static inline void +falcon_write_qq(efhw_ioaddr_t kva, uint64_t q0, uint64_t q1) +{ + writeq(q0, kva + 0); + falcon_write_q(kva + 8, q1); +} + +static inline void +falcon_read_qq(efhw_ioaddr_t addr, uint64_t *q0, uint64_t *q1) +{ + falcon_read_q(addr, q0); + *q1 = readq(addr + 8); +} + + + +/*---------------------------------------------------------------------------- + * + * Buffer virtual addresses (4K buffers) + * + *---------------------------------------------------------------------------*/ + +/* Form a buffer virtual address from buffer ID and offset. If the offset +** is larger than the buffer size, then the buffer indexed will be +** calculated appropriately. It is the responsibility of the caller to +** ensure that they have valid buffers programmed at that address. 
+*/ +#define FALCON_VADDR_8K_S (13) +#define FALCON_VADDR_4K_S (12) +#define FALCON_VADDR_M 0xfffff /* post shift mask */ + +#define FALCON_BUFFER_8K_ADDR(id, off) (((id) << FALCON_VADDR_8K_S) + (off)) +#define FALCON_BUFFER_8K_PAGE(vaddr) \ + (((vaddr) >> FALCON_VADDR_8K_S) & FALCON_VADDR_M) +#define FALCON_BUFFER_8K_OFF(vaddr) \ + ((vaddr) & __FALCON_MASK32(FALCON_VADDR_8K_S)) + +#define FALCON_BUFFER_4K_ADDR(id, off) (((id) << FALCON_VADDR_4K_S) + (off)) +#define FALCON_BUFFER_4K_PAGE(vaddr) \ + (((vaddr) >> FALCON_VADDR_4K_S) & FALCON_VADDR_M) +#define FALCON_BUFFER_4K_OFF(vaddr) \ + ((vaddr) & __FALCON_MASK32(FALCON_VADDR_4K_S)) + +/*---------------------------------------------------------------------------- + * + * Timer helpers + * + *---------------------------------------------------------------------------*/ + +static inline int falcon_timer_page_addr(uint idx) +{ + + EFHW_ASSERT(TIMER_CMD_REG_KER_OFST == + (TIMER_CMD_REG_PAGE4_OFST - 4 * EFHW_8K)); + + EFHW_ASSERT(idx < FALCON_TIMERS_NUM); + + if (idx < 4) + return TIMER_CMD_REG_KER_OFST + (idx * EFHW_8K); + else if (idx < 1024) + return TIMER_CMD_REG_PAGE4_OFST + ((idx - 4) * EFHW_8K); + else + return TIMER_CMD_REG_PAGE123K_OFST + ((idx - 1024) * EFHW_8K); +} + +#define FALCON_TIMER_PAGE_MASK (EFHW_8K-1) + +static inline int falcon_timer_page_offset(uint idx) +{ + return falcon_timer_page_addr(idx) & FALCON_TIMER_PAGE_MASK; +} + +/*---------------------------------------------------------------------------- + * + * DMA Queue helpers + * + *---------------------------------------------------------------------------*/ + +/* iSCSI queue for A1; see bug 5427 for more details. */ +#define FALCON_A1_ISCSI_DMAQ 4 + +/*! returns an address within a bar of the TX DMA doorbell */ +static inline uint falcon_tx_dma_page_addr(uint dmaq_idx) +{ + uint page; + + EFHW_ASSERT((((TX_DESC_UPD_REG_PAGE123K_OFST) & (EFHW_8K - 1)) == + (((TX_DESC_UPD_REG_PAGE4_OFST) & (EFHW_8K - 1))))); + + EFHW_ASSERT(dmaq_idx < FALCON_DMAQ_NUM); + + if (dmaq_idx < 1024) + page = TX_DESC_UPD_REG_PAGE4_OFST + ((dmaq_idx - 4) * EFHW_8K); + else + page = + TX_DESC_UPD_REG_PAGE123K_OFST + + ((dmaq_idx - 1024) * EFHW_8K); + + return page; +} + +/*! returns an address within a bar of the RX DMA doorbell */ +static inline uint falcon_rx_dma_page_addr(uint dmaq_idx) +{ + uint page; + + EFHW_ASSERT((((RX_DESC_UPD_REG_PAGE123K_OFST) & (EFHW_8K - 1)) == + ((RX_DESC_UPD_REG_PAGE4_OFST) & (EFHW_8K - 1)))); + + EFHW_ASSERT(dmaq_idx < FALCON_DMAQ_NUM); + + if (dmaq_idx < 1024) + page = RX_DESC_UPD_REG_PAGE4_OFST + ((dmaq_idx - 4) * EFHW_8K); + else + page = + RX_DESC_UPD_REG_PAGE123K_OFST + + ((dmaq_idx - 1024) * EFHW_8K); + + return page; +} + +/*! "page"=NIC-dependent register set size */ +#define FALCON_DMA_PAGE_MASK (EFHW_8K-1) + +/*! returns an address within a bar of the start of the "page" + containing the TX DMA doorbell */ +static inline int falcon_tx_dma_page_base(uint dma_idx) +{ + return falcon_tx_dma_page_addr(dma_idx) & ~FALCON_DMA_PAGE_MASK; +} + +/*! returns an address within a bar of the start of the "page" + containing the RX DMA doorbell */ +static inline int falcon_rx_dma_page_base(uint dma_idx) +{ + return falcon_rx_dma_page_addr(dma_idx) & ~FALCON_DMA_PAGE_MASK; +} + +/*! returns an offset within a "page" of the TX DMA doorbell */ +static inline int falcon_tx_dma_page_offset(uint dma_idx) +{ + return falcon_tx_dma_page_addr(dma_idx) & FALCON_DMA_PAGE_MASK; +} + +/*! 
returns an offset within a "page" of the RX DMA doorbell */ +static inline int falcon_rx_dma_page_offset(uint dma_idx) +{ + return falcon_rx_dma_page_addr(dma_idx) & FALCON_DMA_PAGE_MASK; +} + +/*---------------------------------------------------------------------------- + * + * Events + * + *---------------------------------------------------------------------------*/ + +/* Falcon nails down the event queue mappings */ +#define FALCON_EVQ_KERNEL0 (0) /* hardwired for net driver */ +#define FALCON_EVQ_CHAR (4) /* char driver's event queue */ +#define FALCON_EVQ_NONIRQ (5) /* char driver's non interrupting + queue. Subsequent queues are + available for user apps */ + +/* reserved by the drivers */ +#define FALCON_EVQ_TBL_RESERVED (8) + +/* default DMA-Q sizes */ +#define FALCON_DMA_Q_DEFAULT_TX_SIZE 512 + +#define FALCON_DMA_Q_DEFAULT_RX_SIZE 512 + +#define FALCON_DMA_Q_DEFAULT_MMAP \ + (FALCON_DMA_Q_DEFAULT_TX_SIZE * (FALCON_DMA_TX_DESC_BYTES * 2)) + +/*---------------------------------------------------------------------------- + * + * DEBUG - Analyser trigger + * + *---------------------------------------------------------------------------*/ + +static inline void falcon_deadbeef(efhw_ioaddr_t efhw_kva, unsigned what) +{ + writel(what, efhw_kva + 0x300); + mmiowb(); +} +#endif /* __CI_DRIVER_EFAB_HARDWARE_FALCON_H__ */ +/*! \cidoxg_end */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_core.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_core.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,1149 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides EtherFabric NIC - EFXXXX (aka Falcon) core register + * definitions. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#define FALCON_EXTENDED_P_BAR 1 + +/*************---- Bus Interface Unit Registers C Header ----*************/ +#define IOM_IND_ADR_REG_OFST 0x0 /* IO-mapped indirect access address + register */ + #define IOM_AUTO_ADR_INC_EN_LBN 16 + #define IOM_AUTO_ADR_INC_EN_WIDTH 1 + #define IOM_IND_ADR_LBN 0 + #define IOM_IND_ADR_WIDTH 16 +#define IOM_IND_DAT_REG_OFST 0x4 /* IO-mapped indirect access data register */ + #define IOM_IND_DAT_LBN 0 + #define IOM_IND_DAT_WIDTH 32 +#define ADR_REGION_REG_KER_OFST 0x0 /* Address region register */ +#define ADR_REGION_REG_OFST 0x0 /* Address region register */ + #define ADR_REGION3_LBN 96 + #define ADR_REGION3_WIDTH 18 + #define ADR_REGION2_LBN 64 + #define ADR_REGION2_WIDTH 18 + #define ADR_REGION1_LBN 32 + #define ADR_REGION1_WIDTH 18 + #define ADR_REGION0_LBN 0 + #define ADR_REGION0_WIDTH 18 +#define INT_EN_REG_KER_OFST 0x10 /* Kernel driver Interrupt enable register */ + #define KER_INT_CHAR_LBN 4 + #define KER_INT_CHAR_WIDTH 1 + #define KER_INT_KER_LBN 3 + #define KER_INT_KER_WIDTH 1 + #define ILL_ADR_ERR_INT_EN_KER_LBN 2 + #define ILL_ADR_ERR_INT_EN_KER_WIDTH 1 + #define SRM_PERR_INT_EN_KER_LBN 1 + #define SRM_PERR_INT_EN_KER_WIDTH 1 + #define DRV_INT_EN_KER_LBN 0 + #define DRV_INT_EN_KER_WIDTH 1 +#define INT_EN_REG_CHAR_OFST 0x20 /* Char Driver interrupt enable register */ + #define CHAR_INT_CHAR_LBN 4 + #define CHAR_INT_CHAR_WIDTH 1 + #define CHAR_INT_KER_LBN 3 + #define CHAR_INT_KER_WIDTH 1 + #define ILL_ADR_ERR_INT_EN_CHAR_LBN 2 + #define ILL_ADR_ERR_INT_EN_CHAR_WIDTH 1 + #define SRM_PERR_INT_EN_CHAR_LBN 1 + #define SRM_PERR_INT_EN_CHAR_WIDTH 1 + #define DRV_INT_EN_CHAR_LBN 0 + #define DRV_INT_EN_CHAR_WIDTH 1 +#define INT_ADR_REG_KER_OFST 0x30 /* Interrupt host address for Kernel driver */ + #define INT_ADR_KER_LBN 0 + #define INT_ADR_KER_WIDTH 64 + #define DRV_INT_KER_LBN 32 + #define DRV_INT_KER_WIDTH 1 + #define EV_FF_HALF_INT_KER_LBN 3 + #define EV_FF_HALF_INT_KER_WIDTH 1 + #define EV_FF_FULL_INT_KER_LBN 2 + #define EV_FF_FULL_INT_KER_WIDTH 1 + #define ILL_ADR_ERR_INT_KER_LBN 1 + #define ILL_ADR_ERR_INT_KER_WIDTH 1 + #define SRAM_PERR_INT_KER_LBN 0 + #define SRAM_PERR_INT_KER_WIDTH 1 +#define INT_ADR_REG_CHAR_OFST 0x40 /* Interrupt host address for Char driver */ + #define INT_ADR_CHAR_LBN 0 + #define INT_ADR_CHAR_WIDTH 64 + #define DRV_INT_CHAR_LBN 32 + #define DRV_INT_CHAR_WIDTH 1 + #define EV_FF_HALF_INT_CHAR_LBN 3 + #define EV_FF_HALF_INT_CHAR_WIDTH 1 + #define EV_FF_FULL_INT_CHAR_LBN 2 + #define EV_FF_FULL_INT_CHAR_WIDTH 1 + #define ILL_ADR_ERR_INT_CHAR_LBN 1 + #define ILL_ADR_ERR_INT_CHAR_WIDTH 1 + #define SRAM_PERR_INT_CHAR_LBN 0 + #define SRAM_PERR_INT_CHAR_WIDTH 1 +#define INT_ISR0_B0_OFST 0x90 /* B0 only */ +#define INT_ISR1_B0_OFST 0xA0 +#define INT_ACK_REG_KER_A1_OFST 0x50 /* Kernel interrupt acknowledge register */ + #define RESERVED_LBN 0 + #define RESERVED_WIDTH 32 +#define INT_ACK_REG_CHAR_A1_OFST 0x60 /* CHAR interrupt acknowledge register */ + #define RESERVED_LBN 0 + #define RESERVED_WIDTH 32 +/*************---- Global CSR Registers C Header ----*************/ +#define STRAP_REG_KER_OFST 0x200 /* ASIC strap status register */ +#define STRAP_REG_OFST 0x200 /* ASIC strap status register */ + #define ONCHIP_SRAM_LBN 16 + #define 
ONCHIP_SRAM_WIDTH 0 + #define STRAP_ISCSI_EN_LBN 3 + #define STRAP_ISCSI_EN_WIDTH 1 + #define STRAP_PINS_LBN 0 + #define STRAP_PINS_WIDTH 3 +#define GPIO_CTL_REG_KER_OFST 0x210 /* GPIO control register */ +#define GPIO_CTL_REG_OFST 0x210 /* GPIO control register */ + #define GPIO_OEN_LBN 24 + #define GPIO_OEN_WIDTH 4 + #define GPIO_OUT_LBN 16 + #define GPIO_OUT_WIDTH 4 + #define GPIO_IN_LBN 8 + #define GPIO_IN_WIDTH 4 + #define GPIO_PWRUP_VALUE_LBN 0 + #define GPIO_PWRUP_VALUE_WIDTH 4 +#define GLB_CTL_REG_KER_OFST 0x220 /* Global control register */ +#define GLB_CTL_REG_OFST 0x220 /* Global control register */ + #define SWRST_LBN 0 + #define SWRST_WIDTH 1 +#define FATAL_INTR_REG_KER_OFST 0x230 /* Fatal interrupt register for Kernel */ + #define PCI_BUSERR_INT_KER_EN_LBN 43 + #define PCI_BUSERR_INT_KER_EN_WIDTH 1 + #define SRAM_OOB_INT_KER_EN_LBN 42 + #define SRAM_OOB_INT_KER_EN_WIDTH 1 + #define BUFID_OOB_INT_KER_EN_LBN 41 + #define BUFID_OOB_INT_KER_EN_WIDTH 1 + #define MEM_PERR_INT_KER_EN_LBN 40 + #define MEM_PERR_INT_KER_EN_WIDTH 1 + #define RBUF_OWN_INT_KER_EN_LBN 39 + #define RBUF_OWN_INT_KER_EN_WIDTH 1 + #define TBUF_OWN_INT_KER_EN_LBN 38 + #define TBUF_OWN_INT_KER_EN_WIDTH 1 + #define RDESCQ_OWN_INT_KER_EN_LBN 37 + #define RDESCQ_OWN_INT_KER_EN_WIDTH 1 + #define TDESCQ_OWN_INT_KER_EN_LBN 36 + #define TDESCQ_OWN_INT_KER_EN_WIDTH 1 + #define EVQ_OWN_INT_KER_EN_LBN 35 + #define EVQ_OWN_INT_KER_EN_WIDTH 1 + #define EVFF_OFLO_INT_KER_EN_LBN 34 + #define EVFF_OFLO_INT_KER_EN_WIDTH 1 + #define ILL_ADR_INT_KER_EN_LBN 33 + #define ILL_ADR_INT_KER_EN_WIDTH 1 + #define SRM_PERR_INT_KER_EN_LBN 32 + #define SRM_PERR_INT_KER_EN_WIDTH 1 + #define PCI_BUSERR_INT_KER_LBN 11 + #define PCI_BUSERR_INT_KER_WIDTH 1 + #define SRAM_OOB_INT_KER_LBN 10 + #define SRAM_OOB_INT_KER_WIDTH 1 + #define BUFID_OOB_INT_KER_LBN 9 + #define BUFID_OOB_INT_KER_WIDTH 1 + #define MEM_PERR_INT_KER_LBN 8 + #define MEM_PERR_INT_KER_WIDTH 1 + #define RBUF_OWN_INT_KER_LBN 7 + #define RBUF_OWN_INT_KER_WIDTH 1 + #define TBUF_OWN_INT_KER_LBN 6 + #define TBUF_OWN_INT_KER_WIDTH 1 + #define RDESCQ_OWN_INT_KER_LBN 5 + #define RDESCQ_OWN_INT_KER_WIDTH 1 + #define TDESCQ_OWN_INT_KER_LBN 4 + #define TDESCQ_OWN_INT_KER_WIDTH 1 + #define EVQ_OWN_INT_KER_LBN 3 + #define EVQ_OWN_INT_KER_WIDTH 1 + #define EVFF_OFLO_INT_KER_LBN 2 + #define EVFF_OFLO_INT_KER_WIDTH 1 + #define ILL_ADR_INT_KER_LBN 1 + #define ILL_ADR_INT_KER_WIDTH 1 + #define SRM_PERR_INT_KER_LBN 0 + #define SRM_PERR_INT_KER_WIDTH 1 +#define FATAL_INTR_REG_OFST 0x240 /* Fatal interrupt register for Char */ + #define PCI_BUSERR_INT_CHAR_EN_LBN 43 + #define PCI_BUSERR_INT_CHAR_EN_WIDTH 1 + #define SRAM_OOB_INT_CHAR_EN_LBN 42 + #define SRAM_OOB_INT_CHAR_EN_WIDTH 1 + #define BUFID_OOB_INT_CHAR_EN_LBN 41 + #define BUFID_OOB_INT_CHAR_EN_WIDTH 1 + #define MEM_PERR_INT_CHAR_EN_LBN 40 + #define MEM_PERR_INT_CHAR_EN_WIDTH 1 + #define RBUF_OWN_INT_CHAR_EN_LBN 39 + #define RBUF_OWN_INT_CHAR_EN_WIDTH 1 + #define TBUF_OWN_INT_CHAR_EN_LBN 38 + #define TBUF_OWN_INT_CHAR_EN_WIDTH 1 + #define RDESCQ_OWN_INT_CHAR_EN_LBN 37 + #define RDESCQ_OWN_INT_CHAR_EN_WIDTH 1 + #define TDESCQ_OWN_INT_CHAR_EN_LBN 36 + #define TDESCQ_OWN_INT_CHAR_EN_WIDTH 1 + #define EVQ_OWN_INT_CHAR_EN_LBN 35 + #define EVQ_OWN_INT_CHAR_EN_WIDTH 1 + #define EVFF_OFLO_INT_CHAR_EN_LBN 34 + #define EVFF_OFLO_INT_CHAR_EN_WIDTH 1 + #define ILL_ADR_INT_CHAR_EN_LBN 33 + #define ILL_ADR_INT_CHAR_EN_WIDTH 1 + #define SRM_PERR_INT_CHAR_EN_LBN 32 + #define SRM_PERR_INT_CHAR_EN_WIDTH 1 + #define FATAL_INTR_REG_EN_BITS 0xffffffffffffffffULL + 
#define PCI_BUSERR_INT_CHAR_LBN 11 + #define PCI_BUSERR_INT_CHAR_WIDTH 1 + #define SRAM_OOB_INT_CHAR_LBN 10 + #define SRAM_OOB_INT_CHAR_WIDTH 1 + #define BUFID_OOB_INT_CHAR_LBN 9 + #define BUFID_OOB_INT_CHAR_WIDTH 1 + #define MEM_PERR_INT_CHAR_LBN 8 + #define MEM_PERR_INT_CHAR_WIDTH 1 + #define RBUF_OWN_INT_CHAR_LBN 7 + #define RBUF_OWN_INT_CHAR_WIDTH 1 + #define TBUF_OWN_INT_CHAR_LBN 6 + #define TBUF_OWN_INT_CHAR_WIDTH 1 + #define RDESCQ_OWN_INT_CHAR_LBN 5 + #define RDESCQ_OWN_INT_CHAR_WIDTH 1 + #define TDESCQ_OWN_INT_CHAR_LBN 4 + #define TDESCQ_OWN_INT_CHAR_WIDTH 1 + #define EVQ_OWN_INT_CHAR_LBN 3 + #define EVQ_OWN_INT_CHAR_WIDTH 1 + #define EVFF_OFLO_INT_CHAR_LBN 2 + #define EVFF_OFLO_INT_CHAR_WIDTH 1 + #define ILL_ADR_INT_CHAR_LBN 1 + #define ILL_ADR_INT_CHAR_WIDTH 1 + #define SRM_PERR_INT_CHAR_LBN 0 + #define SRM_PERR_INT_CHAR_WIDTH 1 +#define DP_CTRL_REG_OFST 0x250 /* Datapath control register */ + #define FLS_EVQ_ID_LBN 0 + #define FLS_EVQ_ID_WIDTH 12 +#define MEM_STAT_REG_KER_OFST 0x260 /* Memory status register */ +#define MEM_STAT_REG_OFST 0x260 /* Memory status register */ + #define MEM_PERR_VEC_LBN 53 + #define MEM_PERR_VEC_WIDTH 38 + #define MBIST_CORR_LBN 38 + #define MBIST_CORR_WIDTH 15 + #define MBIST_ERR_LBN 0 + #define MBIST_ERR_WIDTH 38 +#define DEBUG_REG_KER_OFST 0x270 /* Debug register */ +#define DEBUG_REG_OFST 0x270 /* Debug register */ + #define DEBUG_BLK_SEL2_LBN 47 + #define DEBUG_BLK_SEL2_WIDTH 3 + #define DEBUG_BLK_SEL1_LBN 44 + #define DEBUG_BLK_SEL1_WIDTH 3 + #define DEBUG_BLK_SEL0_LBN 41 + #define DEBUG_BLK_SEL0_WIDTH 3 + #define MISC_DEBUG_ADDR_LBN 36 + #define MISC_DEBUG_ADDR_WIDTH 5 + #define SERDES_DEBUG_ADDR_LBN 31 + #define SERDES_DEBUG_ADDR_WIDTH 5 + #define EM_DEBUG_ADDR_LBN 26 + #define EM_DEBUG_ADDR_WIDTH 5 + #define SR_DEBUG_ADDR_LBN 21 + #define SR_DEBUG_ADDR_WIDTH 5 + #define EV_DEBUG_ADDR_LBN 16 + #define EV_DEBUG_ADDR_WIDTH 5 + #define RX_DEBUG_ADDR_LBN 11 + #define RX_DEBUG_ADDR_WIDTH 5 + #define TX_DEBUG_ADDR_LBN 6 + #define TX_DEBUG_ADDR_WIDTH 5 + #define BIU_DEBUG_ADDR_LBN 1 + #define BIU_DEBUG_ADDR_WIDTH 5 + #define DEBUG_EN_LBN 0 + #define DEBUG_EN_WIDTH 1 +#define DRIVER_REG0_KER_OFST 0x280 /* Driver scratch register 0 */ +#define DRIVER_REG0_OFST 0x280 /* Driver scratch register 0 */ + #define DRIVER_DW0_LBN 0 + #define DRIVER_DW0_WIDTH 32 +#define DRIVER_REG1_KER_OFST 0x290 /* Driver scratch register 1 */ +#define DRIVER_REG1_OFST 0x290 /* Driver scratch register 1 */ + #define DRIVER_DW1_LBN 0 + #define DRIVER_DW1_WIDTH 32 +#define DRIVER_REG2_KER_OFST 0x2A0 /* Driver scratch register 2 */ +#define DRIVER_REG2_OFST 0x2A0 /* Driver scratch register 2 */ + #define DRIVER_DW2_LBN 0 + #define DRIVER_DW2_WIDTH 32 +#define DRIVER_REG3_KER_OFST 0x2B0 /* Driver scratch register 3 */ +#define DRIVER_REG3_OFST 0x2B0 /* Driver scratch register 3 */ + #define DRIVER_DW3_LBN 0 + #define DRIVER_DW3_WIDTH 32 +#define DRIVER_REG4_KER_OFST 0x2C0 /* Driver scratch register 4 */ +#define DRIVER_REG4_OFST 0x2C0 /* Driver scratch register 4 */ + #define DRIVER_DW4_LBN 0 + #define DRIVER_DW4_WIDTH 32 +#define DRIVER_REG5_KER_OFST 0x2D0 /* Driver scratch register 5 */ +#define DRIVER_REG5_OFST 0x2D0 /* Driver scratch register 5 */ + #define DRIVER_DW5_LBN 0 + #define DRIVER_DW5_WIDTH 32 +#define DRIVER_REG6_KER_OFST 0x2E0 /* Driver scratch register 6 */ +#define DRIVER_REG6_OFST 0x2E0 /* Driver scratch register 6 */ + #define DRIVER_DW6_LBN 0 + #define DRIVER_DW6_WIDTH 32 +#define DRIVER_REG7_KER_OFST 0x2F0 /* Driver scratch register 7 */ +#define 
DRIVER_REG7_OFST 0x2F0 /* Driver scratch register 7 */ + #define DRIVER_DW7_LBN 0 + #define DRIVER_DW7_WIDTH 32 +#define ALTERA_BUILD_REG_OFST 0x300 /* Altera build register */ +#define ALTERA_BUILD_REG_OFST 0x300 /* Altera build register */ + #define ALTERA_BUILD_VER_LBN 0 + #define ALTERA_BUILD_VER_WIDTH 32 + +/* so called CSR spare register + - contains separate parity enable bits for the various internal memory + blocks */ +#define MEM_PARITY_ERR_EN_REG_KER 0x310 +#define MEM_PARITY_ALL_BLOCKS_EN_LBN 64 +#define MEM_PARITY_ALL_BLOCKS_EN_WIDTH 38 +#define MEM_PARITY_TX_DATA_EN_LBN 72 +#define MEM_PARITY_TX_DATA_EN_WIDTH 2 + +/*************---- Event & Timer Module Registers C Header ----*************/ + +#if FALCON_EXTENDED_P_BAR +#define EVQ_RPTR_REG_KER_OFST 0x11B00 /* Event queue read pointer register */ +#else +#define EVQ_RPTR_REG_KER_OFST 0x1B00 /* Event queue read pointer register */ +#endif + +#define EVQ_RPTR_REG_OFST 0xFA0000 /* Event queue read pointer register + array. */ + #define EVQ_RPTR_LBN 0 + #define EVQ_RPTR_WIDTH 15 + +#if FALCON_EXTENDED_P_BAR +#define EVQ_PTR_TBL_KER_OFST 0x11A00 /* Event queue pointer table for kernel + access */ +#else +#define EVQ_PTR_TBL_KER_OFST 0x1A00 /* Event queue pointer table for kernel + access */ +#endif + +#define EVQ_PTR_TBL_CHAR_OFST 0xF60000 /* Event queue pointer table for char + direct access */ + #define EVQ_WKUP_OR_INT_EN_LBN 39 + #define EVQ_WKUP_OR_INT_EN_WIDTH 1 + #define EVQ_NXT_WPTR_LBN 24 + #define EVQ_NXT_WPTR_WIDTH 15 + #define EVQ_EN_LBN 23 + #define EVQ_EN_WIDTH 1 + #define EVQ_SIZE_LBN 20 + #define EVQ_SIZE_WIDTH 3 + #define EVQ_BUF_BASE_ID_LBN 0 + #define EVQ_BUF_BASE_ID_WIDTH 20 +#define TIMER_CMD_REG_KER_OFST 0x420 /* Timer table for kernel access. + Page-mapped */ +#define TIMER_CMD_REG_PAGE4_OFST 0x8420 /* Timer table for user-level access. + Page-mapped. For lowest 1K queues. + */ +#define TIMER_CMD_REG_PAGE123K_OFST 0x1000420 /* Timer table for user-level + access. Page-mapped. + For upper 3K queues. 
*/ +#define TIMER_TBL_OFST 0xF70000 /* Timer table for char driver direct access */ + #define TIMER_MODE_LBN 12 + #define TIMER_MODE_WIDTH 2 + #define TIMER_VAL_LBN 0 + #define TIMER_VAL_WIDTH 12 + #define TIMER_MODE_INT_HLDOFF 2 + #define EVQ_BUF_SIZE_LBN 0 + #define EVQ_BUF_SIZE_WIDTH 1 +#define DRV_EV_REG_KER_OFST 0x440 /* Driver generated event register */ +#define DRV_EV_REG_OFST 0x440 /* Driver generated event register */ + #define DRV_EV_QID_LBN 64 + #define DRV_EV_QID_WIDTH 12 + #define DRV_EV_DATA_LBN 0 + #define DRV_EV_DATA_WIDTH 64 +#define EVQ_CTL_REG_KER_OFST 0x450 /* Event queue control register */ +#define EVQ_CTL_REG_OFST 0x450 /* Event queue control register */ + #define RX_EVQ_WAKEUP_MASK_B0_LBN 15 + #define RX_EVQ_WAKEUP_MASK_B0_WIDTH 6 + #define EVQ_OWNERR_CTL_LBN 14 + #define EVQ_OWNERR_CTL_WIDTH 1 + #define EVQ_FIFO_AF_TH_LBN 8 + #define EVQ_FIFO_AF_TH_WIDTH 6 + #define EVQ_FIFO_NOTAF_TH_LBN 0 + #define EVQ_FIFO_NOTAF_TH_WIDTH 6 +/*************---- SRAM Module Registers C Header ----*************/ +#define BUF_TBL_CFG_REG_KER_OFST 0x600 /* Buffer table configuration register */ +#define BUF_TBL_CFG_REG_OFST 0x600 /* Buffer table configuration register */ + #define BUF_TBL_MODE_LBN 3 + #define BUF_TBL_MODE_WIDTH 1 +#define SRM_RX_DC_CFG_REG_KER_OFST 0x610 /* SRAM receive descriptor cache + configuration register */ +#define SRM_RX_DC_CFG_REG_OFST 0x610 /* SRAM receive descriptor cache + configuration register */ + #define SRM_RX_DC_BASE_ADR_LBN 0 + #define SRM_RX_DC_BASE_ADR_WIDTH 21 +#define SRM_TX_DC_CFG_REG_KER_OFST 0x620 /* SRAM transmit descriptor cache + configuration register */ +#define SRM_TX_DC_CFG_REG_OFST 0x620 /* SRAM transmit descriptor cache + configuration register */ + #define SRM_TX_DC_BASE_ADR_LBN 0 + #define SRM_TX_DC_BASE_ADR_WIDTH 21 +#define SRM_CFG_REG_KER_OFST 0x630 /* SRAM configuration register */ +#define SRM_CFG_REG_OFST 0x630 /* SRAM configuration register */ + #define SRAM_OOB_ADR_INTEN_LBN 5 + #define SRAM_OOB_ADR_INTEN_WIDTH 1 + #define SRAM_OOB_BUF_INTEN_LBN 4 + #define SRAM_OOB_BUF_INTEN_WIDTH 1 + #define SRAM_BT_INIT_EN_LBN 3 + #define SRAM_BT_INIT_EN_WIDTH 1 + #define SRM_NUM_BANK_LBN 2 + #define SRM_NUM_BANK_WIDTH 1 + #define SRM_BANK_SIZE_LBN 0 + #define SRM_BANK_SIZE_WIDTH 2 +#define BUF_TBL_UPD_REG_KER_OFST 0x650 /* Buffer table update register */ +#define BUF_TBL_UPD_REG_OFST 0x650 /* Buffer table update register */ + #define BUF_UPD_CMD_LBN 63 + #define BUF_UPD_CMD_WIDTH 1 + #define BUF_CLR_CMD_LBN 62 + #define BUF_CLR_CMD_WIDTH 1 + #define BUF_CLR_END_ID_LBN 32 + #define BUF_CLR_END_ID_WIDTH 20 + #define BUF_CLR_START_ID_LBN 0 + #define BUF_CLR_START_ID_WIDTH 20 +#define SRM_UPD_EVQ_REG_KER_OFST 0x660 /* Buffer table update register */ +#define SRM_UPD_EVQ_REG_OFST 0x660 /* Buffer table update register */ + #define SRM_UPD_EVQ_ID_LBN 0 + #define SRM_UPD_EVQ_ID_WIDTH 12 +#define SRAM_PARITY_REG_KER_OFST 0x670 /* SRAM parity register. */ +#define SRAM_PARITY_REG_OFST 0x670 /* SRAM parity register. 
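/*
 * Illustration only, not from the driver sources: the *_LBN/*_WIDTH pairs in
 * this header follow the usual "lowest bit number" plus "field width"
 * convention, so a register image can be composed with generic shift/mask
 * helpers.  The helper names below are hypothetical, and the example assumes
 * every field it touches fits within a single 64-bit word; it sketches a
 * BUF_TBL_UPD_REG value that would clear buffer table entries [start, end].
 */
#include <stdint.h>

static inline uint64_t falcon_field_mask(unsigned int width)
{
        return width < 64 ? (1ULL << width) - 1 : ~0ULL;
}

static inline uint64_t falcon_insert_field(uint64_t reg, unsigned int lbn,
                                           unsigned int width, uint64_t value)
{
        uint64_t mask = falcon_field_mask(width) << lbn;

        return (reg & ~mask) | ((value << lbn) & mask);
}

static inline uint64_t falcon_buf_tbl_clear_cmd(unsigned int start_id,
                                                unsigned int end_id)
{
        uint64_t val = 0;

        val = falcon_insert_field(val, BUF_CLR_CMD_LBN, BUF_CLR_CMD_WIDTH, 1);
        val = falcon_insert_field(val, BUF_CLR_START_ID_LBN,
                                  BUF_CLR_START_ID_WIDTH, start_id);
        val = falcon_insert_field(val, BUF_CLR_END_ID_LBN,
                                  BUF_CLR_END_ID_WIDTH, end_id);
        return val;
}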
*/ + #define FORCE_SRAM_PERR_LBN 0 + #define FORCE_SRAM_PERR_WIDTH 1 + +#if FALCON_EXTENDED_P_BAR +#define BUF_HALF_TBL_KER_OFST 0x18000 /* Buffer table in half buffer table + mode direct access by kernel driver */ +#else +#define BUF_HALF_TBL_KER_OFST 0x8000 /* Buffer table in half buffer table + mode direct access by kernel driver */ +#endif + + +#define BUF_HALF_TBL_OFST 0x800000 /* Buffer table in half buffer table mode + direct access by char driver */ + #define BUF_ADR_HBUF_ODD_LBN 44 + #define BUF_ADR_HBUF_ODD_WIDTH 20 + #define BUF_OWNER_ID_HBUF_ODD_LBN 32 + #define BUF_OWNER_ID_HBUF_ODD_WIDTH 12 + #define BUF_ADR_HBUF_EVEN_LBN 12 + #define BUF_ADR_HBUF_EVEN_WIDTH 20 + #define BUF_OWNER_ID_HBUF_EVEN_LBN 0 + #define BUF_OWNER_ID_HBUF_EVEN_WIDTH 12 + + +#if FALCON_EXTENDED_P_BAR +#define BUF_FULL_TBL_KER_OFST 0x18000 /* Buffer table in full buffer table + mode direct access by kernel driver */ +#else +#define BUF_FULL_TBL_KER_OFST 0x8000 /* Buffer table in full buffer table mode + direct access by kernel driver */ +#endif + + + + +#define BUF_FULL_TBL_OFST 0x800000 /* Buffer table in full buffer table mode + direct access by char driver */ + #define IP_DAT_BUF_SIZE_LBN 50 + #define IP_DAT_BUF_SIZE_WIDTH 1 + #define BUF_ADR_REGION_LBN 48 + #define BUF_ADR_REGION_WIDTH 2 + #define BUF_ADR_FBUF_LBN 14 + #define BUF_ADR_FBUF_WIDTH 34 + #define BUF_OWNER_ID_FBUF_LBN 0 + #define BUF_OWNER_ID_FBUF_WIDTH 14 +#define SRM_DBG_REG_OFST 0x3000000 /* SRAM debug access */ + #define SRM_DBG_LBN 0 + #define SRM_DBG_WIDTH 64 +/*************---- RX Datapath Registers C Header ----*************/ + +#define RX_CFG_REG_KER_OFST 0x800 /* Receive configuration register */ +#define RX_CFG_REG_OFST 0x800 /* Receive configuration register */ + +#if !defined(FALCON_64K_RXFIFO) && !defined(FALCON_PRE_02020029) +# if !defined(FALCON_128K_RXFIFO) +# define FALCON_128K_RXFIFO +# endif +#endif + +#if defined(FALCON_128K_RXFIFO) + +/* new for B0 */ + #define RX_TOEP_TCP_SUPPRESS_B0_LBN 48 + #define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1 + #define RX_INGR_EN_B0_LBN 47 + #define RX_INGR_EN_B0_WIDTH 1 + #define RX_TOEP_IPV4_B0_LBN 46 + #define RX_TOEP_IPV4_B0_WIDTH 1 + #define RX_HASH_ALG_B0_LBN 45 + #define RX_HASH_ALG_B0_WIDTH 1 + #define RX_HASH_INSERT_HDR_B0_LBN 44 + #define RX_HASH_INSERT_HDR_B0_WIDTH 1 +/* moved for B0 */ + #define RX_DESC_PUSH_EN_B0_LBN 43 + #define RX_DESC_PUSH_EN_B0_WIDTH 1 + #define RX_RDW_PATCH_EN_LBN 42 /* Non head of line blocking */ + #define RX_RDW_PATCH_EN_WIDTH 1 + #define RX_PCI_BURST_SIZE_B0_LBN 39 + #define RX_PCI_BURST_SIZE_B0_WIDTH 3 + #define RX_OWNERR_CTL_B0_LBN 38 + #define RX_OWNERR_CTL_B0_WIDTH 1 + #define RX_XON_TX_TH_B0_LBN 33 + #define RX_XON_TX_TH_B0_WIDTH 5 + #define RX_XOFF_TX_TH_B0_LBN 28 + #define RX_XOFF_TX_TH_B0_WIDTH 5 + #define RX_USR_BUF_SIZE_B0_LBN 19 + #define RX_USR_BUF_SIZE_B0_WIDTH 9 + #define RX_XON_MAC_TH_B0_LBN 10 + #define RX_XON_MAC_TH_B0_WIDTH 9 + #define RX_XOFF_MAC_TH_B0_LBN 1 + #define RX_XOFF_MAC_TH_B0_WIDTH 9 + #define RX_XOFF_MAC_EN_B0_LBN 0 + #define RX_XOFF_MAC_EN_B0_WIDTH 1 + +#elif !defined(FALCON_PRE_02020029) +/* new for B0 */ + #define RX_TOEP_TCP_SUPPRESS_B0_LBN 46 + #define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1 + #define RX_INGR_EN_B0_LBN 45 + #define RX_INGR_EN_B0_WIDTH 1 + #define RX_TOEP_IPV4_B0_LBN 44 + #define RX_TOEP_IPV4_B0_WIDTH 1 + #define RX_HASH_ALG_B0_LBN 43 + #define RX_HASH_ALG_B0_WIDTH 41 + #define RX_HASH_INSERT_HDR_B0_LBN 42 + #define RX_HASH_INSERT_HDR_B0_WIDTH 1 +/* moved for B0 */ + #define RX_DESC_PUSH_EN_B0_LBN 41 + #define 
RX_DESC_PUSH_EN_B0_WIDTH 1 + #define RX_PCI_BURST_SIZE_B0_LBN 37 + #define RX_PCI_BURST_SIZE_B0_WIDTH 3 + #define RX_OWNERR_CTL_B0_LBN 36 + #define RX_OWNERR_CTL_B0_WIDTH 1 + #define RX_XON_TX_TH_B0_LBN 31 + #define RX_XON_TX_TH_B0_WIDTH 5 + #define RX_XOFF_TX_TH_B0_LBN 26 + #define RX_XOFF_TX_TH_B0_WIDTH 5 + #define RX_USR_BUF_SIZE_B0_LBN 17 + #define RX_USR_BUF_SIZE_B0_WIDTH 9 + #define RX_XON_MAC_TH_B0_LBN 9 + #define RX_XON_MAC_TH_B0_WIDTH 8 + #define RX_XOFF_MAC_TH_B0_LBN 1 + #define RX_XOFF_MAC_TH_B0_WIDTH 8 + #define RX_XOFF_MAC_EN_B0_LBN 0 + #define RX_XOFF_MAC_EN_B0_WIDTH 1 + +#else +/* new for B0 */ + #define RX_TOEP_TCP_SUPPRESS_B0_LBN 44 + #define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1 + #define RX_INGR_EN_B0_LBN 43 + #define RX_INGR_EN_B0_WIDTH 1 + #define RX_TOEP_IPV4_B0_LBN 42 + #define RX_TOEP_IPV4_B0_WIDTH 1 + #define RX_HASH_ALG_B0_LBN 41 + #define RX_HASH_ALG_B0_WIDTH 41 + #define RX_HASH_INSERT_HDR_B0_LBN 40 + #define RX_HASH_INSERT_HDR_B0_WIDTH 1 +/* moved for B0 */ + #define RX_DESC_PUSH_EN_B0_LBN 35 + #define RX_DESC_PUSH_EN_B0_WIDTH 1 + #define RX_PCI_BURST_SIZE_B0_LBN 35 + #define RX_PCI_BURST_SIZE_B0_WIDTH 2 + #define RX_OWNERR_CTL_B0_LBN 34 + #define RX_OWNERR_CTL_B0_WIDTH 1 + #define RX_XON_TX_TH_B0_LBN 29 + #define RX_XON_TX_TH_B0_WIDTH 5 + #define RX_XOFF_TX_TH_B0_LBN 24 + #define RX_XOFF_TX_TH_B0_WIDTH 5 + #define RX_USR_BUF_SIZE_B0_LBN 15 + #define RX_USR_BUF_SIZE_B0_WIDTH 9 + #define RX_XON_MAC_TH_B0_LBN 8 + #define RX_XON_MAC_TH_B0_WIDTH 7 + #define RX_XOFF_MAC_TH_B0_LBN 1 + #define RX_XOFF_MAC_TH_B0_WIDTH 7 + #define RX_XOFF_MAC_EN_B0_LBN 0 + #define RX_XOFF_MAC_EN_B0_WIDTH 1 + +#endif + +/* A0/A1 */ + #define RX_PUSH_EN_A1_LBN 35 + #define RX_PUSH_EN_A1_WIDTH 1 + #define RX_PCI_BURST_SIZE_A1_LBN 31 + #define RX_PCI_BURST_SIZE_A1_WIDTH 3 + #define RX_OWNERR_CTL_A1_LBN 30 + #define RX_OWNERR_CTL_A1_WIDTH 1 + #define RX_XON_TX_TH_A1_LBN 25 + #define RX_XON_TX_TH_A1_WIDTH 5 + #define RX_XOFF_TX_TH_A1_LBN 20 + #define RX_XOFF_TX_TH_A1_WIDTH 5 + #define RX_USR_BUF_SIZE_A1_LBN 11 + #define RX_USR_BUF_SIZE_A1_WIDTH 9 + #define RX_XON_MAC_TH_A1_LBN 6 + #define RX_XON_MAC_TH_A1_WIDTH 5 + #define RX_XOFF_MAC_TH_A1_LBN 1 + #define RX_XOFF_MAC_TH_A1_WIDTH 5 + #define RX_XOFF_MAC_EN_A1_LBN 0 + #define RX_XOFF_MAC_EN_A1_WIDTH 1 + +#define RX_FILTER_CTL_REG_OFST 0x810 /* Receive filter control registers */ + #define SCATTER_ENBL_NO_MATCH_Q_B0_LBN 40 + #define SCATTER_ENBL_NO_MATCH_Q_B0_WIDTH 1 + #define UDP_FULL_SRCH_LIMIT_LBN 32 + #define UDP_FULL_SRCH_LIMIT_WIDTH 8 + #define NUM_KER_LBN 24 + #define NUM_KER_WIDTH 2 + #define UDP_WILD_SRCH_LIMIT_LBN 16 + #define UDP_WILD_SRCH_LIMIT_WIDTH 8 + #define TCP_WILD_SRCH_LIMIT_LBN 8 + #define TCP_WILD_SRCH_LIMIT_WIDTH 8 + #define TCP_FULL_SRCH_LIMIT_LBN 0 + #define TCP_FULL_SRCH_LIMIT_WIDTH 8 +#define RX_FLUSH_DESCQ_REG_KER_OFST 0x820 /* Receive flush descriptor queue + register */ +#define RX_FLUSH_DESCQ_REG_OFST 0x820 /* Receive flush descriptor queue + register */ + #define RX_FLUSH_DESCQ_CMD_LBN 24 + #define RX_FLUSH_DESCQ_CMD_WIDTH 1 + #define RX_FLUSH_EVQ_ID_LBN 12 + #define RX_FLUSH_EVQ_ID_WIDTH 12 + #define RX_FLUSH_DESCQ_LBN 0 + #define RX_FLUSH_DESCQ_WIDTH 12 +#define RX_DESC_UPD_REG_KER_OFST 0x830 /* Kernel receive descriptor update + register. Page-mapped */ +#define RX_DESC_UPD_REG_PAGE4_OFST 0x8830 /* Char & user receive descriptor + update register. Page-mapped. + For lowest 1K queues. */ +#define RX_DESC_UPD_REG_PAGE123K_OFST 0x1000830 /* Char & user receive + descriptor update register. + Page-mapped. 
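/*
 * Illustration only, not from the driver sources: a sketch of composing a
 * receive flush request from the RX_FLUSH_DESCQ_REG fields above.  The
 * function name is hypothetical; rxq_id and evq_id are assumed to fit their
 * 12-bit fields, and the value is assumed to be written as one 64-bit word.
 */
#include <stdint.h>

static inline uint64_t falcon_rx_flush_descq_cmd(unsigned int rxq_id,
                                                 unsigned int evq_id)
{
        return (1ULL << RX_FLUSH_DESCQ_CMD_LBN) |
               ((uint64_t)(evq_id & ((1u << RX_FLUSH_EVQ_ID_WIDTH) - 1))
                        << RX_FLUSH_EVQ_ID_LBN) |
               ((uint64_t)(rxq_id & ((1u << RX_FLUSH_DESCQ_WIDTH) - 1))
                        << RX_FLUSH_DESCQ_LBN);
}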
For upper + 3K queues. */ + #define RX_DESC_WPTR_LBN 96 + #define RX_DESC_WPTR_WIDTH 12 + #define RX_DESC_PUSH_CMD_LBN 95 + #define RX_DESC_PUSH_CMD_WIDTH 1 + #define RX_DESC_LBN 0 + #define RX_DESC_WIDTH 64 + #define RX_KER_DESC_LBN 0 + #define RX_KER_DESC_WIDTH 64 + #define RX_USR_DESC_LBN 0 + #define RX_USR_DESC_WIDTH 32 +#define RX_DC_CFG_REG_KER_OFST 0x840 /* Receive descriptor cache + configuration register */ +#define RX_DC_CFG_REG_OFST 0x840 /* Receive descriptor cache + configuration register */ + #define RX_DC_SIZE_LBN 0 + #define RX_DC_SIZE_WIDTH 2 +#define RX_DC_PF_WM_REG_KER_OFST 0x850 /* Receive descriptor cache pre-fetch + watermark register */ +#define RX_DC_PF_WM_REG_OFST 0x850 /* Receive descriptor cache pre-fetch + watermark register */ + #define RX_DC_PF_LWM_LO_LBN 0 + #define RX_DC_PF_LWM_LO_WIDTH 6 + +#define RX_RSS_TKEY_B0_OFST 0x860 /* RSS Toeplitz hash key (B0 only) */ + +#define RX_NODESC_DROP_REG 0x880 + #define RX_NODESC_DROP_CNT_LBN 0 + #define RX_NODESC_DROP_CNT_WIDTH 16 + +#define XM_TX_CFG_REG_OFST 0x1230 + #define XM_AUTO_PAD_LBN 5 + #define XM_AUTO_PAD_WIDTH 1 + +#define RX_FILTER_TBL0_OFST 0xF00000 /* Receive filter table - even entries */ + #define RSS_EN_0_B0_LBN 110 + #define RSS_EN_0_B0_WIDTH 1 + #define SCATTER_EN_0_B0_LBN 109 + #define SCATTER_EN_0_B0_WIDTH 1 + #define TCP_UDP_0_LBN 108 + #define TCP_UDP_0_WIDTH 1 + #define RXQ_ID_0_LBN 96 + #define RXQ_ID_0_WIDTH 12 + #define DEST_IP_0_LBN 64 + #define DEST_IP_0_WIDTH 32 + #define DEST_PORT_TCP_0_LBN 48 + #define DEST_PORT_TCP_0_WIDTH 16 + #define SRC_IP_0_LBN 16 + #define SRC_IP_0_WIDTH 32 + #define SRC_TCP_DEST_UDP_0_LBN 0 + #define SRC_TCP_DEST_UDP_0_WIDTH 16 +#define RX_FILTER_TBL1_OFST 0xF00010 /* Receive filter table - odd entries */ + #define RSS_EN_1_B0_LBN 110 + #define RSS_EN_1_B0_WIDTH 1 + #define SCATTER_EN_1_B0_LBN 109 + #define SCATTER_EN_1_B0_WIDTH 1 + #define TCP_UDP_1_LBN 108 + #define TCP_UDP_1_WIDTH 1 + #define RXQ_ID_1_LBN 96 + #define RXQ_ID_1_WIDTH 12 + #define DEST_IP_1_LBN 64 + #define DEST_IP_1_WIDTH 32 + #define DEST_PORT_TCP_1_LBN 48 + #define DEST_PORT_TCP_1_WIDTH 16 + #define SRC_IP_1_LBN 16 + #define SRC_IP_1_WIDTH 32 + #define SRC_TCP_DEST_UDP_1_LBN 0 + #define SRC_TCP_DEST_UDP_1_WIDTH 16 + +#if FALCON_EXTENDED_P_BAR +#define RX_DESC_PTR_TBL_KER_OFST 0x11800 /* Receive descriptor pointer + kernel access */ +#else +#define RX_DESC_PTR_TBL_KER_OFST 0x1800 /* Receive descriptor pointer + kernel access */ +#endif + + +#define RX_DESC_PTR_TBL_OFST 0xF40000 /* Receive descriptor pointer table */ + #define RX_ISCSI_DDIG_EN_LBN 88 + #define RX_ISCSI_DDIG_EN_WIDTH 1 + #define RX_ISCSI_HDIG_EN_LBN 87 + #define RX_ISCSI_HDIG_EN_WIDTH 1 + #define RX_DESC_PREF_ACT_LBN 86 + #define RX_DESC_PREF_ACT_WIDTH 1 + #define RX_DC_HW_RPTR_LBN 80 + #define RX_DC_HW_RPTR_WIDTH 6 + #define RX_DESCQ_HW_RPTR_LBN 68 + #define RX_DESCQ_HW_RPTR_WIDTH 12 + #define RX_DESCQ_SW_WPTR_LBN 56 + #define RX_DESCQ_SW_WPTR_WIDTH 12 + #define RX_DESCQ_BUF_BASE_ID_LBN 36 + #define RX_DESCQ_BUF_BASE_ID_WIDTH 20 + #define RX_DESCQ_EVQ_ID_LBN 24 + #define RX_DESCQ_EVQ_ID_WIDTH 12 + #define RX_DESCQ_OWNER_ID_LBN 10 + #define RX_DESCQ_OWNER_ID_WIDTH 14 + #define RX_DESCQ_LABEL_LBN 5 + #define RX_DESCQ_LABEL_WIDTH 5 + #define RX_DESCQ_SIZE_LBN 3 + #define RX_DESCQ_SIZE_WIDTH 2 + #define RX_DESCQ_TYPE_LBN 2 + #define RX_DESCQ_TYPE_WIDTH 1 + #define RX_DESCQ_JUMBO_LBN 1 + #define RX_DESCQ_JUMBO_WIDTH 1 + #define RX_DESCQ_EN_LBN 0 + #define RX_DESCQ_EN_WIDTH 1 + + +#define RX_RSS_INDIR_TBL_B0_OFST 0xFB0000 /* RSS 
indirection table (B0 only) */ + #define RX_RSS_INDIR_ENT_B0_LBN 0 + #define RX_RSS_INDIR_ENT_B0_WIDTH 6 + +/*************---- TX Datapath Registers C Header ----*************/ +#define TX_FLUSH_DESCQ_REG_KER_OFST 0xA00 /* Transmit flush descriptor + queue register */ +#define TX_FLUSH_DESCQ_REG_OFST 0xA00 /* Transmit flush descriptor queue + register */ + #define TX_FLUSH_DESCQ_CMD_LBN 12 + #define TX_FLUSH_DESCQ_CMD_WIDTH 1 + #define TX_FLUSH_DESCQ_LBN 0 + #define TX_FLUSH_DESCQ_WIDTH 12 +#define TX_DESC_UPD_REG_KER_OFST 0xA10 /* Kernel transmit descriptor update + register. Page-mapped */ +#define TX_DESC_UPD_REG_PAGE4_OFST 0x8A10 /* Char & user transmit descriptor + update register. Page-mapped */ +#define TX_DESC_UPD_REG_PAGE123K_OFST 0x1000A10 /* Char & user transmit + descriptor update register. + Page-mapped */ + #define TX_DESC_WPTR_LBN 96 + #define TX_DESC_WPTR_WIDTH 12 + #define TX_DESC_PUSH_CMD_LBN 95 + #define TX_DESC_PUSH_CMD_WIDTH 1 + #define TX_DESC_LBN 0 + #define TX_DESC_WIDTH 95 + #define TX_KER_DESC_LBN 0 + #define TX_KER_DESC_WIDTH 64 + #define TX_USR_DESC_LBN 0 + #define TX_USR_DESC_WIDTH 64 +#define TX_DC_CFG_REG_KER_OFST 0xA20 /* Transmit descriptor cache + configuration register */ +#define TX_DC_CFG_REG_OFST 0xA20 /* Transmit descriptor cache configuration + register */ + #define TX_DC_SIZE_LBN 0 + #define TX_DC_SIZE_WIDTH 2 + +#if FALCON_EXTENDED_P_BAR +#define TX_DESC_PTR_TBL_KER_OFST 0x11900 /* Transmit descriptor pointer. */ +#else +#define TX_DESC_PTR_TBL_KER_OFST 0x1900 /* Transmit descriptor pointer. */ +#endif + + +#define TX_DESC_PTR_TBL_OFST 0xF50000 /* Transmit descriptor pointer */ + #define TX_NON_IP_DROP_DIS_B0_LBN 91 + #define TX_NON_IP_DROP_DIS_B0_WIDTH 1 + #define TX_IP_CHKSM_DIS_B0_LBN 90 + #define TX_IP_CHKSM_DIS_B0_WIDTH 1 + #define TX_TCP_CHKSM_DIS_B0_LBN 89 + #define TX_TCP_CHKSM_DIS_B0_WIDTH 1 + #define TX_DESCQ_EN_LBN 88 + #define TX_DESCQ_EN_WIDTH 1 + #define TX_ISCSI_DDIG_EN_LBN 87 + #define TX_ISCSI_DDIG_EN_WIDTH 1 + #define TX_ISCSI_HDIG_EN_LBN 86 + #define TX_ISCSI_HDIG_EN_WIDTH 1 + #define TX_DC_HW_RPTR_LBN 80 + #define TX_DC_HW_RPTR_WIDTH 6 + #define TX_DESCQ_HW_RPTR_LBN 68 + #define TX_DESCQ_HW_RPTR_WIDTH 12 + #define TX_DESCQ_SW_WPTR_LBN 56 + #define TX_DESCQ_SW_WPTR_WIDTH 12 + #define TX_DESCQ_BUF_BASE_ID_LBN 36 + #define TX_DESCQ_BUF_BASE_ID_WIDTH 20 + #define TX_DESCQ_EVQ_ID_LBN 24 + #define TX_DESCQ_EVQ_ID_WIDTH 12 + #define TX_DESCQ_OWNER_ID_LBN 10 + #define TX_DESCQ_OWNER_ID_WIDTH 14 + #define TX_DESCQ_LABEL_LBN 5 + #define TX_DESCQ_LABEL_WIDTH 5 + #define TX_DESCQ_SIZE_LBN 3 + #define TX_DESCQ_SIZE_WIDTH 2 + #define TX_DESCQ_TYPE_LBN 1 + #define TX_DESCQ_TYPE_WIDTH 2 + #define TX_DESCQ_FLUSH_LBN 0 + #define TX_DESCQ_FLUSH_WIDTH 1 +#define TX_CFG_REG_KER_OFST 0xA50 /* Transmit configuration register */ +#define TX_CFG_REG_OFST 0xA50 /* Transmit configuration register */ + #define TX_IP_ID_P1_OFS_LBN 32 + #define TX_IP_ID_P1_OFS_WIDTH 15 + #define TX_IP_ID_P0_OFS_LBN 16 + #define TX_IP_ID_P0_OFS_WIDTH 15 + #define TX_TURBO_EN_LBN 3 + #define TX_TURBO_EN_WIDTH 1 + #define TX_OWNERR_CTL_LBN 2 + #define TX_OWNERR_CTL_WIDTH 2 + #define TX_NON_IP_DROP_DIS_LBN 1 + #define TX_NON_IP_DROP_DIS_WIDTH 1 + #define TX_IP_ID_REP_EN_LBN 0 + #define TX_IP_ID_REP_EN_WIDTH 1 +#define TX_RESERVED_REG_KER_OFST 0xA80 /* Transmit configuration register */ +#define TX_RESERVED_REG_OFST 0xA80 /* Transmit configuration register */ + #define TX_CSR_PUSH_EN_LBN 89 + #define TX_CSR_PUSH_EN_WIDTH 1 + #define TX_RX_SPACER_LBN 64 + #define 
TX_RX_SPACER_WIDTH 8 + #define TX_SW_EV_EN_LBN 59 + #define TX_SW_EV_EN_WIDTH 1 + #define TX_RX_SPACER_EN_LBN 57 + #define TX_RX_SPACER_EN_WIDTH 1 + #define TX_CSR_PREF_WD_TMR_LBN 24 + #define TX_CSR_PREF_WD_TMR_WIDTH 16 + #define TX_CSR_ONLY1TAG_LBN 21 + #define TX_CSR_ONLY1TAG_WIDTH 1 + #define TX_PREF_THRESHOLD_LBN 19 + #define TX_PREF_THRESHOLD_WIDTH 2 + #define TX_ONE_PKT_PER_Q_LBN 18 + #define TX_ONE_PKT_PER_Q_WIDTH 1 + #define TX_DIS_NON_IP_EV_LBN 17 + #define TX_DIS_NON_IP_EV_WIDTH 1 + #define TX_DMA_SPACER_LBN 8 + #define TX_DMA_SPACER_WIDTH 8 + #define TX_FLUSH_MIN_LEN_EN_B0_LBN 7 + #define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1 + #define TX_TCP_DIS_A1_LBN 7 + #define TX_TCP_DIS_A1_WIDTH 1 + #define TX_IP_DIS_A1_LBN 6 + #define TX_IP_DIS_A1_WIDTH 1 + #define TX_MAX_CPL_LBN 2 + #define TX_MAX_CPL_WIDTH 2 + #define TX_MAX_PREF_LBN 0 + #define TX_MAX_PREF_WIDTH 2 +#define TX_VLAN_REG_OFST 0xAE0 /* Transmit VLAN tag register */ + #define TX_VLAN_EN_LBN 127 + #define TX_VLAN_EN_WIDTH 1 + #define TX_VLAN7_PORT1_EN_LBN 125 + #define TX_VLAN7_PORT1_EN_WIDTH 1 + #define TX_VLAN7_PORT0_EN_LBN 124 + #define TX_VLAN7_PORT0_EN_WIDTH 1 + #define TX_VLAN7_LBN 112 + #define TX_VLAN7_WIDTH 12 + #define TX_VLAN6_PORT1_EN_LBN 109 + #define TX_VLAN6_PORT1_EN_WIDTH 1 + #define TX_VLAN6_PORT0_EN_LBN 108 + #define TX_VLAN6_PORT0_EN_WIDTH 1 + #define TX_VLAN6_LBN 96 + #define TX_VLAN6_WIDTH 12 + #define TX_VLAN5_PORT1_EN_LBN 93 + #define TX_VLAN5_PORT1_EN_WIDTH 1 + #define TX_VLAN5_PORT0_EN_LBN 92 + #define TX_VLAN5_PORT0_EN_WIDTH 1 + #define TX_VLAN5_LBN 80 + #define TX_VLAN5_WIDTH 12 + #define TX_VLAN4_PORT1_EN_LBN 77 + #define TX_VLAN4_PORT1_EN_WIDTH 1 + #define TX_VLAN4_PORT0_EN_LBN 76 + #define TX_VLAN4_PORT0_EN_WIDTH 1 + #define TX_VLAN4_LBN 64 + #define TX_VLAN4_WIDTH 12 + #define TX_VLAN3_PORT1_EN_LBN 61 + #define TX_VLAN3_PORT1_EN_WIDTH 1 + #define TX_VLAN3_PORT0_EN_LBN 60 + #define TX_VLAN3_PORT0_EN_WIDTH 1 + #define TX_VLAN3_LBN 48 + #define TX_VLAN3_WIDTH 12 + #define TX_VLAN2_PORT1_EN_LBN 45 + #define TX_VLAN2_PORT1_EN_WIDTH 1 + #define TX_VLAN2_PORT0_EN_LBN 44 + #define TX_VLAN2_PORT0_EN_WIDTH 1 + #define TX_VLAN2_LBN 32 + #define TX_VLAN2_WIDTH 12 + #define TX_VLAN1_PORT1_EN_LBN 29 + #define TX_VLAN1_PORT1_EN_WIDTH 1 + #define TX_VLAN1_PORT0_EN_LBN 28 + #define TX_VLAN1_PORT0_EN_WIDTH 1 + #define TX_VLAN1_LBN 16 + #define TX_VLAN1_WIDTH 12 + #define TX_VLAN0_PORT1_EN_LBN 13 + #define TX_VLAN0_PORT1_EN_WIDTH 1 + #define TX_VLAN0_PORT0_EN_LBN 12 + #define TX_VLAN0_PORT0_EN_WIDTH 1 + #define TX_VLAN0_LBN 0 + #define TX_VLAN0_WIDTH 12 +#define TX_FIL_CTL_REG_OFST 0xAF0 /* Transmit filter control register */ + #define TX_MADR1_FIL_EN_LBN 65 + #define TX_MADR1_FIL_EN_WIDTH 1 + #define TX_MADR0_FIL_EN_LBN 64 + #define TX_MADR0_FIL_EN_WIDTH 1 + #define TX_IPFIL31_PORT1_EN_LBN 63 + #define TX_IPFIL31_PORT1_EN_WIDTH 1 + #define TX_IPFIL31_PORT0_EN_LBN 62 + #define TX_IPFIL31_PORT0_EN_WIDTH 1 + #define TX_IPFIL30_PORT1_EN_LBN 61 + #define TX_IPFIL30_PORT1_EN_WIDTH 1 + #define TX_IPFIL30_PORT0_EN_LBN 60 + #define TX_IPFIL30_PORT0_EN_WIDTH 1 + #define TX_IPFIL29_PORT1_EN_LBN 59 + #define TX_IPFIL29_PORT1_EN_WIDTH 1 + #define TX_IPFIL29_PORT0_EN_LBN 58 + #define TX_IPFIL29_PORT0_EN_WIDTH 1 + #define TX_IPFIL28_PORT1_EN_LBN 57 + #define TX_IPFIL28_PORT1_EN_WIDTH 1 + #define TX_IPFIL28_PORT0_EN_LBN 56 + #define TX_IPFIL28_PORT0_EN_WIDTH 1 + #define TX_IPFIL27_PORT1_EN_LBN 55 + #define TX_IPFIL27_PORT1_EN_WIDTH 1 + #define TX_IPFIL27_PORT0_EN_LBN 54 + #define TX_IPFIL27_PORT0_EN_WIDTH 1 + #define 
TX_IPFIL26_PORT1_EN_LBN 53 + #define TX_IPFIL26_PORT1_EN_WIDTH 1 + #define TX_IPFIL26_PORT0_EN_LBN 52 + #define TX_IPFIL26_PORT0_EN_WIDTH 1 + #define TX_IPFIL25_PORT1_EN_LBN 51 + #define TX_IPFIL25_PORT1_EN_WIDTH 1 + #define TX_IPFIL25_PORT0_EN_LBN 50 + #define TX_IPFIL25_PORT0_EN_WIDTH 1 + #define TX_IPFIL24_PORT1_EN_LBN 49 + #define TX_IPFIL24_PORT1_EN_WIDTH 1 + #define TX_IPFIL24_PORT0_EN_LBN 48 + #define TX_IPFIL24_PORT0_EN_WIDTH 1 + #define TX_IPFIL23_PORT1_EN_LBN 47 + #define TX_IPFIL23_PORT1_EN_WIDTH 1 + #define TX_IPFIL23_PORT0_EN_LBN 46 + #define TX_IPFIL23_PORT0_EN_WIDTH 1 + #define TX_IPFIL22_PORT1_EN_LBN 45 + #define TX_IPFIL22_PORT1_EN_WIDTH 1 + #define TX_IPFIL22_PORT0_EN_LBN 44 + #define TX_IPFIL22_PORT0_EN_WIDTH 1 + #define TX_IPFIL21_PORT1_EN_LBN 43 + #define TX_IPFIL21_PORT1_EN_WIDTH 1 + #define TX_IPFIL21_PORT0_EN_LBN 42 + #define TX_IPFIL21_PORT0_EN_WIDTH 1 + #define TX_IPFIL20_PORT1_EN_LBN 41 + #define TX_IPFIL20_PORT1_EN_WIDTH 1 + #define TX_IPFIL20_PORT0_EN_LBN 40 + #define TX_IPFIL20_PORT0_EN_WIDTH 1 + #define TX_IPFIL19_PORT1_EN_LBN 39 + #define TX_IPFIL19_PORT1_EN_WIDTH 1 + #define TX_IPFIL19_PORT0_EN_LBN 38 + #define TX_IPFIL19_PORT0_EN_WIDTH 1 + #define TX_IPFIL18_PORT1_EN_LBN 37 + #define TX_IPFIL18_PORT1_EN_WIDTH 1 + #define TX_IPFIL18_PORT0_EN_LBN 36 + #define TX_IPFIL18_PORT0_EN_WIDTH 1 + #define TX_IPFIL17_PORT1_EN_LBN 35 + #define TX_IPFIL17_PORT1_EN_WIDTH 1 + #define TX_IPFIL17_PORT0_EN_LBN 34 + #define TX_IPFIL17_PORT0_EN_WIDTH 1 + #define TX_IPFIL16_PORT1_EN_LBN 33 + #define TX_IPFIL16_PORT1_EN_WIDTH 1 + #define TX_IPFIL16_PORT0_EN_LBN 32 + #define TX_IPFIL16_PORT0_EN_WIDTH 1 + #define TX_IPFIL15_PORT1_EN_LBN 31 + #define TX_IPFIL15_PORT1_EN_WIDTH 1 + #define TX_IPFIL15_PORT0_EN_LBN 30 + #define TX_IPFIL15_PORT0_EN_WIDTH 1 + #define TX_IPFIL14_PORT1_EN_LBN 29 + #define TX_IPFIL14_PORT1_EN_WIDTH 1 + #define TX_IPFIL14_PORT0_EN_LBN 28 + #define TX_IPFIL14_PORT0_EN_WIDTH 1 + #define TX_IPFIL13_PORT1_EN_LBN 27 + #define TX_IPFIL13_PORT1_EN_WIDTH 1 + #define TX_IPFIL13_PORT0_EN_LBN 26 + #define TX_IPFIL13_PORT0_EN_WIDTH 1 + #define TX_IPFIL12_PORT1_EN_LBN 25 + #define TX_IPFIL12_PORT1_EN_WIDTH 1 + #define TX_IPFIL12_PORT0_EN_LBN 24 + #define TX_IPFIL12_PORT0_EN_WIDTH 1 + #define TX_IPFIL11_PORT1_EN_LBN 23 + #define TX_IPFIL11_PORT1_EN_WIDTH 1 + #define TX_IPFIL11_PORT0_EN_LBN 22 + #define TX_IPFIL11_PORT0_EN_WIDTH 1 + #define TX_IPFIL10_PORT1_EN_LBN 21 + #define TX_IPFIL10_PORT1_EN_WIDTH 1 + #define TX_IPFIL10_PORT0_EN_LBN 20 + #define TX_IPFIL10_PORT0_EN_WIDTH 1 + #define TX_IPFIL9_PORT1_EN_LBN 19 + #define TX_IPFIL9_PORT1_EN_WIDTH 1 + #define TX_IPFIL9_PORT0_EN_LBN 18 + #define TX_IPFIL9_PORT0_EN_WIDTH 1 + #define TX_IPFIL8_PORT1_EN_LBN 17 + #define TX_IPFIL8_PORT1_EN_WIDTH 1 + #define TX_IPFIL8_PORT0_EN_LBN 16 + #define TX_IPFIL8_PORT0_EN_WIDTH 1 + #define TX_IPFIL7_PORT1_EN_LBN 15 + #define TX_IPFIL7_PORT1_EN_WIDTH 1 + #define TX_IPFIL7_PORT0_EN_LBN 14 + #define TX_IPFIL7_PORT0_EN_WIDTH 1 + #define TX_IPFIL6_PORT1_EN_LBN 13 + #define TX_IPFIL6_PORT1_EN_WIDTH 1 + #define TX_IPFIL6_PORT0_EN_LBN 12 + #define TX_IPFIL6_PORT0_EN_WIDTH 1 + #define TX_IPFIL5_PORT1_EN_LBN 11 + #define TX_IPFIL5_PORT1_EN_WIDTH 1 + #define TX_IPFIL5_PORT0_EN_LBN 10 + #define TX_IPFIL5_PORT0_EN_WIDTH 1 + #define TX_IPFIL4_PORT1_EN_LBN 9 + #define TX_IPFIL4_PORT1_EN_WIDTH 1 + #define TX_IPFIL4_PORT0_EN_LBN 8 + #define TX_IPFIL4_PORT0_EN_WIDTH 1 + #define TX_IPFIL3_PORT1_EN_LBN 7 + #define TX_IPFIL3_PORT1_EN_WIDTH 1 + #define TX_IPFIL3_PORT0_EN_LBN 6 + #define 
TX_IPFIL3_PORT0_EN_WIDTH 1 + #define TX_IPFIL2_PORT1_EN_LBN 5 + #define TX_IPFIL2_PORT1_EN_WIDTH 1 + #define TX_IPFIL2_PORT0_EN_LBN 4 + #define TX_IPFIL2_PORT0_EN_WIDTH 1 + #define TX_IPFIL1_PORT1_EN_LBN 3 + #define TX_IPFIL1_PORT1_EN_WIDTH 1 + #define TX_IPFIL1_PORT0_EN_LBN 2 + #define TX_IPFIL1_PORT0_EN_WIDTH 1 + #define TX_IPFIL0_PORT1_EN_LBN 1 + #define TX_IPFIL0_PORT1_EN_WIDTH 1 + #define TX_IPFIL0_PORT0_EN_LBN 0 + #define TX_IPFIL0_PORT0_EN_WIDTH 1 +#define TX_IPFIL_TBL_OFST 0xB00 /* Transmit IP source address filter table */ + #define TX_IPFIL_MASK_LBN 32 + #define TX_IPFIL_MASK_WIDTH 32 + #define TX_IP_SRC_ADR_LBN 0 + #define TX_IP_SRC_ADR_WIDTH 32 +#define TX_PACE_REG_A1_OFST 0xF80000 /* Transmit pace control register */ +#define TX_PACE_REG_B0_OFST 0xA90 /* Transmit pace control register */ + #define TX_PACE_SB_AF_LBN 19 + #define TX_PACE_SB_AF_WIDTH 10 + #define TX_PACE_SB_NOTAF_LBN 9 + #define TX_PACE_SB_NOTAF_WIDTH 10 + #define TX_PACE_FB_BASE_LBN 5 + #define TX_PACE_FB_BASE_WIDTH 4 + #define TX_PACE_BIN_TH_LBN 0 + #define TX_PACE_BIN_TH_WIDTH 5 +#define TX_PACE_TBL_A1_OFST 0xF80040 /* Transmit pacing table */ +#define TX_PACE_TBL_FIRST_QUEUE_A1 4 +#define TX_PACE_TBL_B0_OFST 0xF80000 /* Transmit pacing table */ +#define TX_PACE_TBL_FIRST_QUEUE_B0 0 + #define TX_PACE_LBN 0 + #define TX_PACE_WIDTH 5 + +/*************---- EE/Flash Registers C Header ----*************/ +#define EE_SPI_HCMD_REG_KER_OFST 0x100 /* SPI host command register */ +#define EE_SPI_HCMD_REG_OFST 0x100 /* SPI host command register */ + #define EE_SPI_HCMD_CMD_EN_LBN 31 + #define EE_SPI_HCMD_CMD_EN_WIDTH 1 + #define EE_WR_TIMER_ACTIVE_LBN 28 + #define EE_WR_TIMER_ACTIVE_WIDTH 1 + #define EE_SPI_HCMD_SF_SEL_LBN 24 + #define EE_SPI_HCMD_SF_SEL_WIDTH 1 + #define EE_SPI_HCMD_DABCNT_LBN 16 + #define EE_SPI_HCMD_DABCNT_WIDTH 5 + #define EE_SPI_HCMD_READ_LBN 15 + #define EE_SPI_HCMD_READ_WIDTH 1 + #define EE_SPI_HCMD_DUBCNT_LBN 12 + #define EE_SPI_HCMD_DUBCNT_WIDTH 2 + #define EE_SPI_HCMD_ADBCNT_LBN 8 + #define EE_SPI_HCMD_ADBCNT_WIDTH 2 + #define EE_SPI_HCMD_ENC_LBN 0 + #define EE_SPI_HCMD_ENC_WIDTH 8 +#define EE_SPI_HADR_REG_KER_OFST 0X110 /* SPI host address register */ +#define EE_SPI_HADR_REG_OFST 0X110 /* SPI host address register */ + #define EE_SPI_HADR_DUBYTE_LBN 24 + #define EE_SPI_HADR_DUBYTE_WIDTH 8 + #define EE_SPI_HADR_ADR_LBN 0 + #define EE_SPI_HADR_ADR_WIDTH 24 +#define EE_SPI_HDATA_REG_KER_OFST 0x120 /* SPI host data register */ +#define EE_SPI_HDATA_REG_OFST 0x120 /* SPI host data register */ + #define EE_SPI_HDATA3_LBN 96 + #define EE_SPI_HDATA3_WIDTH 32 + #define EE_SPI_HDATA2_LBN 64 + #define EE_SPI_HDATA2_WIDTH 32 + #define EE_SPI_HDATA1_LBN 32 + #define EE_SPI_HDATA1_WIDTH 32 + #define EE_SPI_HDATA0_LBN 0 + #define EE_SPI_HDATA0_WIDTH 32 +#define EE_BASE_PAGE_REG_KER_OFST 0x130 /* Expansion ROM base mirror register */ +#define EE_BASE_PAGE_REG_OFST 0x130 /* Expansion ROM base mirror register */ + #define EE_EXP_ROM_WINDOW_BASE_LBN 16 + #define EE_EXP_ROM_WINDOW_BASE_WIDTH 13 + #define EE_EXPROM_MASK_LBN 0 + #define EE_EXPROM_MASK_WIDTH 13 +#define EE_VPD_CFG0_REG_KER_OFST 0X140 /* SPI/VPD configuration register */ +#define EE_VPD_CFG0_REG_OFST 0X140 /* SPI/VPD configuration register */ + #define EE_SF_FASTRD_EN_LBN 127 + #define EE_SF_FASTRD_EN_WIDTH 1 + #define EE_SF_CLOCK_DIV_LBN 120 + #define EE_SF_CLOCK_DIV_WIDTH 7 + #define EE_VPD_WIP_POLL_LBN 119 + #define EE_VPD_WIP_POLL_WIDTH 1 + #define EE_VPDW_LENGTH_LBN 80 + #define EE_VPDW_LENGTH_WIDTH 15 + #define EE_VPDW_BASE_LBN 64 + #define 
EE_VPDW_BASE_WIDTH 15 + #define EE_VPD_WR_CMD_EN_LBN 56 + #define EE_VPD_WR_CMD_EN_WIDTH 8 + #define EE_VPD_BASE_LBN 32 + #define EE_VPD_BASE_WIDTH 24 + #define EE_VPD_LENGTH_LBN 16 + #define EE_VPD_LENGTH_WIDTH 13 + #define EE_VPD_AD_SIZE_LBN 8 + #define EE_VPD_AD_SIZE_WIDTH 5 + #define EE_VPD_ACCESS_ON_LBN 5 + #define EE_VPD_ACCESS_ON_WIDTH 1 +#define EE_VPD_SW_CNTL_REG_KER_OFST 0X150 /* VPD access SW control register */ +#define EE_VPD_SW_CNTL_REG_OFST 0X150 /* VPD access SW control register */ + #define EE_VPD_CYCLE_PENDING_LBN 31 + #define EE_VPD_CYCLE_PENDING_WIDTH 1 + #define EE_VPD_CYC_WRITE_LBN 28 + #define EE_VPD_CYC_WRITE_WIDTH 1 + #define EE_VPD_CYC_ADR_LBN 0 + #define EE_VPD_CYC_ADR_WIDTH 15 +#define EE_VPD_SW_DATA_REG_KER_OFST 0x160 /* VPD access SW data register */ +#define EE_VPD_SW_DATA_REG_OFST 0x160 /* VPD access SW data register */ + #define EE_VPD_CYC_DAT_LBN 0 + #define EE_VPD_CYC_DAT_WIDTH 32 Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_desc.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_desc.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,75 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides EtherFabric NIC - EFXXXX (aka Falcon) descriptor + * definitions. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
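/*
 * Illustration only, not from the driver sources: a sketch of how the VPD
 * software-control fields above might be used.  The function names are
 * hypothetical, and it is an assumption that leaving EE_VPD_CYC_WRITE clear
 * selects a read cycle; only the *_LBN/*_WIDTH macros are taken from the
 * header.
 */
#include <stdbool.h>
#include <stdint.h>

static inline uint32_t falcon_vpd_read_cycle(unsigned int vpd_addr)
{
        /* EE_VPD_CYC_WRITE left clear, presumed to mean a read cycle. */
        return (uint32_t)(vpd_addr & ((1u << EE_VPD_CYC_ADR_WIDTH) - 1))
                << EE_VPD_CYC_ADR_LBN;
}

static inline bool falcon_vpd_cycle_pending(uint32_t sw_cntl)
{
        return (sw_cntl >> EE_VPD_CYCLE_PENDING_LBN) & 1u;
}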
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +/*************---- Descriptors C Headers ----*************/ +/* Receive Kernel IP Descriptor */ + #define RX_KER_BUF_SIZE_LBN 48 + #define RX_KER_BUF_SIZE_WIDTH 14 + #define RX_KER_BUF_REGION_LBN 46 + #define RX_KER_BUF_REGION_WIDTH 2 + #define RX_KER_BUF_REGION0_DECODE 0 + #define RX_KER_BUF_REGION1_DECODE 1 + #define RX_KER_BUF_REGION2_DECODE 2 + #define RX_KER_BUF_REGION3_DECODE 3 + #define RX_KER_BUF_ADR_LBN 0 + #define RX_KER_BUF_ADR_WIDTH 46 +/* Receive User IP Descriptor */ + #define RX_USR_2BYTE_OFS_LBN 20 + #define RX_USR_2BYTE_OFS_WIDTH 12 + #define RX_USR_BUF_ID_LBN 0 + #define RX_USR_BUF_ID_WIDTH 20 +/* Transmit Kernel IP Descriptor */ + #define TX_KER_PORT_LBN 63 + #define TX_KER_PORT_WIDTH 1 + #define TX_KER_CONT_LBN 62 + #define TX_KER_CONT_WIDTH 1 + #define TX_KER_BYTE_CNT_LBN 48 + #define TX_KER_BYTE_CNT_WIDTH 14 + #define TX_KER_BUF_REGION_LBN 46 + #define TX_KER_BUF_REGION_WIDTH 2 + #define TX_KER_BUF_REGION0_DECODE 0 + #define TX_KER_BUF_REGION1_DECODE 1 + #define TX_KER_BUF_REGION2_DECODE 2 + #define TX_KER_BUF_REGION3_DECODE 3 + #define TX_KER_BUF_ADR_LBN 0 + #define TX_KER_BUF_ADR_WIDTH 46 +/* Transmit User IP Descriptor */ + #define TX_USR_PORT_LBN 47 + #define TX_USR_PORT_WIDTH 1 + #define TX_USR_CONT_LBN 46 + #define TX_USR_CONT_WIDTH 1 + #define TX_USR_BYTE_CNT_LBN 33 + #define TX_USR_BYTE_CNT_WIDTH 13 + #define TX_USR_BUF_ID_LBN 13 + #define TX_USR_BUF_ID_WIDTH 20 + #define TX_USR_BYTE_OFS_LBN 0 + #define TX_USR_BYTE_OFS_WIDTH 13 Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_event.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_event.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,155 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides EtherFabric NIC - EFXXXX (aka Falcon) event + * definitions. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
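/*
 * Illustration only, not from the driver sources: packing a transmit kernel
 * IP descriptor from the TX_KER_* fields above.  The function name is
 * hypothetical; the caller is assumed to supply a bus address that fits the
 * 46-bit TX_KER_BUF_ADR field and a byte count that fits 14 bits, and buffer
 * region 0 is used purely as an example.
 */
#include <stdint.h>

static inline uint64_t falcon_tx_ker_desc(uint64_t buf_addr,
                                          unsigned int byte_cnt,
                                          int port, int cont)
{
        return ((uint64_t)(port ? 1 : 0) << TX_KER_PORT_LBN) |
               ((uint64_t)(cont ? 1 : 0) << TX_KER_CONT_LBN) |
               ((uint64_t)(byte_cnt & ((1u << TX_KER_BYTE_CNT_WIDTH) - 1))
                        << TX_KER_BYTE_CNT_LBN) |
               ((uint64_t)TX_KER_BUF_REGION0_DECODE << TX_KER_BUF_REGION_LBN) |
               (buf_addr & ((1ULL << TX_KER_BUF_ADR_WIDTH) - 1));
}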
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +/*************---- Events Format C Header ----*************/ +/*************---- Event entry ----*************/ + #define EV_CODE_LBN 60 + #define EV_CODE_WIDTH 4 + #define RX_IP_EV_DECODE 0 + #define TX_IP_EV_DECODE 2 + #define DRIVER_EV_DECODE 5 + #define GLOBAL_EV_DECODE 6 + #define DRV_GEN_EV_DECODE 7 + #define EV_DATA_LBN 0 + #define EV_DATA_WIDTH 60 +/******---- Receive IP events for both Kernel & User event queues ----******/ + #define RX_EV_PKT_OK_LBN 56 + #define RX_EV_PKT_OK_WIDTH 1 + #define RX_EV_BUF_OWNER_ID_ERR_LBN 54 + #define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1 + #define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52 + #define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1 + #define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51 + #define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1 + #define RX_EV_ETH_CRC_ERR_LBN 50 + #define RX_EV_ETH_CRC_ERR_WIDTH 1 + #define RX_EV_FRM_TRUNC_LBN 49 + #define RX_EV_FRM_TRUNC_WIDTH 1 + #define RX_EV_DRIB_NIB_LBN 48 + #define RX_EV_DRIB_NIB_WIDTH 1 + #define RX_EV_TOBE_DISC_LBN 47 + #define RX_EV_TOBE_DISC_WIDTH 1 + #define RX_EV_PKT_TYPE_LBN 44 + #define RX_EV_PKT_TYPE_WIDTH 3 + #define RX_EV_PKT_TYPE_ETH_DECODE 0 + #define RX_EV_PKT_TYPE_LLC_DECODE 1 + #define RX_EV_PKT_TYPE_JUMBO_DECODE 2 + #define RX_EV_PKT_TYPE_VLAN_DECODE 3 + #define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4 + #define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5 + #define RX_EV_HDR_TYPE_LBN 42 + #define RX_EV_HDR_TYPE_WIDTH 2 + #define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0 + #define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1 + #define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2 + #define RX_EV_HDR_TYPE_NON_IP_DECODE 3 + #define RX_EV_DESC_Q_EMPTY_LBN 41 + #define RX_EV_DESC_Q_EMPTY_WIDTH 1 + #define RX_EV_MCAST_HASH_MATCH_LBN 40 + #define RX_EV_MCAST_HASH_MATCH_WIDTH 1 + #define RX_EV_MCAST_PKT_LBN 39 + #define RX_EV_MCAST_PKT_WIDTH 1 + #define RX_EV_Q_LABEL_LBN 32 + #define RX_EV_Q_LABEL_WIDTH 5 + #define RX_JUMBO_CONT_LBN 31 + #define RX_JUMBO_CONT_WIDTH 1 + #define RX_SOP_LBN 15 + #define RX_SOP_WIDTH 1 + #define RX_PORT_LBN 30 + #define RX_PORT_WIDTH 1 + #define RX_EV_BYTE_CNT_LBN 16 + #define RX_EV_BYTE_CNT_WIDTH 14 + #define RX_iSCSI_PKT_OK_LBN 14 + #define RX_iSCSI_PKT_OK_WIDTH 1 + #define RX_ISCSI_DDIG_ERR_LBN 13 + #define RX_ISCSI_DDIG_ERR_WIDTH 1 + #define RX_ISCSI_HDIG_ERR_LBN 12 + #define RX_ISCSI_HDIG_ERR_WIDTH 1 + #define RX_EV_DESC_PTR_LBN 0 + #define RX_EV_DESC_PTR_WIDTH 12 +/******---- Transmit IP events for both Kernel & User event queues ----******/ + #define TX_EV_PKT_ERR_LBN 38 + #define TX_EV_PKT_ERR_WIDTH 1 + #define TX_EV_PKT_TOO_BIG_LBN 37 + #define TX_EV_PKT_TOO_BIG_WIDTH 1 + #define TX_EV_Q_LABEL_LBN 32 + #define TX_EV_Q_LABEL_WIDTH 5 + #define TX_EV_PORT_LBN 16 + #define TX_EV_PORT_WIDTH 1 + #define TX_EV_WQ_FF_FULL_LBN 15 + #define TX_EV_WQ_FF_FULL_WIDTH 1 + #define TX_EV_BUF_OWNER_ID_ERR_LBN 14 + #define TX_EV_BUF_OWNER_ID_ERR_WIDTH 1 + #define TX_EV_COMP_LBN 12 + #define TX_EV_COMP_WIDTH 1 + #define TX_EV_DESC_PTR_LBN 0 + #define TX_EV_DESC_PTR_WIDTH 12 +/*************---- Char or Kernel driver events ----*************/ + #define DRIVER_EV_SUB_CODE_LBN 56 + #define DRIVER_EV_SUB_CODE_WIDTH 4 + #define TX_DESCQ_FLS_DONE_EV_DECODE 0x0 + #define RX_DESCQ_FLS_DONE_EV_DECODE 0x1 + #define EVQ_INIT_DONE_EV_DECODE 0x2 + #define EVQ_NOT_EN_EV_DECODE 0x3 + 
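/*
 * Illustration only, not from the driver sources: decoding a receive
 * completion event with the field definitions above.  The struct and function
 * names are hypothetical; the event is assumed to have been read out of the
 * event queue as a single little-endian 64-bit word.
 */
#include <stdbool.h>
#include <stdint.h>

struct falcon_rx_ev {
        bool pkt_ok;
        unsigned int q_label;
        unsigned int byte_cnt;
        unsigned int desc_ptr;
};

static inline bool falcon_decode_rx_ev(uint64_t ev, struct falcon_rx_ev *out)
{
        if (((ev >> EV_CODE_LBN) & ((1u << EV_CODE_WIDTH) - 1)) !=
            RX_IP_EV_DECODE)
                return false;   /* not a receive IP event */

        out->pkt_ok   = (ev >> RX_EV_PKT_OK_LBN) & 1u;
        out->q_label  = (ev >> RX_EV_Q_LABEL_LBN) &
                        ((1u << RX_EV_Q_LABEL_WIDTH) - 1);
        out->byte_cnt = (ev >> RX_EV_BYTE_CNT_LBN) &
                        ((1u << RX_EV_BYTE_CNT_WIDTH) - 1);
        out->desc_ptr = (ev >> RX_EV_DESC_PTR_LBN) &
                        ((1u << RX_EV_DESC_PTR_WIDTH) - 1);
        return true;
}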
#define RX_DESCQ_FLSFF_OVFL_EV_DECODE 0x4 + #define SRM_UPD_DONE_EV_DECODE 0x5 + #define WAKE_UP_EV_DECODE 0x6 + #define TX_PKT_NON_TCP_UDP_DECODE 0x9 + #define TIMER_EV_DECODE 0xA + #define RX_DSC_ERROR_EV_DECODE 0xE + #define DRIVER_EV_TX_DESCQ_ID_LBN 0 + #define DRIVER_EV_TX_DESCQ_ID_WIDTH 12 + #define DRIVER_EV_RX_DESCQ_ID_LBN 0 + #define DRIVER_EV_RX_DESCQ_ID_WIDTH 12 + #define DRIVER_EV_EVQ_ID_LBN 0 + #define DRIVER_EV_EVQ_ID_WIDTH 12 + #define DRIVER_TMR_ID_LBN 0 + #define DRIVER_TMR_ID_WIDTH 12 + #define DRIVER_EV_SRM_UPD_LBN 0 + #define DRIVER_EV_SRM_UPD_WIDTH 2 + #define SRM_CLR_EV_DECODE 0 + #define SRM_UPD_EV_DECODE 1 + #define SRM_ILLCLR_EV_DECODE 2 +/********---- Global events. Sent to both event queue 0 and 4. ----********/ + #define XFP_PHY_INTR_LBN 10 + #define XFP_PHY_INTR_WIDTH 1 + #define XG_PHY_INTR_LBN 9 + #define XG_PHY_INTR_WIDTH 1 + #define G_PHY1_INTR_LBN 8 + #define G_PHY1_INTR_WIDTH 1 + #define G_PHY0_INTR_LBN 7 + #define G_PHY0_INTR_WIDTH 1 +/*************---- Driver generated events ----*************/ + #define DRV_GEN_EV_CODE_LBN 60 + #define DRV_GEN_EV_CODE_WIDTH 4 + #define DRV_GEN_EV_DATA_LBN 0 + #define DRV_GEN_EV_DATA_WIDTH 60 Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_grmon.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_grmon.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,129 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides EtherFabric NIC - EFXXXX (aka Falcon) 1G MAC + * counters. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
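/*
 * Illustration only, not from the driver sources: extracting the flushed
 * queue ID from a driver event, assuming the 64-bit event word has already
 * been identified as EV_CODE == DRIVER_EV_DECODE.  The function name is
 * hypothetical; it returns -1 for driver events other than TX/RX flush done.
 */
#include <stdint.h>

static inline int falcon_driver_ev_flush_id(uint64_t ev)
{
        unsigned int sub = (ev >> DRIVER_EV_SUB_CODE_LBN) &
                           ((1u << DRIVER_EV_SUB_CODE_WIDTH) - 1);

        switch (sub) {
        case TX_DESCQ_FLS_DONE_EV_DECODE:
                return (int)((ev >> DRIVER_EV_TX_DESCQ_ID_LBN) &
                             ((1u << DRIVER_EV_TX_DESCQ_ID_WIDTH) - 1));
        case RX_DESCQ_FLS_DONE_EV_DECODE:
                return (int)((ev >> DRIVER_EV_RX_DESCQ_ID_LBN) &
                             ((1u << DRIVER_EV_RX_DESCQ_ID_WIDTH) - 1));
        default:
                return -1;
        }
}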
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +/*************---- 1G MAC Statistical Counters C Header ----*************/ +#define GRxGoodOct_offset 0x0 + #define GRxGoodOct_WIDTH 48 +#define GRxBadOct_offset 0x8 + #define GRxBadOct_WIDTH 48 +#define GRxMissPkt_offset 0x10 + #define GRxMissPkt_WIDTH 32 +#define GRxFalseCRS_offset 0x14 + #define GRxFalseCRS_WIDTH 32 +#define GRxPausePkt_offset 0x18 + #define GRxPausePkt_WIDTH 32 +#define GRxBadPkt_offset 0x1C + #define GRxBadPkt_WIDTH 32 +#define GRxUcastPkt_offset 0x20 + #define GRxUcastPkt_WIDTH 32 +#define GRxMcastPkt_offset 0x24 + #define GRxMcastPkt_WIDTH 32 +#define GRxBcastPkt_offset 0x28 + #define GRxBcastPkt_WIDTH 32 +#define GRxGoodLt64Pkt_offset 0x2C + #define GRxGoodLt64Pkt_WIDTH 32 +#define GRxBadLt64Pkt_offset 0x30 + #define GRxBadLt64Pkt_WIDTH 32 +#define GRx64Pkt_offset 0x34 + #define GRx64Pkt_WIDTH 32 +#define GRx65to127Pkt_offset 0x38 + #define GRx65to127Pkt_WIDTH 32 +#define GRx128to255Pkt_offset 0x3C + #define GRx128to255Pkt_WIDTH 32 +#define GRx256to511Pkt_offset 0x40 + #define GRx256to511Pkt_WIDTH 32 +#define GRx512to1023Pkt_offset 0x44 + #define GRx512to1023Pkt_WIDTH 32 +#define GRx1024to15xxPkt_offset 0x48 + #define GRx1024to15xxPkt_WIDTH 32 +#define GRx15xxtoJumboPkt_offset 0x4C + #define GRx15xxtoJumboPkt_WIDTH 32 +#define GRxGtJumboPkt_offset 0x50 + #define GRxGtJumboPkt_WIDTH 32 +#define GRxFcsErr64to15xxPkt_offset 0x54 + #define GRxFcsErr64to15xxPkt_WIDTH 32 +#define GRxFcsErr15xxtoJumboPkt_offset 0x58 + #define GRxFcsErr15xxtoJumboPkt_WIDTH 32 +#define GRxFcsErrGtJumboPkt_offset 0x5C + #define GRxFcsErrGtJumboPkt_WIDTH 32 +#define GTxGoodBadOct_offset 0x80 + #define GTxGoodBadOct_WIDTH 48 +#define GTxGoodOct_offset 0x88 + #define GTxGoodOct_WIDTH 48 +#define GTxSglColPkt_offset 0x90 + #define GTxSglColPkt_WIDTH 32 +#define GTxMultColPkt_offset 0x94 + #define GTxMultColPkt_WIDTH 32 +#define GTxExColPkt_offset 0x98 + #define GTxExColPkt_WIDTH 32 +#define GTxDefPkt_offset 0x9C + #define GTxDefPkt_WIDTH 32 +#define GTxLateCol_offset 0xA0 + #define GTxLateCol_WIDTH 32 +#define GTxExDefPkt_offset 0xA4 + #define GTxExDefPkt_WIDTH 32 +#define GTxPausePkt_offset 0xA8 + #define GTxPausePkt_WIDTH 32 +#define GTxBadPkt_offset 0xAC + #define GTxBadPkt_WIDTH 32 +#define GTxUcastPkt_offset 0xB0 + #define GTxUcastPkt_WIDTH 32 +#define GTxMcastPkt_offset 0xB4 + #define GTxMcastPkt_WIDTH 32 +#define GTxBcastPkt_offset 0xB8 + #define GTxBcastPkt_WIDTH 32 +#define GTxLt64Pkt_offset 0xBC + #define GTxLt64Pkt_WIDTH 32 +#define GTx64Pkt_offset 0xC0 + #define GTx64Pkt_WIDTH 32 +#define GTx65to127Pkt_offset 0xC4 + #define GTx65to127Pkt_WIDTH 32 +#define GTx128to255Pkt_offset 0xC8 + #define GTx128to255Pkt_WIDTH 32 +#define GTx256to511Pkt_offset 0xCC + #define GTx256to511Pkt_WIDTH 32 +#define GTx512to1023Pkt_offset 0xD0 + #define GTx512to1023Pkt_WIDTH 32 +#define GTx1024to15xxPkt_offset 0xD4 + #define GTx1024to15xxPkt_WIDTH 32 +#define GTx15xxtoJumboPkt_offset 0xD8 + #define GTx15xxtoJumboPkt_WIDTH 32 +#define GTxGtJumboPkt_offset 0xDC + #define GTxGtJumboPkt_WIDTH 32 +#define GTxNonTcpUdpPkt_offset 0xE0 + #define GTxNonTcpUdpPkt_WIDTH 16 +#define GTxMacSrcErrPkt_offset 0xE4 + #define GTxMacSrcErrPkt_WIDTH 16 +#define GTxIpSrcErrPkt_offset 0xE8 + #define GTxIpSrcErrPkt_WIDTH 16 +#define 
GDmaDone_offset 0xEC + #define GDmaDone_WIDTH 32 Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_intr_vec.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_intr_vec.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,44 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides EtherFabric NIC - EFXXXX (aka Falcon) interrupt + * vector definitions. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +/*************---- Interrupt Vector Format C Header ----*************/ +#define DW0_OFST 0x0 /* Double-word 0: Event queue FIFO interrupts */ + #define EVQ_FIFO_HF_LBN 1 + #define EVQ_FIFO_HF_WIDTH 1 + #define EVQ_FIFO_AF_LBN 0 + #define EVQ_FIFO_AF_WIDTH 1 +#define DW1_OFST 0x4 /* Double-word 1: Interrupt indicator */ + #define INT_FLAG_LBN 0 + #define INT_FLAG_WIDTH 1 +#define DW2_OFST 0x8 /* Double-word 2: Fatal interrupts */ + #define FATAL_INT_LBN 0 + #define FATAL_INT_WIDTH 1 Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_mac.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_mac.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,711 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides EtherFabric NIC - EFXXXX (aka Falcon) MAC register + * definitions. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
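/*
 * Illustration only, not from the driver sources: reading the three
 * double-words of a DMAed interrupt vector using the DW0/DW1/DW2 offsets and
 * flag bits above.  The struct layout and function name are hypothetical, and
 * the vector is assumed to be little-endian in host memory.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct falcon_int_vec {
        bool evq_fifo_af;       /* event queue FIFO almost-full flag */
        bool evq_fifo_hf;       /* event queue FIFO half-full flag */
        bool int_flag;          /* interrupt indicator */
        bool fatal_int;         /* fatal interrupt indicator */
};

static inline void falcon_parse_int_vec(const void *vec_mem,
                                        struct falcon_int_vec *out)
{
        uint32_t dw0, dw1, dw2;

        memcpy(&dw0, (const char *)vec_mem + DW0_OFST, sizeof(dw0));
        memcpy(&dw1, (const char *)vec_mem + DW1_OFST, sizeof(dw1));
        memcpy(&dw2, (const char *)vec_mem + DW2_OFST, sizeof(dw2));

        out->evq_fifo_af = (dw0 >> EVQ_FIFO_AF_LBN) & 1u;
        out->evq_fifo_hf = (dw0 >> EVQ_FIFO_HF_LBN) & 1u;
        out->int_flag    = (dw1 >> INT_FLAG_LBN) & 1u;
        out->fatal_int   = (dw2 >> FATAL_INT_LBN) & 1u;
}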
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +/*********---- 1G/10G Ethernet MAC Wrapper Registers C Header ----*********/ +#define MD_TXD_REG_KER_OFST 0xC00 /* PHY management transmit data register */ +#define MD_TXD_REG_OFST 0xC00 /* PHY management transmit data register */ + #define MD_TXD_LBN 0 + #define MD_TXD_WIDTH 16 +#define MD_RXD_REG_KER_OFST 0xC10 /* PHY management receive data register */ +#define MD_RXD_REG_OFST 0xC10 /* PHY management receive data register */ + #define MD_RXD_LBN 0 + #define MD_RXD_WIDTH 16 +#define MD_CS_REG_KER_OFST 0xC20 /* PHY management configuration & + status register */ +#define MD_CS_REG_OFST 0xC20 /* PHY management configuration & + status register */ + #define MD_PT_LBN 7 + #define MD_PT_WIDTH 3 + #define MD_PL_LBN 6 + #define MD_PL_WIDTH 1 + #define MD_INT_CLR_LBN 5 + #define MD_INT_CLR_WIDTH 1 + #define MD_GC_LBN 4 + #define MD_GC_WIDTH 1 + #define MD_PRSP_LBN 3 + #define MD_PRSP_WIDTH 1 + #define MD_RIC_LBN 2 + #define MD_RIC_WIDTH 1 + #define MD_RDC_LBN 1 + #define MD_RDC_WIDTH 1 + #define MD_WRC_LBN 0 + #define MD_WRC_WIDTH 1 +#define MD_PHY_ADR_REG_KER_OFST 0xC30 /* PHY management PHY address register */ +#define MD_PHY_ADR_REG_OFST 0xC30 /* PHY management PHY address register */ + #define MD_PHY_ADR_LBN 0 + #define MD_PHY_ADR_WIDTH 16 +#define MD_ID_REG_KER_OFST 0xC40 /* PHY management ID register */ +#define MD_ID_REG_OFST 0xC40 /* PHY management ID register */ + #define MD_PRT_ADR_LBN 11 + #define MD_PRT_ADR_WIDTH 5 + #define MD_DEV_ADR_LBN 6 + #define MD_DEV_ADR_WIDTH 5 +#define MD_STAT_REG_KER_OFST 0xC50 /* PHY management status & mask register */ +#define MD_STAT_REG_OFST 0xC50 /* PHY management status & mask register */ + #define MD_PINT_LBN 4 + #define MD_PINT_WIDTH 1 + #define MD_DONE_LBN 3 + #define MD_DONE_WIDTH 1 + #define MD_BSERR_LBN 2 + #define MD_BSERR_WIDTH 1 + #define MD_LNFL_LBN 1 + #define MD_LNFL_WIDTH 1 + #define MD_BSY_LBN 0 + #define MD_BSY_WIDTH 1 +#define MAC0_STAT_DMA_REG_KER_OFST 0xC60 /* Port 0 MAC statistical counter + DMA register */ +#define MAC0_STAT_DMA_REG_OFST 0xC60 /* Port 0 MAC statistical counter + DMA register */ + #define MAC0_STAT_DMA_CMD_LBN 48 + #define MAC0_STAT_DMA_CMD_WIDTH 1 + #define MAC0_STAT_DMA_ADR_LBN 0 + #define MAC0_STAT_DMA_ADR_WIDTH 48 +#define MAC1_STAT_DMA_REG_KER_OFST 0xC70 /* Port 1 MAC statistical counter + DMA register */ +#define MAC1_STAT_DMA_REG_OFST 0xC70 /* Port 1 MAC statistical counter + DMA register */ + #define MAC1_STAT_DMA_CMD_LBN 48 + #define MAC1_STAT_DMA_CMD_WIDTH 1 + #define MAC1_STAT_DMA_ADR_LBN 0 + #define MAC1_STAT_DMA_ADR_WIDTH 48 +#define MAC0_CTRL_REG_KER_OFST 0xC80 /* Port 0 MAC control register */ +#define MAC0_CTRL_REG_OFST 0xC80 /* Port 0 MAC control register */ + #define MAC0_XOFF_VAL_LBN 16 + #define MAC0_XOFF_VAL_WIDTH 16 + #define MAC0_BCAD_ACPT_LBN 4 + #define MAC0_BCAD_ACPT_WIDTH 1 + #define MAC0_UC_PROM_LBN 3 + #define MAC0_UC_PROM_WIDTH 1 + #define MAC0_LINK_STATUS_LBN 2 + #define MAC0_LINK_STATUS_WIDTH 1 + #define MAC0_SPEED_LBN 0 + #define MAC0_SPEED_WIDTH 2 +#define MAC1_CTRL_REG_KER_OFST 0xC90 /* Port 1 MAC control register */ +#define MAC1_CTRL_REG_OFST 0xC90 /* Port 1 MAC control register */ + #define MAC1_XOFF_VAL_LBN 16 + #define 
MAC1_XOFF_VAL_WIDTH 16 + #define MAC1_BCAD_ACPT_LBN 4 + #define MAC1_BCAD_ACPT_WIDTH 1 + #define MAC1_UC_PROM_LBN 3 + #define MAC1_UC_PROM_WIDTH 1 + #define MAC1_LINK_STATUS_LBN 2 + #define MAC1_LINK_STATUS_WIDTH 1 + #define MAC1_SPEED_LBN 0 + #define MAC1_SPEED_WIDTH 2 +#define MAC_MC_HASH_REG0_KER_OFST 0xCA0 /* Multicast address hash table */ +#define MAC_MC_HASH_REG0_OFST 0xCA0 /* Multicast address hash table */ + #define MAC_MCAST_HASH0_LBN 0 + #define MAC_MCAST_HASH0_WIDTH 128 +#define MAC_MC_HASH_REG1_KER_OFST 0xCB0 /* Multicast address hash table */ +#define MAC_MC_HASH_REG1_OFST 0xCB0 /* Multicast address hash table */ + #define MAC_MCAST_HASH1_LBN 0 + #define MAC_MCAST_HASH1_WIDTH 128 +/*************---- 1G MAC Port 0 Registers C Header ----*************/ +#define GM_P0_BASE 0xE00 +#define GM_P1_BASE 0x1000 +#define GM_CFG1_REG_KER_OFST 0x00 /* GMAC configuration register 1 */ +#define GM_CFG1_REG_OFST 0x00 /* GMAC configuration register 1 */ + #define GM_SW_RST_LBN 31 + #define GM_SW_RST_WIDTH 1 + #define GM_SIM_RST_LBN 30 + #define GM_SIM_RST_WIDTH 1 + #define GM_RST_RX_MAC_CTL_LBN 19 + #define GM_RST_RX_MAC_CTL_WIDTH 1 + #define GM_RST_TX_MAC_CTL_LBN 18 + #define GM_RST_TX_MAC_CTL_WIDTH 1 + #define GM_RST_RX_FUNC_LBN 17 + #define GM_RST_RX_FUNC_WIDTH 1 + #define GM_RST_TX_FUNC_LBN 16 + #define GM_RST_TX_FUNC_WIDTH 1 + #define GM_LOOP_LBN 8 + #define GM_LOOP_WIDTH 1 + #define GM_RX_FC_EN_LBN 5 + #define GM_RX_FC_EN_WIDTH 1 + #define GM_TX_FC_EN_LBN 4 + #define GM_TX_FC_EN_WIDTH 1 + #define GM_SYNC_RXEN_LBN 3 + #define GM_SYNC_RXEN_WIDTH 1 + #define GM_RX_EN_LBN 2 + #define GM_RX_EN_WIDTH 1 + #define GM_SYNC_TXEN_LBN 1 + #define GM_SYNC_TXEN_WIDTH 1 + #define GM_TX_EN_LBN 0 + #define GM_TX_EN_WIDTH 1 +#define GM_CFG2_REG_KER_OFST 0x10 /* GMAC configuration register 2 */ +#define GM_CFG2_REG_OFST 0x10 /* GMAC configuration register 2 */ + #define GM_PAMBL_LEN_LBN 12 + #define GM_PAMBL_LEN_WIDTH 4 + #define GM_IF_MODE_LBN 8 + #define GM_IF_MODE_WIDTH 2 + #define GM_HUGE_FRM_EN_LBN 5 + #define GM_HUGE_FRM_EN_WIDTH 1 + #define GM_LEN_CHK_LBN 4 + #define GM_LEN_CHK_WIDTH 1 + #define GM_PAD_CRC_EN_LBN 2 + #define GM_PAD_CRC_EN_WIDTH 1 + #define GM_CRC_EN_LBN 1 + #define GM_CRC_EN_WIDTH 1 + #define GM_FD_LBN 0 + #define GM_FD_WIDTH 1 +#define GM_IPG_REG_KER_OFST 0x20 /* GMAC IPG register */ +#define GM_IPG_REG_OFST 0x20 /* GMAC IPG register */ + #define GM_NONB2B_IPG1_LBN 24 + #define GM_NONB2B_IPG1_WIDTH 7 + #define GM_NONB2B_IPG2_LBN 16 + #define GM_NONB2B_IPG2_WIDTH 7 + #define GM_MIN_IPG_ENF_LBN 8 + #define GM_MIN_IPG_ENF_WIDTH 8 + #define GM_B2B_IPG_LBN 0 + #define GM_B2B_IPG_WIDTH 7 +#define GM_HD_REG_KER_OFST 0x30 /* GMAC half duplex register */ +#define GM_HD_REG_OFST 0x30 /* GMAC half duplex register */ + #define GM_ALT_BOFF_VAL_LBN 20 + #define GM_ALT_BOFF_VAL_WIDTH 4 + #define GM_ALT_BOFF_EN_LBN 19 + #define GM_ALT_BOFF_EN_WIDTH 1 + #define GM_BP_NO_BOFF_LBN 18 + #define GM_BP_NO_BOFF_WIDTH 1 + #define GM_DIS_BOFF_LBN 17 + #define GM_DIS_BOFF_WIDTH 1 + #define GM_EXDEF_TX_EN_LBN 16 + #define GM_EXDEF_TX_EN_WIDTH 1 + #define GM_RTRY_LIMIT_LBN 12 + #define GM_RTRY_LIMIT_WIDTH 4 + #define GM_COL_WIN_LBN 0 + #define GM_COL_WIN_WIDTH 10 +#define GM_MAX_FLEN_REG_KER_OFST 0x40 /* GMAC maximum frame length register */ +#define GM_MAX_FLEN_REG_OFST 0x40 /* GMAC maximum frame length register */ + #define GM_MAX_FLEN_LBN 0 + #define GM_MAX_FLEN_WIDTH 16 +#define GM_TEST_REG_KER_OFST 0x70 /* GMAC test register */ +#define GM_TEST_REG_OFST 0x70 /* GMAC test register */ + #define 
GM_MAX_BOFF_LBN 3 + #define GM_MAX_BOFF_WIDTH 1 + #define GM_REG_TX_FLOW_EN_LBN 2 + #define GM_REG_TX_FLOW_EN_WIDTH 1 + #define GM_TEST_PAUSE_LBN 1 + #define GM_TEST_PAUSE_WIDTH 1 + #define GM_SHORT_SLOT_LBN 0 + #define GM_SHORT_SLOT_WIDTH 1 +#define GM_ADR1_REG_KER_OFST 0x100 /* GMAC station address register 1 */ +#define GM_ADR1_REG_OFST 0x100 /* GMAC station address register 1 */ + #define GM_ADR1_LBN 0 + #define GM_ADR1_WIDTH 32 +#define GM_ADR2_REG_KER_OFST 0x110 /* GMAC station address register 2 */ +#define GM_ADR2_REG_OFST 0x110 /* GMAC station address register 2 */ + #define GM_ADR2_LBN 16 + #define GM_ADR2_WIDTH 16 +#define GMF_CFG0_REG_KER_OFST 0x120 /* GMAC FIFO configuration register 0 */ +#define GMF_CFG0_REG_OFST 0x120 /* GMAC FIFO configuration register 0 */ + #define GMF_FTFENRPLY_LBN 20 + #define GMF_FTFENRPLY_WIDTH 1 + #define GMF_STFENRPLY_LBN 19 + #define GMF_STFENRPLY_WIDTH 1 + #define GMF_FRFENRPLY_LBN 18 + #define GMF_FRFENRPLY_WIDTH 1 + #define GMF_SRFENRPLY_LBN 17 + #define GMF_SRFENRPLY_WIDTH 1 + #define GMF_WTMENRPLY_LBN 16 + #define GMF_WTMENRPLY_WIDTH 1 + #define GMF_FTFENREQ_LBN 12 + #define GMF_FTFENREQ_WIDTH 1 + #define GMF_STFENREQ_LBN 11 + #define GMF_STFENREQ_WIDTH 1 + #define GMF_FRFENREQ_LBN 10 + #define GMF_FRFENREQ_WIDTH 1 + #define GMF_SRFENREQ_LBN 9 + #define GMF_SRFENREQ_WIDTH 1 + #define GMF_WTMENREQ_LBN 8 + #define GMF_WTMENREQ_WIDTH 1 + #define GMF_HSTRSTFT_LBN 4 + #define GMF_HSTRSTFT_WIDTH 1 + #define GMF_HSTRSTST_LBN 3 + #define GMF_HSTRSTST_WIDTH 1 + #define GMF_HSTRSTFR_LBN 2 + #define GMF_HSTRSTFR_WIDTH 1 + #define GMF_HSTRSTSR_LBN 1 + #define GMF_HSTRSTSR_WIDTH 1 + #define GMF_HSTRSTWT_LBN 0 + #define GMF_HSTRSTWT_WIDTH 1 +#define GMF_CFG1_REG_KER_OFST 0x130 /* GMAC FIFO configuration register 1 */ +#define GMF_CFG1_REG_OFST 0x130 /* GMAC FIFO configuration register 1 */ + #define GMF_CFGFRTH_LBN 16 + #define GMF_CFGFRTH_WIDTH 5 + #define GMF_CFGXOFFRTX_LBN 0 + #define GMF_CFGXOFFRTX_WIDTH 16 +#define GMF_CFG2_REG_KER_OFST 0x140 /* GMAC FIFO configuration register 2 */ +#define GMF_CFG2_REG_OFST 0x140 /* GMAC FIFO configuration register 2 */ + #define GMF_CFGHWM_LBN 16 + #define GMF_CFGHWM_WIDTH 6 + #define GMF_CFGLWM_LBN 0 + #define GMF_CFGLWM_WIDTH 6 +#define GMF_CFG3_REG_KER_OFST 0x150 /* GMAC FIFO configuration register 3 */ +#define GMF_CFG3_REG_OFST 0x150 /* GMAC FIFO configuration register 3 */ + #define GMF_CFGHWMFT_LBN 16 + #define GMF_CFGHWMFT_WIDTH 6 + #define GMF_CFGFTTH_LBN 0 + #define GMF_CFGFTTH_WIDTH 6 +#define GMF_CFG4_REG_KER_OFST 0x160 /* GMAC FIFO configuration register 4 */ +#define GMF_CFG4_REG_OFST 0x160 /* GMAC FIFO configuration register 4 */ + #define GMF_HSTFLTRFRM_LBN 0 + #define GMF_HSTFLTRFRM_WIDTH 18 +#define GMF_CFG5_REG_KER_OFST 0x170 /* GMAC FIFO configuration register 5 */ +#define GMF_CFG5_REG_OFST 0x170 /* GMAC FIFO configuration register 5 */ + #define GMF_CFGHDPLX_LBN 22 + #define GMF_CFGHDPLX_WIDTH 1 + #define GMF_SRFULL_LBN 21 + #define GMF_SRFULL_WIDTH 1 + #define GMF_HSTSRFULLCLR_LBN 20 + #define GMF_HSTSRFULLCLR_WIDTH 1 + #define GMF_CFGBYTMODE_LBN 19 + #define GMF_CFGBYTMODE_WIDTH 1 + #define GMF_HSTDRPLT64_LBN 18 + #define GMF_HSTDRPLT64_WIDTH 1 + #define GMF_HSTFLTRFRMDC_LBN 0 + #define GMF_HSTFLTRFRMDC_WIDTH 18 +/*************---- 10G MAC Registers C Header ----*************/ +#define XM_ADR_LO_REG_KER_P0_OFST 0x1200 /* XGMAC address register low - + port 0 */ +#define XM_ADR_LO_REG_P0_OFST 0x1200 /* XGMAC address register low - + port 0 */ + #define XM_ADR_LO_LBN 0 + #define 
XM_ADR_LO_WIDTH 32 +#define XM_ADR_HI_REG_KER_P0_OFST 0x1210 /* XGMAC address register high - + port 0 */ +#define XM_ADR_HI_REG_P0_OFST 0x1210 /* XGMAC address register high - + port 0 */ + #define XM_ADR_HI_LBN 0 + #define XM_ADR_HI_WIDTH 16 +#define XM_GLB_CFG_REG_KER_P0_OFST 0x1220 /* XGMAC global configuration - + port 0 */ +#define XM_GLB_CFG_REG_P0_OFST 0x1220 /* XGMAC global configuration - + port 0 */ + #define XM_LINE_LB_DEEP_RSVD_LBN 28 + #define XM_LINE_LB_DEEP_RSVD_WIDTH 1 + #define XM_RMTFLT_GEN_LBN 17 + #define XM_RMTFLT_GEN_WIDTH 1 + #define XM_DEBUG_MODE_LBN 16 + #define XM_DEBUG_MODE_WIDTH 1 + #define XM_RX_STAT_EN_LBN 11 + #define XM_RX_STAT_EN_WIDTH 1 + #define XM_TX_STAT_EN_LBN 10 + #define XM_TX_STAT_EN_WIDTH 1 + #define XM_CUT_THRU_MODE_LBN 7 + #define XM_CUT_THRU_MODE_WIDTH 1 + #define XM_RX_JUMBO_MODE_LBN 6 + #define XM_RX_JUMBO_MODE_WIDTH 1 + #define XM_WAN_MODE_LBN 5 + #define XM_WAN_MODE_WIDTH 1 + #define XM_AUTOCLR_MODE_LBN 4 + #define XM_AUTOCLR_MODE_WIDTH 1 + #define XM_INTCLR_MODE_LBN 3 + #define XM_INTCLR_MODE_WIDTH 1 + #define XM_CORE_RST_LBN 0 + #define XM_CORE_RST_WIDTH 1 +#define XM_TX_CFG_REG_KER_P0_OFST 0x1230 /* XGMAC transmit configuration - + port 0 */ +#define XM_TX_CFG_REG_P0_OFST 0x1230 /* XGMAC transmit configuration - + port 0 */ + #define XM_TX_PROG_LBN 24 + #define XM_TX_PROG_WIDTH 1 + #define XM_IPG_LBN 16 + #define XM_IPG_WIDTH 4 + #define XM_FCNTL_LBN 10 + #define XM_FCNTL_WIDTH 1 + #define XM_TXCRC_LBN 8 + #define XM_TXCRC_WIDTH 1 + #define XM_EDRC_LBN 6 + #define XM_EDRC_WIDTH 1 + #define XM_AUTO_PAD_LBN 5 + #define XM_AUTO_PAD_WIDTH 1 + #define XM_TX_PRMBL_LBN 2 + #define XM_TX_PRMBL_WIDTH 1 + #define XM_TXEN_LBN 1 + #define XM_TXEN_WIDTH 1 + #define XM_TX_RST_LBN 0 + #define XM_TX_RST_WIDTH 1 +#define XM_RX_CFG_REG_KER_P0_OFST 0x1240 /* XGMAC receive configuration - + port 0 */ +#define XM_RX_CFG_REG_P0_OFST 0x1240 /* XGMAC receive configuration - + port 0 */ + #define XM_PASS_LENERR_LBN 26 + #define XM_PASS_LENERR_WIDTH 1 + #define XM_PASS_CRC_ERR_LBN 25 + #define XM_PASS_CRC_ERR_WIDTH 1 + #define XM_PASS_PRMBLE_ERR_LBN 24 + #define XM_PASS_PRMBLE_ERR_WIDTH 1 + #define XM_REJ_UCAST_LBN 18 + #define XM_REJ_UCAST_WIDTH 1 + #define XM_BSC_EN_LBN 17 + #define XM_BSC_EN_WIDTH 1 + #define XM_ACPT_ALL_MCAST_LBN 11 + #define XM_ACPT_ALL_MCAST_WIDTH 1 + #define XM_PASS_SAP_LBN 10 + #define XM_PASS_SAP_WIDTH 1 + #define XM_ACPT_ALL_UCAST_LBN 9 + #define XM_ACPT_ALL_UCAST_WIDTH 1 + #define XM_AUTO_DEPAD_LBN 8 + #define XM_AUTO_DEPAD_WIDTH 1 + #define XM_RXCRC_LBN 3 + #define XM_RXCRC_WIDTH 1 + #define XM_RX_PRMBL_LBN 2 + #define XM_RX_PRMBL_WIDTH 1 + #define XM_RXEN_LBN 1 + #define XM_RXEN_WIDTH 1 + #define XM_RX_RST_LBN 0 + #define XM_RX_RST_WIDTH 1 +#define XM_FC_REG_KER_P0_OFST 0x1270 /* XGMAC flow control register - + port 0 */ +#define XM_FC_REG_P0_OFST 0x1270 /* XGMAC flow control register - + port 0 */ + #define XM_PAUSE_TIME_LBN 16 + #define XM_PAUSE_TIME_WIDTH 16 + #define XM_RX_MAC_STAT_LBN 11 + #define XM_RX_MAC_STAT_WIDTH 1 + #define XM_TX_MAC_STAT_LBN 10 + #define XM_TX_MAC_STAT_WIDTH 1 + #define XM_MCNTL_PASS_LBN 8 + #define XM_MCNTL_PASS_WIDTH 2 + #define XM_REJ_CNTL_UCAST_LBN 6 + #define XM_REJ_CNTL_UCAST_WIDTH 1 + #define XM_REJ_CNTL_MCAST_LBN 5 + #define XM_REJ_CNTL_MCAST_WIDTH 1 + #define XM_AUTO_XMIT_ZPAUSE_LBN 4 + #define XM_AUTO_XMIT_ZPAUSE_WIDTH 1 + #define XM_AUTO_XMIT_PAUSE_LBN 3 + #define XM_AUTO_XMIT_PAUSE_WIDTH 1 + #define XM_ZPAUSE_LBN 2 + #define XM_ZPAUSE_WIDTH 1 + #define XM_XMIT_PAUSE_LBN 1 + #define 
XM_XMIT_PAUSE_WIDTH 1 + #define XM_DIS_FCNTL_LBN 0 + #define XM_DIS_FCNTL_WIDTH 1 +#define XM_PAUSE_TIME_REG_KER_P0_OFST 0x1290 /* XGMAC pause time register - + port 0 */ +#define XM_PAUSE_TIME_REG_P0_OFST 0x1290 /* XGMAC pause time register - + port 0 */ + #define XM_TX_PAUSE_CNT_LBN 16 + #define XM_TX_PAUSE_CNT_WIDTH 16 + #define XM_RX_PAUSE_CNT_LBN 0 + #define XM_RX_PAUSE_CNT_WIDTH 16 +#define XM_TX_PARAM_REG_KER_P0_OFST 0x12D0 /* XGMAC transmit parameter + register - port 0 */ +#define XM_TX_PARAM_REG_P0_OFST 0x12D0 /* XGMAC transmit parameter register - + port 0 */ + #define XM_TX_JUMBO_MODE_LBN 31 + #define XM_TX_JUMBO_MODE_WIDTH 1 + #define XM_MAX_TX_FRM_SIZE_LBN 16 + #define XM_MAX_TX_FRM_SIZE_WIDTH 14 + #define XM_PAD_CHAR_LBN 0 + #define XM_PAD_CHAR_WIDTH 8 +#define XM_RX_PARAM_REG_KER_P0_OFST 0x12E0 /* XGMAC receive parameter + register - port 0 */ +#define XM_RX_PARAM_REG_P0_OFST 0x12E0 /* XGMAC receive parameter register - + port 0 */ + #define XM_MAX_RX_FRM_SIZE_LBN 0 + #define XM_MAX_RX_FRM_SIZE_WIDTH 14 +#define XX_PWR_RST_REG_KER_P0_OFST 0x1300 /* XGXS/XAUI powerdown/reset + register */ +#define XX_PWR_RST_REG_P0_OFST 0x1300 /* XGXS/XAUI powerdown/reset register */ + #define XX_PWRDND_SIG_LBN 31 + #define XX_PWRDND_SIG_WIDTH 1 + #define XX_PWRDNC_SIG_LBN 30 + #define XX_PWRDNC_SIG_WIDTH 1 + #define XX_PWRDNB_SIG_LBN 29 + #define XX_PWRDNB_SIG_WIDTH 1 + #define XX_PWRDNA_SIG_LBN 28 + #define XX_PWRDNA_SIG_WIDTH 1 + #define XX_SIM_MODE_LBN 27 + #define XX_SIM_MODE_WIDTH 1 + #define XX_RSTPLLCD_SIG_LBN 25 + #define XX_RSTPLLCD_SIG_WIDTH 1 + #define XX_RSTPLLAB_SIG_LBN 24 + #define XX_RSTPLLAB_SIG_WIDTH 1 + #define XX_RESETD_SIG_LBN 23 + #define XX_RESETD_SIG_WIDTH 1 + #define XX_RESETC_SIG_LBN 22 + #define XX_RESETC_SIG_WIDTH 1 + #define XX_RESETB_SIG_LBN 21 + #define XX_RESETB_SIG_WIDTH 1 + #define XX_RESETA_SIG_LBN 20 + #define XX_RESETA_SIG_WIDTH 1 + #define XX_RSTXGXSTX_SIG_LBN 18 + #define XX_RSTXGXSTX_SIG_WIDTH 1 + #define XX_RSTXGXSRX_SIG_LBN 17 + #define XX_RSTXGXSRX_SIG_WIDTH 1 + #define XX_SD_RST_ACT_LBN 16 + #define XX_SD_RST_ACT_WIDTH 1 + #define XX_PWRDND_EN_LBN 15 + #define XX_PWRDND_EN_WIDTH 1 + #define XX_PWRDNC_EN_LBN 14 + #define XX_PWRDNC_EN_WIDTH 1 + #define XX_PWRDNB_EN_LBN 13 + #define XX_PWRDNB_EN_WIDTH 1 + #define XX_PWRDNA_EN_LBN 12 + #define XX_PWRDNA_EN_WIDTH 1 + #define XX_RSTPLLCD_EN_LBN 9 + #define XX_RSTPLLCD_EN_WIDTH 1 + #define XX_RSTPLLAB_EN_LBN 8 + #define XX_RSTPLLAB_EN_WIDTH 1 + #define XX_RESETD_EN_LBN 7 + #define XX_RESETD_EN_WIDTH 1 + #define XX_RESETC_EN_LBN 6 + #define XX_RESETC_EN_WIDTH 1 + #define XX_RESETB_EN_LBN 5 + #define XX_RESETB_EN_WIDTH 1 + #define XX_RESETA_EN_LBN 4 + #define XX_RESETA_EN_WIDTH 1 + #define XX_RSTXGXSTX_EN_LBN 2 + #define XX_RSTXGXSTX_EN_WIDTH 1 + #define XX_RSTXGXSRX_EN_LBN 1 + #define XX_RSTXGXSRX_EN_WIDTH 1 + #define XX_RST_XX_EN_LBN 0 + #define XX_RST_XX_EN_WIDTH 1 +#define XX_SD_CTL_REG_KER_P0_OFST 0x1310 /* XGXS/XAUI powerdown/reset control + register */ +#define XX_SD_CTL_REG_P0_OFST 0x1310 /* XGXS/XAUI powerdown/reset control + register */ + #define XX_TERMADJ1_LBN 17 + #define XX_TERMADJ1_WIDTH 1 + #define XX_TERMADJ0_LBN 16 + #define XX_TERMADJ0_WIDTH 1 + #define XX_HIDRVD_LBN 15 + #define XX_HIDRVD_WIDTH 1 + #define XX_LODRVD_LBN 14 + #define XX_LODRVD_WIDTH 1 + #define XX_HIDRVC_LBN 13 + #define XX_HIDRVC_WIDTH 1 + #define XX_LODRVC_LBN 12 + #define XX_LODRVC_WIDTH 1 + #define XX_HIDRVB_LBN 11 + #define XX_HIDRVB_WIDTH 1 + #define XX_LODRVB_LBN 10 + #define XX_LODRVB_WIDTH 1 + #define 
XX_HIDRVA_LBN 9 + #define XX_HIDRVA_WIDTH 1 + #define XX_LODRVA_LBN 8 + #define XX_LODRVA_WIDTH 1 + #define XX_LPBKD_LBN 3 + #define XX_LPBKD_WIDTH 1 + #define XX_LPBKC_LBN 2 + #define XX_LPBKC_WIDTH 1 + #define XX_LPBKB_LBN 1 + #define XX_LPBKB_WIDTH 1 + #define XX_LPBKA_LBN 0 + #define XX_LPBKA_WIDTH 1 +#define XX_TXDRV_CTL_REG_KER_P0_OFST 0x1320 /* XAUI SerDes transmit drive + control register */ +#define XX_TXDRV_CTL_REG_P0_OFST 0x1320 /* XAUI SerDes transmit drive + control register */ + #define XX_DEQD_LBN 28 + #define XX_DEQD_WIDTH 4 + #define XX_DEQC_LBN 24 + #define XX_DEQC_WIDTH 4 + #define XX_DEQB_LBN 20 + #define XX_DEQB_WIDTH 4 + #define XX_DEQA_LBN 16 + #define XX_DEQA_WIDTH 4 + #define XX_DTXD_LBN 12 + #define XX_DTXD_WIDTH 4 + #define XX_DTXC_LBN 8 + #define XX_DTXC_WIDTH 4 + #define XX_DTXB_LBN 4 + #define XX_DTXB_WIDTH 4 + #define XX_DTXA_LBN 0 + #define XX_DTXA_WIDTH 4 +#define XX_PRBS_CTL_REG_KER_P0_OFST 0x1330 /* XAUI PRBS control register */ +#define XX_PRBS_CTL_REG_P0_OFST 0x1330 /* XAUI PRBS control register */ + #define XX_CH3_RX_PRBS_SEL_LBN 30 + #define XX_CH3_RX_PRBS_SEL_WIDTH 2 + #define XX_CH3_RX_PRBS_INV_LBN 29 + #define XX_CH3_RX_PRBS_INV_WIDTH 1 + #define XX_CH3_RX_PRBS_CHKEN_LBN 28 + #define XX_CH3_RX_PRBS_CHKEN_WIDTH 1 + #define XX_CH2_RX_PRBS_SEL_LBN 26 + #define XX_CH2_RX_PRBS_SEL_WIDTH 2 + #define XX_CH2_RX_PRBS_INV_LBN 25 + #define XX_CH2_RX_PRBS_INV_WIDTH 1 + #define XX_CH2_RX_PRBS_CHKEN_LBN 24 + #define XX_CH2_RX_PRBS_CHKEN_WIDTH 1 + #define XX_CH1_RX_PRBS_SEL_LBN 22 + #define XX_CH1_RX_PRBS_SEL_WIDTH 2 + #define XX_CH1_RX_PRBS_INV_LBN 21 + #define XX_CH1_RX_PRBS_INV_WIDTH 1 + #define XX_CH1_RX_PRBS_CHKEN_LBN 20 + #define XX_CH1_RX_PRBS_CHKEN_WIDTH 1 + #define XX_CH0_RX_PRBS_SEL_LBN 18 + #define XX_CH0_RX_PRBS_SEL_WIDTH 2 + #define XX_CH0_RX_PRBS_INV_LBN 17 + #define XX_CH0_RX_PRBS_INV_WIDTH 1 + #define XX_CH0_RX_PRBS_CHKEN_LBN 16 + #define XX_CH0_RX_PRBS_CHKEN_WIDTH 1 + #define XX_CH3_TX_PRBS_SEL_LBN 14 + #define XX_CH3_TX_PRBS_SEL_WIDTH 2 + #define XX_CH3_TX_PRBS_INV_LBN 13 + #define XX_CH3_TX_PRBS_INV_WIDTH 1 + #define XX_CH3_TX_PRBS_CHKEN_LBN 12 + #define XX_CH3_TX_PRBS_CHKEN_WIDTH 1 + #define XX_CH2_TX_PRBS_SEL_LBN 10 + #define XX_CH2_TX_PRBS_SEL_WIDTH 2 + #define XX_CH2_TX_PRBS_INV_LBN 9 + #define XX_CH2_TX_PRBS_INV_WIDTH 1 + #define XX_CH2_TX_PRBS_CHKEN_LBN 8 + #define XX_CH2_TX_PRBS_CHKEN_WIDTH 1 + #define XX_CH1_TX_PRBS_SEL_LBN 6 + #define XX_CH1_TX_PRBS_SEL_WIDTH 2 + #define XX_CH1_TX_PRBS_INV_LBN 5 + #define XX_CH1_TX_PRBS_INV_WIDTH 1 + #define XX_CH1_TX_PRBS_CHKEN_LBN 4 + #define XX_CH1_TX_PRBS_CHKEN_WIDTH 1 + #define XX_CH0_TX_PRBS_SEL_LBN 2 + #define XX_CH0_TX_PRBS_SEL_WIDTH 2 + #define XX_CH0_TX_PRBS_INV_LBN 1 + #define XX_CH0_TX_PRBS_INV_WIDTH 1 + #define XX_CH0_TX_PRBS_CHKEN_LBN 0 + #define XX_CH0_TX_PRBS_CHKEN_WIDTH 1 +#define XX_PRBS_CHK_REG_KER_P0_OFST 0x1340 /* XAUI PRBS checker control + register */ +#define XX_PRBS_CHK_REG_P0_OFST 0x1340 /* XAUI PRBS checker control + register */ + #define XX_REV_LB_EN_LBN 16 + #define XX_REV_LB_EN_WIDTH 1 + #define XX_CH3_DEG_DET_LBN 15 + #define XX_CH3_DEG_DET_WIDTH 1 + #define XX_CH3_LFSR_LOCK_IND_LBN 14 + #define XX_CH3_LFSR_LOCK_IND_WIDTH 1 + #define XX_CH3_PRBS_FRUN_LBN 13 + #define XX_CH3_PRBS_FRUN_WIDTH 1 + #define XX_CH3_ERR_CHK_LBN 12 + #define XX_CH3_ERR_CHK_WIDTH 1 + #define XX_CH2_DEG_DET_LBN 11 + #define XX_CH2_DEG_DET_WIDTH 1 + #define XX_CH2_LFSR_LOCK_IND_LBN 10 + #define XX_CH2_LFSR_LOCK_IND_WIDTH 1 + #define XX_CH2_PRBS_FRUN_LBN 9 + #define XX_CH2_PRBS_FRUN_WIDTH 1 + #define 
XX_CH2_ERR_CHK_LBN 8 + #define XX_CH2_ERR_CHK_WIDTH 1 + #define XX_CH1_DEG_DET_LBN 7 + #define XX_CH1_DEG_DET_WIDTH 1 + #define XX_CH1_LFSR_LOCK_IND_LBN 6 + #define XX_CH1_LFSR_LOCK_IND_WIDTH 1 + #define XX_CH1_PRBS_FRUN_LBN 5 + #define XX_CH1_PRBS_FRUN_WIDTH 1 + #define XX_CH1_ERR_CHK_LBN 4 + #define XX_CH1_ERR_CHK_WIDTH 1 + #define XX_CH0_DEG_DET_LBN 3 + #define XX_CH0_DEG_DET_WIDTH 1 + #define XX_CH0_LFSR_LOCK_IND_LBN 2 + #define XX_CH0_LFSR_LOCK_IND_WIDTH 1 + #define XX_CH0_PRBS_FRUN_LBN 1 + #define XX_CH0_PRBS_FRUN_WIDTH 1 + #define XX_CH0_ERR_CHK_LBN 0 + #define XX_CH0_ERR_CHK_WIDTH 1 +#define XX_PRBS_ERR_REG_KER_P0_OFST 0x1350 /* XAUI PRBS checker error + count register */ +#define XX_PRBS_ERR_REG_P0_OFST 0x1350 /* XAUI PRBS checker error count + register */ + #define XX_CH3_PRBS_ERR_CNT_LBN 24 + #define XX_CH3_PRBS_ERR_CNT_WIDTH 8 + #define XX_CH2_PRBS_ERR_CNT_LBN 16 + #define XX_CH2_PRBS_ERR_CNT_WIDTH 8 + #define XX_CH1_PRBS_ERR_CNT_LBN 8 + #define XX_CH1_PRBS_ERR_CNT_WIDTH 8 + #define XX_CH0_PRBS_ERR_CNT_LBN 0 + #define XX_CH0_PRBS_ERR_CNT_WIDTH 8 +#define XX_CORE_STAT_REG_KER_P0_OFST 0x1360 /* XAUI XGXS core status + register */ +#define XX_CORE_STAT_REG_P0_OFST 0x1360 /* XAUI XGXS core status register */ + #define XX_FORCE_SIG3_LBN 31 + #define XX_FORCE_SIG3_WIDTH 1 + #define XX_FORCE_SIG3_VAL_LBN 30 + #define XX_FORCE_SIG3_VAL_WIDTH 1 + #define XX_FORCE_SIG2_LBN 29 + #define XX_FORCE_SIG2_WIDTH 1 + #define XX_FORCE_SIG2_VAL_LBN 28 + #define XX_FORCE_SIG2_VAL_WIDTH 1 + #define XX_FORCE_SIG1_LBN 27 + #define XX_FORCE_SIG1_WIDTH 1 + #define XX_FORCE_SIG1_VAL_LBN 26 + #define XX_FORCE_SIG1_VAL_WIDTH 1 + #define XX_FORCE_SIG0_LBN 25 + #define XX_FORCE_SIG0_WIDTH 1 + #define XX_FORCE_SIG0_VAL_LBN 24 + #define XX_FORCE_SIG0_VAL_WIDTH 1 + #define XX_XGXS_LB_EN_LBN 23 + #define XX_XGXS_LB_EN_WIDTH 1 + #define XX_XGMII_LB_EN_LBN 22 + #define XX_XGMII_LB_EN_WIDTH 1 + #define XX_MATCH_FAULT_LBN 21 + #define XX_MATCH_FAULT_WIDTH 1 + #define XX_ALIGN_DONE_LBN 20 + #define XX_ALIGN_DONE_WIDTH 1 + #define XX_SYNC_STAT3_LBN 19 + #define XX_SYNC_STAT3_WIDTH 1 + #define XX_SYNC_STAT2_LBN 18 + #define XX_SYNC_STAT2_WIDTH 1 + #define XX_SYNC_STAT1_LBN 17 + #define XX_SYNC_STAT1_WIDTH 1 + #define XX_SYNC_STAT0_LBN 16 + #define XX_SYNC_STAT0_WIDTH 1 + #define XX_COMMA_DET_CH3_LBN 15 + #define XX_COMMA_DET_CH3_WIDTH 1 + #define XX_COMMA_DET_CH2_LBN 14 + #define XX_COMMA_DET_CH2_WIDTH 1 + #define XX_COMMA_DET_CH1_LBN 13 + #define XX_COMMA_DET_CH1_WIDTH 1 + #define XX_COMMA_DET_CH0_LBN 12 + #define XX_COMMA_DET_CH0_WIDTH 1 + #define XX_CGRP_ALIGN_CH3_LBN 11 + #define XX_CGRP_ALIGN_CH3_WIDTH 1 + #define XX_CGRP_ALIGN_CH2_LBN 10 + #define XX_CGRP_ALIGN_CH2_WIDTH 1 + #define XX_CGRP_ALIGN_CH1_LBN 9 + #define XX_CGRP_ALIGN_CH1_WIDTH 1 + #define XX_CGRP_ALIGN_CH0_LBN 8 + #define XX_CGRP_ALIGN_CH0_WIDTH 1 + #define XX_CHAR_ERR_CH3_LBN 7 + #define XX_CHAR_ERR_CH3_WIDTH 1 + #define XX_CHAR_ERR_CH2_LBN 6 + #define XX_CHAR_ERR_CH2_WIDTH 1 + #define XX_CHAR_ERR_CH1_LBN 5 + #define XX_CHAR_ERR_CH1_WIDTH 1 + #define XX_CHAR_ERR_CH0_LBN 4 + #define XX_CHAR_ERR_CH0_WIDTH 1 + #define XX_DISPERR_CH3_LBN 3 + #define XX_DISPERR_CH3_WIDTH 1 + #define XX_DISPERR_CH2_LBN 2 + #define XX_DISPERR_CH2_WIDTH 1 + #define XX_DISPERR_CH1_LBN 1 + #define XX_DISPERR_CH1_WIDTH 1 + #define XX_DISPERR_CH0_LBN 0 + #define XX_DISPERR_CH0_WIDTH 1 Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_xgrmon.h =================================================================== --- /dev/null 1970-01-01 
00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/falcon/falcon_xgrmon.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,125 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides EtherFabric NIC - EFXXXX (aka Falcon) 10G MAC + * statistics register definitions. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +/*************---- 10G MAC Statistical Counters C Header ----*************/ +#define XgRxOctets_offset 0x0 + #define XgRxOctets_WIDTH 48 +#define XgRxOctetsOK_offset 0x8 + #define XgRxOctetsOK_WIDTH 48 +#define XgRxPkts_offset 0x10 + #define XgRxPkts_WIDTH 32 +#define XgRxPktsOK_offset 0x14 + #define XgRxPktsOK_WIDTH 32 +#define XgRxBroadcastPkts_offset 0x18 + #define XgRxBroadcastPkts_WIDTH 32 +#define XgRxMulticastPkts_offset 0x1C + #define XgRxMulticastPkts_WIDTH 32 +#define XgRxUnicastPkts_offset 0x20 + #define XgRxUnicastPkts_WIDTH 32 +#define XgRxUndersizePkts_offset 0x24 + #define XgRxUndersizePkts_WIDTH 32 +#define XgRxOversizePkts_offset 0x28 + #define XgRxOversizePkts_WIDTH 32 +#define XgRxJabberPkts_offset 0x2C + #define XgRxJabberPkts_WIDTH 32 +#define XgRxUndersizeFCSerrorPkts_offset 0x30 + #define XgRxUndersizeFCSerrorPkts_WIDTH 32 +#define XgRxDropEvents_offset 0x34 + #define XgRxDropEvents_WIDTH 32 +#define XgRxFCSerrorPkts_offset 0x38 + #define XgRxFCSerrorPkts_WIDTH 32 +#define XgRxAlignError_offset 0x3C + #define XgRxAlignError_WIDTH 32 +#define XgRxSymbolError_offset 0x40 + #define XgRxSymbolError_WIDTH 32 +#define XgRxInternalMACError_offset 0x44 + #define XgRxInternalMACError_WIDTH 32 +#define XgRxControlPkts_offset 0x48 + #define XgRxControlPkts_WIDTH 32 +#define XgRxPausePkts_offset 0x4C + #define XgRxPausePkts_WIDTH 32 +#define XgRxPkts64Octets_offset 0x50 + #define XgRxPkts64Octets_WIDTH 32 +#define XgRxPkts65to127Octets_offset 0x54 + #define XgRxPkts65to127Octets_WIDTH 32 +#define XgRxPkts128to255Octets_offset 0x58 + #define XgRxPkts128to255Octets_WIDTH 32 +#define XgRxPkts256to511Octets_offset 0x5C + #define XgRxPkts256to511Octets_WIDTH 32 +#define XgRxPkts512to1023Octets_offset 0x60 + #define XgRxPkts512to1023Octets_WIDTH 32 +#define XgRxPkts1024to15xxOctets_offset 0x64 + #define XgRxPkts1024to15xxOctets_WIDTH 32 +#define XgRxPkts15xxtoMaxOctets_offset 0x68 + #define XgRxPkts15xxtoMaxOctets_WIDTH 32 +#define XgRxLengthError_offset 0x6C + #define XgRxLengthError_WIDTH 32 +#define XgTxPkts_offset 
0x80 + #define XgTxPkts_WIDTH 32 +#define XgTxOctets_offset 0x88 + #define XgTxOctets_WIDTH 48 +#define XgTxMulticastPkts_offset 0x90 + #define XgTxMulticastPkts_WIDTH 32 +#define XgTxBroadcastPkts_offset 0x94 + #define XgTxBroadcastPkts_WIDTH 32 +#define XgTxUnicastPkts_offset 0x98 + #define XgTxUnicastPkts_WIDTH 32 +#define XgTxControlPkts_offset 0x9C + #define XgTxControlPkts_WIDTH 32 +#define XgTxPausePkts_offset 0xA0 + #define XgTxPausePkts_WIDTH 32 +#define XgTxPkts64Octets_offset 0xA4 + #define XgTxPkts64Octets_WIDTH 32 +#define XgTxPkts65to127Octets_offset 0xA8 + #define XgTxPkts65to127Octets_WIDTH 32 +#define XgTxPkts128to255Octets_offset 0xAC + #define XgTxPkts128to255Octets_WIDTH 32 +#define XgTxPkts256to511Octets_offset 0xB0 + #define XgTxPkts256to511Octets_WIDTH 32 +#define XgTxPkts512to1023Octets_offset 0xB4 + #define XgTxPkts512to1023Octets_WIDTH 32 +#define XgTxPkts1024to15xxOctets_offset 0xB8 + #define XgTxPkts1024to15xxOctets_WIDTH 32 +#define XgTxPkts1519toMaxOctets_offset 0xBC + #define XgTxPkts1519toMaxOctets_WIDTH 32 +#define XgTxUndersizePkts_offset 0xC0 + #define XgTxUndersizePkts_WIDTH 32 +#define XgTxOversizePkts_offset 0xC4 + #define XgTxOversizePkts_WIDTH 32 +#define xGTxNonTcpUdpPkt_offset 0xC8 + #define xGTxNonTcpUdpPkt_WIDTH 16 +#define xGTxMacSrcErrPkt_offset 0xCC + #define xGTxMacSrcErrPkt_WIDTH 16 +#define xGTxIpSrcErrPkt_offset 0xD0 + #define xGTxIpSrcErrPkt_WIDTH 16 +#define XgDmaDone_offset 0xD4 + #define XgDmaDone_WIDTH 32 Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/workarounds.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/efab/hardware/workarounds.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,75 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides workaround settings for EtherFabric NICs. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_DRIVER_EFAB_WORKAROUNDS_H__ +#define __CI_DRIVER_EFAB_WORKAROUNDS_H__ + +/*---------------------------------------------------------------------------- + * + * Hardware workarounds which have global scope + * + *---------------------------------------------------------------------------*/ + +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + +#if defined(__CI_HARDWARE_CONFIG_FALCON_B0__) +/*------------------------------- B0 ---------------------------------------*/ + +#define BUG2175_WORKAROUND 0 /* TX event batching for dual port operation. + This removes the effect (dup TX events) + of the fix + (TX event per packet + batch events) */ +#define BUG5302_WORKAROUND 0 /* unstick TX DMAQ after out-of-range wr ptr */ +#define BUG5475_WORKAROUND 1 /* 10G SNAP encapsulation broken */ +#define BUG5762_WORKAROUND 0 /* Set all queues to jumbo mode */ +#define BUG5391_WORKAROUND 0 /* Misaligned TX can't span 512-byte boundary */ +#define BUG7916_WORKAROUND 0 /* RX flush gets lost */ + +#else +/*------------------------------- A0/A1 ------------------------------------*/ + +#define BUG2175_WORKAROUND 1 /* TX event batching for dual port operation. + This removes the effect (dup TX events) + of the fix + (TX event per packet + batch events) */ +#define BUG5302_WORKAROUND 1 /* unstick TX DMAQ after out-of-range wr ptr */ +#define BUG5475_WORKAROUND 1 /* 10G SNAP encapsulation broken */ +#define BUG5762_WORKAROUND 1 /* Set all queues to jumbo mode */ +#define BUG5391_WORKAROUND 1 /* Misaligned TX can't span 512-byte boundary */ +#define BUG7916_WORKAROUND 1 /* RX flush gets lost */ + +#endif /* B0/A01 */ + +#else +# error Need hw support. +#endif + +#endif /* __CI_DRIVER_EFAB_WORKAROUNDS_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/resource/efx_vi.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/resource/efx_vi.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,276 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains public EFX VI API to Solarflare resource manager. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_DRIVER_RESOURCE_EFX_VI_H__ +#define __CI_DRIVER_RESOURCE_EFX_VI_H__ + +/* Default size of event queue in the efx_vi resource. Copied from + * CI_CFG_NETIF_EVENTQ_SIZE */ +#define EFX_VI_EVENTQ_SIZE_DEFAULT 1024 + +extern int efx_vi_eventq_size; + +/************************************************************************** + * efx_vi_state types, allocation and free + **************************************************************************/ + +/*! Handle for refering to a efx_vi */ +struct efx_vi_state; + +/*! + * Allocate an efx_vi, including event queue and pt_endpoint + * + * \param vih_out Pointer to a handle that is set on success + * \param nic_index Index of NIC to apply this resource to + * \return Zero on success (and vih_out set), non-zero on failure. + */ +extern int +efx_vi_alloc(struct efx_vi_state **vih_out, int nic_index); + +/*! + * Free a previously allocated efx_vi + * + * \param vih The handle of the efx_vi to free + */ +extern void +efx_vi_free(struct efx_vi_state *vih); + +/*! + * Reset a previously allocated efx_vi + * + * \param vih The handle of the efx_vi to reset + */ +extern void +efx_vi_reset(struct efx_vi_state *vih); + +/************************************************************************** + * efx_vi_eventq types and functions + **************************************************************************/ + +/*! + * Register a function to receive callbacks when event queue timeouts + * or wakeups occur. Only one function per efx_vi can be registered + * at once. + * + * \param vih The handle to identify the efx_vi + * \param callback The function to callback + * \param context An argument to pass to the callback function + * \return Zero on success, non-zero on failure. + */ +extern int +efx_vi_eventq_register_callback(struct efx_vi_state *vih, + void (*callback)(void *context, int is_timeout), + void *context); + +/*! + * Remove the current eventq timeout or wakeup callback function + * + * \param vih The handle to identify the efx_vi + * \return Zero on success, non-zero on failure + */ +extern int +efx_vi_eventq_kill_callback(struct efx_vi_state *vih); + +/************************************************************************** + * efx_vi_dma_map types and functions + **************************************************************************/ + +/*! + * Handle for refering to a efx_vi + */ +struct efx_vi_dma_map_state; + +/*! + * Map a list of buffer pages so they are registered with the hardware + * + * \param vih The handle to identify the efx_vi + * \param addrs An array of page pointers to map + * \param n_addrs Length of the page pointer array. Must be a power of two. + * \param dmh_out Set on success to a handle used to refer to this mapping + * \return Zero on success, non-zero on failure. + */ +extern int +efx_vi_dma_map_pages(struct efx_vi_state *vih, struct page **pages, + int n_pages, struct efx_vi_dma_map_state **dmh_out); +extern int +efx_vi_dma_map_addrs(struct efx_vi_state *vih, + unsigned long long *dev_bus_addrs, int n_pages, + struct efx_vi_dma_map_state **dmh_out); + +/*! + * Unmap a previously mapped set of pages so they are no longer registered + * with the hardware. 
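/*
 * A minimal, hypothetical usage sketch of the efx_vi calls declared above
 * (not part of the patch itself): allocate a VI on NIC index 0, register an
 * event-queue callback, register a power-of-two array of pre-allocated
 * pages with the hardware, then tear everything down.  The function and
 * callback names are illustrative; efx_vi_dma_unmap_pages() is declared
 * immediately below.
 */
static void example_evq_callback(void *context, int is_timeout)
{
	/* Called on event-queue wakeup or timeout. */
}

static int example_efx_vi_usage(struct page **pages, int n_pages)
{
	struct efx_vi_state *vih;
	struct efx_vi_dma_map_state *dmh;
	int rc;

	rc = efx_vi_alloc(&vih, 0);	/* event queue + pt_endpoint */
	if (rc != 0)
		return rc;

	rc = efx_vi_eventq_register_callback(vih, example_evq_callback, NULL);
	if (rc != 0)
		goto out_free;

	rc = efx_vi_dma_map_pages(vih, pages, n_pages, &dmh);
	if (rc != 0)
		goto out_kill_cb;

	/* ... use the mapping, e.g. via efx_vi_dma_get_map_addr() ... */

	efx_vi_dma_unmap_pages(vih, dmh);
out_kill_cb:
	efx_vi_eventq_kill_callback(vih);
out_free:
	efx_vi_free(vih);
	return rc;
}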
+ * + * \param vih The handle to identify the efx_vi + * \param dmh The handle to identify the dma mapping + */ +extern void +efx_vi_dma_unmap_pages(struct efx_vi_state *vih, + struct efx_vi_dma_map_state *dmh); +extern void +efx_vi_dma_unmap_addrs(struct efx_vi_state *vih, + struct efx_vi_dma_map_state *dmh); + +/*! + * Retrieve the buffer address of the mapping + * + * \param vih The handle to identify the efx_vi + * \param dmh The handle to identify the buffer mapping + * \return The buffer address on success, or zero on failure + */ +extern unsigned +efx_vi_dma_get_map_addr(struct efx_vi_state *vih, + struct efx_vi_dma_map_state *dmh); + +/************************************************************************** + * efx_vi filter functions + **************************************************************************/ + +#define EFX_VI_STATIC_FILTERS 32 + +/*! Handle to refer to a filter instance */ +struct filter_resource_t; + +/*! + * Allocate and add a filter + * + * \param vih The handle to identify the efx_vi + * \param protocol The protocol of the new filter: UDP or TCP + * \param ip_addr_be32 The local ip address of the filter + * \param port_le16 The local port of the filter + * \param fh_out Set on success to be a handle to refer to this filter + * \return Zero on success, non-zero on failure. + */ +extern int +efx_vi_filter(struct efx_vi_state *vih, int protocol, unsigned ip_addr_be32, + int port_le16, struct filter_resource_t **fh_out); + +/*! + * Remove a filter and free resources associated with it + * + * \param vih The handle to identify the efx_vi + * \param fh The handle to identify the filter + * \return Zero on success, non-zero on failure + */ +extern int +efx_vi_filter_stop(struct efx_vi_state *vih, struct filter_resource_t *fh); + +/************************************************************************** + * efx_vi hw resources types and functions + **************************************************************************/ + +/*! Constants for the type field in efx_vi_hw_resource */ +#define EFX_VI_HW_RESOURCE_TXDMAQ 0x0 /* PFN of TX DMA Q */ +#define EFX_VI_HW_RESOURCE_RXDMAQ 0x1 /* PFN of RX DMA Q */ +#define EFX_VI_HW_RESOURCE_TXBELL 0x2 /* PFN of TX Doorbell (EF1) */ +#define EFX_VI_HW_RESOURCE_RXBELL 0x3 /* PFN of RX Doorbell (EF1) */ +#define EFX_VI_HW_RESOURCE_EVQTIMER 0x4 /* Address of event q timer */ + +/* Address of event q pointer (EF1) */ +#define EFX_VI_HW_RESOURCE_EVQPTR 0x5 +/* Address of register pointer (Falcon A) */ +#define EFX_VI_HW_RESOURCE_EVQRPTR 0x6 +/* Offset of register pointer (Falcon B) */ +#define EFX_VI_HW_RESOURCE_EVQRPTR_OFFSET 0x7 +/* Address of mem KVA */ +#define EFX_VI_HW_RESOURCE_EVQMEMKVA 0x8 +/* PFN of doorbell page (Falcon) */ +#define EFX_VI_HW_RESOURCE_BELLPAGE 0x9 + +/*! How large an array to allocate for the get_() functions - smaller + than the total number of constants as some are mutually exclusive */ +#define EFX_VI_HW_RESOURCE_MAXSIZE 0x7 + +/*! Constants for the mem_type field in efx_vi_hw_resource */ +#define EFX_VI_HW_RESOURCE_IOBUFFER 0 /* Host memory */ +#define EFX_VI_HW_RESOURCE_PERIPHERAL 1 /* Card memory/registers */ + +/*! 
+ * Data structure providing information on a hardware resource mapping + */ +struct efx_vi_hw_resource { + u8 type; /*!< What this resource represents */ + u8 mem_type; /*!< What type of memory is it in, eg, + * host or iomem */ + u8 more_to_follow; /*!< Is this part of a multi-region resource */ + u32 length; /*!< Length of the resource in bytes */ + unsigned long address; /*!< Address of this resource */ +}; + +/*! + * Metadata concerning the list of hardware resource mappings + */ +struct efx_vi_hw_resource_metadata { + int version; + int evq_order; + int evq_offs; + int evq_capacity; + int instance; + unsigned rx_capacity; + unsigned tx_capacity; + int nic_arch; + int nic_revision; + char nic_variant; +}; + +/*! + * Obtain a list of hardware resource mappings, using virtual addresses + * + * \param vih The handle to identify the efx_vi + * \param mdata Pointer to a structure to receive the metadata + * \param hw_res_array An array to receive the list of hardware resources + * \param length The length of hw_res_array. Updated on success to contain + * the number of entries in the supplied array that were used. + * \return Zero on success, non-zero on failure + */ +extern int +efx_vi_hw_resource_get_virt(struct efx_vi_state *vih, + struct efx_vi_hw_resource_metadata *mdata, + struct efx_vi_hw_resource *hw_res_array, + int *length); + +/*! + * Obtain a list of hardware resource mappings, using physical addresses + * + * \param vih The handle to identify the efx_vi + * \param mdata Pointer to a structure to receive the metadata + * \param hw_res_array An array to receive the list of hardware resources + * \param length The length of hw_res_array. Updated on success to contain + * the number of entries in the supplied array that were used. + * \return Zero on success, non-zero on failure + */ +extern int +efx_vi_hw_resource_get_phys(struct efx_vi_state *vih, + struct efx_vi_hw_resource_metadata *mdata, + struct efx_vi_hw_resource *hw_res_array, + int *length); + +#endif /* __CI_DRIVER_RESOURCE_EFX_VI_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/resource/linux_efhw_nic.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/driver/resource/linux_efhw_nic.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,76 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains definition of the public type struct linux_efhw_nic. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_DRIVER_RESOURCE_LINUX_RESOURCE__ +#define __CI_DRIVER_RESOURCE_LINUX_RESOURCE__ + +#ifndef __linux__ +# error Silly +#endif +#ifndef __KERNEL__ +# error Silly +#endif + +#include +#include + + +/************************************************************************ + * Per-nic structure in the resource driver * + ************************************************************************/ + +struct linux_efhw_nic { + struct efhw_nic nic; + + struct pci_dev *pci_dev; /*!< pci descriptor */ + struct tasklet_struct tasklet; /*!< for interrupt bottom half */ + + /* Physical addresses of the control aperture bar. */ + unsigned long ctr_ap_pci_addr; + + /*! Callbacks for driverlink, when needed. */ + struct efx_dl_callbacks *dl_callbacks; + + /*! Event handlers. */ + struct efhw_ev_handler *ev_handlers; + +}; + +#define linux_efhw_nic(efhw_nic) \ + container_of(efhw_nic, struct linux_efhw_nic, nic) + +#endif /* __CI_DRIVER_RESOURCE_LINUX_RESOURCE__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/checks.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/checks.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,118 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides helpers to turn bit shifts into dword shifts and + * check that the bit fields haven't overflown the dword etc. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFHW_CHECK_H__ +#define __CI_EFHW_CHECK_H__ + +/*---------------------------------------------------------------------------- + * + * Helpers to turn bit shifts into dword shifts and check that the bit fields + * haven't overflown the dword etc. Aim is to preserve consistency with the + * autogenerated headers - once stable we could hard code. 
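/*
 * A small, hypothetical illustration (not part of the patch) of the
 * linux_efhw_nic() accessor defined above: code that is handed the
 * hardware-independent struct efhw_nic can recover the enclosing
 * Linux-specific state, e.g. its struct pci_dev, via container_of().
 */
static struct pci_dev *example_nic_to_pci_dev(struct efhw_nic *nic)
{
	struct linux_efhw_nic *lnic = linux_efhw_nic(nic);

	return lnic->pci_dev;
}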
+ * + *---------------------------------------------------------------------------*/ + +/* mask constructors */ +#define __FALCON_MASK(WIDTH, T) ((((T)1) << (WIDTH)) - 1) +#define __FALCON_MASK32(WIDTH) __FALCON_MASK((WIDTH), uint32_t) +#define __FALCON_MASK64(WIDTH) __FALCON_MASK((WIDTH), uint64_t) + +#define __FALCON_MASKFIELD32(LBN, WIDTH) \ + ((uint32_t)(__FALCON_MASK32(WIDTH) << (LBN))) + +/* constructors for fields which span the first and second dwords */ +#define __LW(LBN) (32 - LBN) +#define __LOW(v, LBN, WIDTH) \ + ((uint32_t)(((v) & __FALCON_MASK64(__LW((LBN)))) << (LBN))) +#define __HIGH(v, LBN, WIDTH) \ + ((uint32_t)(((v) >> __LW((LBN))) & \ + __FALCON_MASK64((WIDTH - __LW((LBN)))))) +/* constructors for fields within the second dword */ +#define __DW2(LBN) ((LBN) - 32) + +/* constructors for fields which span the second and third dwords */ +#define __LW2(LBN) (64 - LBN) +#define __LOW2(v, LBN, WIDTH) \ + ((uint32_t)(((v) & __FALCON_MASK64(__LW2((LBN)))) << ((LBN) - 32))) +#define __HIGH2(v, LBN, WIDTH) \ + ((uint32_t)(((v) >> __LW2((LBN))) & \ + __FALCON_MASK64((WIDTH - __LW2((LBN)))))) + +/* constructors for fields within the third dword */ +#define __DW3(LBN) ((LBN) - 64) + +/* constructors for fields which span the third and fourth dwords */ +#define __LW3(LBN) (96 - LBN) +#define __LOW3(v, LBN, WIDTH) \ + ((uint32_t)(((v) & __FALCON_MASK64(__LW3((LBN)))) << ((LBN) - 64))) +#define __HIGH3(v, LBN, WIDTH) \ + ((ci_unit32)(((v) >> __LW3((LBN))) & \ + __FALCON_MASK64((WIDTH - __LW3((LBN)))))) + +/* constructors for fields within the fourth dword */ +#define __DW4(LBN) ((LBN) - 96) + +/* checks that the autogenerated headers are consistent with our model */ +#define __WIDTHCHCK(a, b) EFHW_ASSERT((a) == (b)) +#define __RANGECHCK(v, WIDTH) \ + EFHW_ASSERT(((uint64_t)(v) & ~(__FALCON_MASK64((WIDTH)))) == 0) + +/* fields within the first dword */ +#define __DWCHCK(LBN, WIDTH) \ + EFHW_ASSERT(((LBN) >= 0) && (((LBN)+(WIDTH)) <= 32)) + +/* fields which span the first and second dwords */ +#define __LWCHK(LBN, WIDTH) EFHW_ASSERT(WIDTH >= __LW(LBN)) + +/* fields within the second dword */ +#define __DW2CHCK(LBN, WIDTH) \ + EFHW_ASSERT(((LBN) >= 32) && (((LBN)+(WIDTH)) <= 64)) + +/* fields which span the second and third dwords */ +#define __LW2CHK(LBN, WIDTH) EFHW_ASSERT(WIDTH >= __LW2(LBN)) + +/* fields within the third dword */ +#define __DW3CHCK(LBN, WIDTH) \ + EFHW_ASSERT(((LBN) >= 64) && (((LBN)+(WIDTH)) <= 96)) + +/* fields which span the third and fourth dwords */ +#define __LW3CHK(LBN, WIDTH) EFHW_ASSERT(WIDTH >= __LW3(LBN)) + +/* fields within the fourth dword */ +#define __DW4CHCK(LBN, WIDTH) \ + EFHW_ASSERT(((LBN) >= 96) && (((LBN)+(WIDTH)) <= 128)) + +/* fields in the first qword */ +#define __QWCHCK(LBN, WIDTH) \ + EFHW_ASSERT(((LBN) >= 0) && (((LBN)+(WIDTH)) <= 64)) + +#endif /* __CI_EFHW_CHECK_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/common.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/common.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,102 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides API of the efhw library which may be used both from + * the kernel and from the user-space code. 
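/*
 * A hypothetical example (not part of the patch) of the checks.h helpers
 * defined above.  EXAMPLE_FIELD_* is a made-up register field whose low
 * bit number (LBN) is 30 and whose width is 4, so it straddles the first
 * and second dwords of a 128-bit register image; __LOW()/__HIGH() split
 * the value between the two dwords, while __RANGECHCK()/__LWCHK() assert
 * (via EFHW_ASSERT from ci/efhw/debug.h) that the value fits the declared
 * width and that the field really spans the dword boundary.
 */
#define EXAMPLE_FIELD_LBN 30
#define EXAMPLE_FIELD_WIDTH 4

static void example_write_field(uint32_t *dwords, uint64_t v)
{
	__RANGECHCK(v, EXAMPLE_FIELD_WIDTH);
	__LWCHK(EXAMPLE_FIELD_LBN, EXAMPLE_FIELD_WIDTH);

	/* The low two bits of v land in bits 30..31 of dword 0; the
	 * remaining two bits land in bits 0..1 of dword 1. */
	dwords[0] |= __LOW(v, EXAMPLE_FIELD_LBN, EXAMPLE_FIELD_WIDTH);
	dwords[1] |= __HIGH(v, EXAMPLE_FIELD_LBN, EXAMPLE_FIELD_WIDTH);
}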
+ * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFHW_COMMON_H__ +#define __CI_EFHW_COMMON_H__ + +#include + +enum efhw_arch { + EFHW_ARCH_FALCON, + EFHW_ARCH_SIENA, +}; + +typedef uint32_t efhw_buffer_addr_t; +#define EFHW_BUFFER_ADDR_FMT "[ba:%"PRIx32"]" + +/*! Comment? */ +typedef union { + uint64_t u64; + struct { + uint32_t a; + uint32_t b; + } opaque; + struct { + uint32_t code; + uint32_t status; + } ev1002; +} efhw_event_t; + +/* Flags for TX/RX queues */ +#define EFHW_VI_JUMBO_EN 0x01 /*! scatter RX over multiple desc */ +#define EFHW_VI_ISCSI_RX_HDIG_EN 0x02 /*! iscsi rx header digest */ +#define EFHW_VI_ISCSI_TX_HDIG_EN 0x04 /*! iscsi tx header digest */ +#define EFHW_VI_ISCSI_RX_DDIG_EN 0x08 /*! iscsi rx data digest */ +#define EFHW_VI_ISCSI_TX_DDIG_EN 0x10 /*! iscsi tx data digest */ +#define EFHW_VI_TX_PHYS_ADDR_EN 0x20 /*! TX physical address mode */ +#define EFHW_VI_RX_PHYS_ADDR_EN 0x40 /*! RX physical address mode */ +#define EFHW_VI_RM_WITH_INTERRUPT 0x80 /*! VI with an interrupt */ +#define EFHW_VI_TX_IP_CSUM_DIS 0x100 /*! enable ip checksum generation */ +#define EFHW_VI_TX_TCPUDP_CSUM_DIS 0x200 /*! enable tcp/udp checksum + generation */ +#define EFHW_VI_TX_TCPUDP_ONLY 0x400 /*! 
drop non-tcp/udp packets */ + +/* Types of hardware filter */ +/* Each of these values implicitly selects scatter filters on B0 - or in + EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK if a non-scatter filter is required */ +#define EFHW_IP_FILTER_TYPE_UDP_WILDCARD (0) /* dest host only */ +#define EFHW_IP_FILTER_TYPE_UDP_FULL (1) /* dest host and port */ +#define EFHW_IP_FILTER_TYPE_TCP_WILDCARD (2) /* dest based filter */ +#define EFHW_IP_FILTER_TYPE_TCP_FULL (3) /* src filter */ +/* Same again, but with RSS (for B0 only) */ +#define EFHW_IP_FILTER_TYPE_UDP_WILDCARD_RSS_B0 (4) +#define EFHW_IP_FILTER_TYPE_UDP_FULL_RSS_B0 (5) +#define EFHW_IP_FILTER_TYPE_TCP_WILDCARD_RSS_B0 (6) +#define EFHW_IP_FILTER_TYPE_TCP_FULL_RSS_B0 (7) + +#define EFHW_IP_FILTER_TYPE_FULL_MASK (0x1) /* Mask for full / wildcard */ +#define EFHW_IP_FILTER_TYPE_TCP_MASK (0x2) /* Mask for TCP type */ +#define EFHW_IP_FILTER_TYPE_RSS_B0_MASK (0x4) /* Mask for B0 RSS enable */ +#define EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK (0x8) /* Mask for B0 SCATTER dsbl */ + +#define EFHW_IP_FILTER_TYPE_MASK (0xffff) /* Mask of types above */ + +#define EFHW_IP_FILTER_BROADCAST (0x10000) /* driverlink filter + support */ + +#endif /* __CI_EFHW_COMMON_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/common_sysdep.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/common_sysdep.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,71 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides version-independent Linux kernel API for + * userland-to-kernel interfaces. + * Only kernels >=2.6.9 are supported. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFHW_COMMON_LINUX_H__ +#define __CI_EFHW_COMMON_LINUX_H__ + +#include +#include + +/* Dirty hack, but Linux kernel does not provide DMA_ADDR_T_FMT */ +#if BITS_PER_LONG == 64 || defined(CONFIG_HIGHMEM64G) +#define DMA_ADDR_T_FMT "%llx" +#else +#define DMA_ADDR_T_FMT "%x" +#endif + +/* Linux kernel also does not provide PRIx32... Sigh. 
*/ +#define PRIx32 "x" + +#ifdef __ia64__ +# define PRIx64 "lx" +#else +# define PRIx64 "llx" +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) +enum { + false = 0, + true = 1 +}; + +typedef _Bool bool; +#endif /* LINUX_VERSION_CODE < 2.6.19 */ + +#endif /* __CI_EFHW_COMMON_LINUX_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/debug.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/debug.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,84 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides debug-related API for efhw library using Linux kernel + * primitives. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFHW_DEBUG_LINUX_H__ +#define __CI_EFHW_DEBUG_LINUX_H__ + +#define EFHW_PRINTK_PREFIX "[sfc efhw] " + +#define EFHW_PRINTK(level, fmt, ...) \ + printk(level EFHW_PRINTK_PREFIX fmt "\n", __VA_ARGS__) + +/* Following macros should be used with non-zero format parameters + * due to __VA_ARGS__ limitations. Use "%s" with __FUNCTION__ if you can't + * find better parameters. */ +#define EFHW_ERR(fmt, ...) EFHW_PRINTK(KERN_ERR, fmt, __VA_ARGS__) +#define EFHW_WARN(fmt, ...) EFHW_PRINTK(KERN_WARNING, fmt, __VA_ARGS__) +#define EFHW_NOTICE(fmt, ...) EFHW_PRINTK(KERN_NOTICE, fmt, __VA_ARGS__) +#if 0 && !defined(NDEBUG) +#define EFHW_TRACE(fmt, ...) EFHW_PRINTK(KERN_DEBUG, fmt, __VA_ARGS__) +#else +#define EFHW_TRACE(fmt, ...) +#endif + +#ifndef NDEBUG +#define EFHW_ASSERT(cond) BUG_ON((cond) == 0) +#define EFHW_DO_DEBUG(expr) expr +#else +#define EFHW_ASSERT(cond) +#define EFHW_DO_DEBUG(expr) +#endif + +#define EFHW_TEST(expr) \ + do { \ + if (unlikely(!(expr))) \ + BUG(); \ + } while (0) + +/* Build time asserts. We paste the line number into the type name + * so that the macro can be used more than once per file even if the + * compiler objects to multiple identical typedefs. Collisions + * between use in different header files is still possible. 
*/ +#ifndef EFHW_BUILD_ASSERT +#define __EFHW_BUILD_ASSERT_NAME(_x) __EFHW_BUILD_ASSERT_ILOATHECPP(_x) +#define __EFHW_BUILD_ASSERT_ILOATHECPP(_x) __EFHW_BUILD_ASSERT__ ##_x +#define EFHW_BUILD_ASSERT(e) \ + typedef char __EFHW_BUILD_ASSERT_NAME(__LINE__)[(e) ? 1 : -1] +#endif + +#endif /* __CI_EFHW_DEBUG_LINUX_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/efhw_config.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/efhw_config.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,43 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides some limits used in both kernel and userland code. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFHW_EFAB_CONFIG_H__ +#define __CI_EFHW_EFAB_CONFIG_H__ + +#define EFHW_MAX_NR_DEVS 5 /* max number of efhw devices supported */ + +#endif /* __CI_EFHW_EFAB_CONFIG_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/efhw_types.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/efhw_types.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,342 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides struct efhw_nic and some related types. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
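/*
 * A hypothetical example (not part of the patch) of the build-time assert
 * defined above: the typedef's array size becomes -1 when the condition is
 * false, so a violation fails at compile time rather than at runtime.
 * efhw_event_t is the union from ci/efhw/common.h earlier in this patch.
 */
EFHW_BUILD_ASSERT(sizeof(efhw_event_t) == sizeof(uint64_t));
/*
 * If this appeared on line 42 of a source file it would expand to:
 *   typedef char __EFHW_BUILD_ASSERT__42[
 *           (sizeof(efhw_event_t) == sizeof(uint64_t)) ? 1 : -1];
 */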
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFHW_EFAB_TYPES_H__ +#define __CI_EFHW_EFAB_TYPES_H__ + +#include +#include +#include +#include + +/*-------------------------------------------------------------------- + * + * hardware limits used in the types + * + *--------------------------------------------------------------------*/ + +#define EFHW_KEVENTQ_MAX 8 + +/*-------------------------------------------------------------------- + * + * forward type declarations + * + *--------------------------------------------------------------------*/ + +struct efhw_nic; + +/*-------------------------------------------------------------------- + * + * Managed interface + * + *--------------------------------------------------------------------*/ + +struct efhw_buffer_table_allocation{ + unsigned base; + unsigned order; +}; + +struct eventq_resource_hardware { + /*!iobuffer allocated for eventq - can be larger than eventq */ + efhw_iopages_t iobuff; + unsigned iobuff_off; + struct efhw_buffer_table_allocation buf_tbl_alloc; + int capacity; /*!< capacity of event queue */ +}; + +/*-------------------------------------------------------------------- + * + * event queues and event driven callbacks + * + *--------------------------------------------------------------------*/ + +struct efhw_keventq { + volatile int lock; + caddr_t evq_base; + int32_t evq_ptr; + uint32_t evq_mask; + unsigned instance; + struct eventq_resource_hardware hw; + struct efhw_ev_handler *ev_handlers; +}; + +/********************************************************************** + * Portable HW interface. *************************************** + **********************************************************************/ + +/*-------------------------------------------------------------------- + * + * EtherFabric Functional units - configuration and control + * + *--------------------------------------------------------------------*/ + +struct efhw_func_ops { + + /*-------------- Initialisation ------------ */ + + /*! close down all hardware functional units - leaves NIC in a safe + state for driver unload */ + void (*close_hardware) (struct efhw_nic *nic); + + /*! initialise all hardware functional units */ + int (*init_hardware) (struct efhw_nic *nic, + struct efhw_ev_handler *, + const uint8_t *mac_addr); + + /*-------------- Interrupt support ------------ */ + + /*! Main interrupt routine + ** This function returns, + ** - zero, if the IRQ was not generated by EF1 + ** - non-zero, if EF1 was the source of the IRQ + ** + ** + ** opaque is an OS provided pointer for use by the OS callbacks + ** e.g in Windows used to indicate DPC scheduled + */ + int (*interrupt) (struct efhw_nic *nic); + + /*! Enable given interrupt mask for the given IRQ unit */ + void (*interrupt_enable) (struct efhw_nic *nic, uint idx); + + /*! Disable given interrupt mask for the given IRQ unit */ + void (*interrupt_disable) (struct efhw_nic *nic, uint idx); + + /*! 
Set interrupt moderation strategy for the given IRQ unit + ** val is in usec + */ + void (*set_interrupt_moderation)(struct efhw_nic *nic, + uint idx, uint val); + + /*-------------- Event support ------------ */ + + /*! Enable the given event queue + depending on the underlying implementation (EF1 or Falcon) then + either a q_base_addr in host memory, or a buffer base id should + be proivded + */ + void (*event_queue_enable) (struct efhw_nic *nic, + uint evq, /* evnt queue index */ + uint evq_size, /* units of #entries */ + dma_addr_t q_base_addr, uint buf_base_id); + + /*! Disable the given event queue (and any associated timer) */ + void (*event_queue_disable) (struct efhw_nic *nic, uint evq, + int timer_only); + + /*! request wakeup from the NIC on a given event Q */ + void (*wakeup_request) (struct efhw_nic *nic, dma_addr_t q_base_addr, + int next_i, int evq); + + /*! Push a SW event on a given eventQ */ + void (*sw_event) (struct efhw_nic *nic, int data, int evq); + + /*-------------- Filter support ------------ */ + + /*! Setup a given filter - The software can request a filter_i, + * but some EtherFabric implementations will override with + * a more suitable index + */ + int (*ipfilter_set) (struct efhw_nic *nic, int type, + int *filter_i, int dmaq, + unsigned saddr_be32, unsigned sport_be16, + unsigned daddr_be32, unsigned dport_be16); + + /*! Attach a given filter to a DMAQ */ + void (*ipfilter_attach) (struct efhw_nic *nic, int filter_idx, + int dmaq_idx); + + /*! Detach a filter from its DMAQ */ + void (*ipfilter_detach) (struct efhw_nic *nic, int filter_idx); + + /*! Clear down a given filter */ + void (*ipfilter_clear) (struct efhw_nic *nic, int filter_idx); + + /*-------------- DMA support ------------ */ + + /*! Initialise NIC state for a given TX DMAQ */ + void (*dmaq_tx_q_init) (struct efhw_nic *nic, + uint dmaq, uint evq, uint owner, uint tag, + uint dmaq_size, uint buf_idx, uint flags); + + /*! Initialise NIC state for a given RX DMAQ */ + void (*dmaq_rx_q_init) (struct efhw_nic *nic, + uint dmaq, uint evq, uint owner, uint tag, + uint dmaq_size, uint buf_idx, uint flags); + + /*! Disable a given TX DMAQ */ + void (*dmaq_tx_q_disable) (struct efhw_nic *nic, uint dmaq); + + /*! Disable a given RX DMAQ */ + void (*dmaq_rx_q_disable) (struct efhw_nic *nic, uint dmaq); + + /*! Flush a given TX DMA channel */ + int (*flush_tx_dma_channel) (struct efhw_nic *nic, uint dmaq); + + /*! Flush a given RX DMA channel */ + int (*flush_rx_dma_channel) (struct efhw_nic *nic, uint dmaq); + + /*-------------- Buffer table Support ------------ */ + + /*! Initialise a buffer table page */ + void (*buffer_table_set) (struct efhw_nic *nic, + dma_addr_t dma_addr, + uint bufsz, uint region, + int own_id, int buffer_id); + + /*! Initialise a block of buffer table pages */ + void (*buffer_table_set_n) (struct efhw_nic *nic, int buffer_id, + dma_addr_t dma_addr, + uint bufsz, uint region, + int n_pages, int own_id); + + /*! Clear a block of buffer table pages */ + void (*buffer_table_clear) (struct efhw_nic *nic, int buffer_id, + int num); + + /*! Commit a buffer table update */ + void (*buffer_table_commit) (struct efhw_nic *nic); + +}; + + +/*---------------------------------------------------------------------------- + * + * NIC type + * + *---------------------------------------------------------------------------*/ + +struct efhw_device_type { + int arch; /* enum efhw_arch */ + char variant; /* 'A', 'B', ... */ + int revision; /* 0, 1, ... 
*/ +}; + + +/*---------------------------------------------------------------------------- + * + * EtherFabric NIC instance - nic.c for HW independent functions + * + *---------------------------------------------------------------------------*/ + +/*! */ +struct efhw_nic { + /*! zero base index in efrm_nic_table.nic array */ + volatile int index; + int ifindex; /*!< OS level nic index */ +#ifdef HAS_NET_NAMESPACE + struct net *nd_net; +#endif + + struct efhw_device_type devtype; + + /*! Options that can be set by user. */ + unsigned options; +# define NIC_OPT_EFTEST 0x1 /* owner is an eftest app */ + +# define NIC_OPT_DEFAULT 0 + + /*! Internal flags that indicate hardware properties at runtime. */ + unsigned flags; +# define NIC_FLAG_NO_INTERRUPT 0x01 /* to be set at init time only */ +# define NIC_FLAG_TRY_MSI 0x02 +# define NIC_FLAG_MSI 0x04 +# define NIC_FLAG_OS_IRQ_EN 0x08 +# define NIC_FLAG_10G 0x10 + + unsigned mtu; /*!< MAC MTU (includes MAC hdr) */ + + /* hardware resources */ + + /*! I/O address of the start of the bar */ + efhw_ioaddr_t bar_ioaddr; + + /*! Bar number of control aperture. */ + unsigned ctr_ap_bar; + /*! Length of control aperture in bytes. */ + unsigned ctr_ap_bytes; + + uint8_t mac_addr[ETH_ALEN]; /*!< mac address */ + + /*! EtherFabric Functional Units -- functions */ + const struct efhw_func_ops *efhw_func; + + /* Value read from FPGA version register. Zero for asic. */ + unsigned fpga_version; + + /*! This lock protects a number of misc NIC resources. It should + * only be used for things that can be at the bottom of the lock + * order. ie. You mustn't attempt to grab any other lock while + * holding this one. + */ + spinlock_t *reg_lock; + spinlock_t the_reg_lock; + + int buf_commit_outstanding; /*!< outstanding buffer commits */ + + /*! interrupt callbacks (hard-irq) */ + void (*irq_handler) (struct efhw_nic *, int unit); + + /*! event queues per driver */ + struct efhw_keventq evq[EFHW_KEVENTQ_MAX]; + +/* for marking when we are not using an IRQ unit + - 0 is a valid offset to an IRQ unit on EF1! */ +#define EFHW_IRQ_UNIT_UNUSED 0xffff + /*! interrupt unit in use */ + unsigned int irq_unit[EFHW_KEVENTQ_MAX]; + efhw_iopage_t irq_iobuff; /*!< Falcon SYSERR interrupt */ + + /* The new driverlink infrastructure. */ + struct efx_dl_device *net_driver_dev; + struct efx_dlfilt_cb_s *dlfilter_cb; + + /*! Bit masks of the sizes of event queues and dma queues supported + * by the nic. */ + unsigned evq_sizes; + unsigned rxq_sizes; + unsigned txq_sizes; + + /* Size of filter table (including odd and even banks). */ + unsigned filter_tbl_size; +}; + + +#define EFHW_KVA(nic) ((nic)->bar_ioaddr) + + +#endif /* __CI_EFHW_EFHW_TYPES_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/eventq.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/eventq.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,73 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains API provided by efhw/eventq.c file. This file is not + * designed for use outside of the SFC resource driver. 
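+ *
+ * A minimal usage sketch (illustrative only; the my_* callbacks are
+ * placeholders, and the declarations appear below):
+ *
+ *   static struct efhw_ev_handler my_handlers = {
+ *           .wakeup_fn       = my_wakeup,
+ *           .timeout_fn      = my_timeout,
+ *           .sw_fn           = my_sw_event,
+ *           .dmaq_flushed_fn = my_dmaq_flushed,
+ *   };
+ *
+ *   efhw_keventq_ctor(nic, instance, &evq, &my_handlers);
+ *   efhw_keventq_poll(nic, &evq);      poll, e.g. from a tasklet
+ *   efhw_keventq_dtor(nic, &evq);      tear down on driver unload
+ *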
+ * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFHW_EVENTQ_H__ +#define __CI_EFHW_EVENTQ_H__ + +#include +#include + +/*! Poll the event queue. */ +extern int efhw_keventq_poll(struct efhw_nic *, struct efhw_keventq *); + +/*! Callbacks for handling events. */ +struct efhw_ev_handler { + void (*wakeup_fn)(struct efhw_nic *nic, efhw_event_t *ev); + void (*timeout_fn)(struct efhw_nic *nic, efhw_event_t *ev); + void (*sw_fn)(struct efhw_nic *nic, efhw_event_t *ev); + void (*dmaq_flushed_fn) (struct efhw_nic *, int, int); +}; + +extern int efhw_keventq_ctor(struct efhw_nic *, int instance, + struct efhw_keventq *, struct efhw_ev_handler *); +extern void efhw_keventq_dtor(struct efhw_nic *, struct efhw_keventq *); + +extern void efhw_handle_txdmaq_flushed(struct efhw_nic *, + struct efhw_ev_handler *, + efhw_event_t *); +extern void efhw_handle_rxdmaq_flushed(struct efhw_nic *, + struct efhw_ev_handler *, + efhw_event_t *); +extern void efhw_handle_wakeup_event(struct efhw_nic *, + struct efhw_ev_handler *, + efhw_event_t *); +extern void efhw_handle_timeout_event(struct efhw_nic *, + struct efhw_ev_handler *, + efhw_event_t *); + +#endif /* __CI_EFHW_EVENTQ_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/eventq_macros.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/eventq_macros.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,81 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides some event-related macros. This file is designed for + * use from kernel and from the userland contexts. 
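+ *
+ * A minimal consumption sketch (illustrative only; 'evq' is assumed to be
+ * a struct efhw_keventq pointer supplying evq_base/evq_ptr/evq_mask, and
+ * handle_event() is a placeholder):
+ *
+ *   efhw_event_t *ev = EFHW_EVENT_PTR(evq, evq, 0);
+ *   while (EFHW_IS_EVENT(ev)) {
+ *           handle_event(ev);
+ *           EFHW_CLEAR_EVENT(ev);
+ *           EFHW_EVENTQ_NEXT(evq);
+ *           ev = EFHW_EVENT_PTR(evq, evq, 0);
+ *   }
+ *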
+ * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFHW_EVENTQ_MACROS_H__ +#define __CI_EFHW_EVENTQ_MACROS_H__ + +#include + +/*-------------------------------------------------------------------- + * + * Event Queue manipulation + * + *--------------------------------------------------------------------*/ + +#define EFHW_EVENT_OFFSET(q, s, i) \ + (((s)->evq_ptr - (i) * (int32_t)sizeof(efhw_event_t)) \ + & (q)->evq_mask) + +#define EFHW_EVENT_PTR(q, s, i) \ + ((efhw_event_t *)((q)->evq_base + EFHW_EVENT_OFFSET(q, s, i))) + +#define EFHW_EVENTQ_NEXT(s) \ + do { ((s)->evq_ptr += sizeof(efhw_event_t)); } while (0) + +#define EFHW_EVENTQ_PREV(s) \ + do { ((s)->evq_ptr -= sizeof(efhw_event_t)); } while (0) + +/* Be worried about this on byteswapped machines */ +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + /* Due to crazy chipsets, we see the event words being written in + ** arbitrary order (bug4539). So test for presence of event must ensure + ** that both halves have changed from the null. + */ + #define EFHW_IS_EVENT(evp) \ + (((evp)->opaque.a != (uint32_t)-1) && \ + ((evp)->opaque.b != (uint32_t)-1)) + #define EFHW_CLEAR_EVENT(evp) ((evp)->u64 = (uint64_t)-1) + #define EFHW_CLEAR_EVENT_VALUE 0xff +#else + #error Fixme - unknown hardware configuration +#endif + +#define EFHW_EVENT_OVERFLOW(evq, s) \ + (EFHW_IS_EVENT(EFHW_EVENT_PTR(evq, s, 1))) + +#endif /* __CI_EFHW_EVENTQ_MACROS_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/falcon.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/falcon.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,93 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains API provided by efhw/falcon.c file. This file is not + * designed for use outside of the SFC resource driver. 
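+ *
+ * The FALCON_LOCK_* macros declared below serialise access to the NIC
+ * registers; a minimal sketch of the intended pattern (illustrative
+ * only):
+ *
+ *   FALCON_LOCK_DECL;
+ *   FALCON_LOCK_LOCK(nic);
+ *   ... access registers via EFHW_KVA(nic) ...
+ *   FALCON_LOCK_UNLOCK(nic);
+ *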
+ * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFHW_FALCON_H__ +#define __CI_EFHW_FALCON_H__ + +#include +#include + +/*---------------------------------------------------------------------------- + * + * Locks - unfortunately required + * + *---------------------------------------------------------------------------*/ + +#define FALCON_LOCK_DECL irq_flags_t lock_state +#define FALCON_LOCK_LOCK(nic) \ + spin_lock_irqsave((nic)->reg_lock, lock_state) +#define FALCON_LOCK_UNLOCK(nic) \ + spin_unlock_irqrestore((nic)->reg_lock, lock_state) + +extern struct efhw_func_ops falcon_char_functional_units; + +/*! specify a pace value for a TX DMA Queue */ +extern void falcon_nic_pace(struct efhw_nic *nic, uint dmaq, uint pace); + +/*! confirm buffer table updates - should be used for items where + loss of data would be unacceptable. E.g for the buffers that back + an event or DMA queue */ +extern void falcon_nic_buffer_table_confirm(struct efhw_nic *nic); + +/*! Reset the all the TX DMA queue pointers. */ +extern void falcon_clobber_tx_dma_ptrs(struct efhw_nic *nic, uint dmaq); + +extern int +falcon_handle_char_event(struct efhw_nic *nic, + struct efhw_ev_handler *h, efhw_event_t *evp); + +/*! map event queue instance space (0,1,2,..) onto event queue + number. This function takes into account the allocation rules for + the underlying driver model */ +extern int falcon_idx_to_evq(struct efhw_nic *nic, uint idx); + +/*! 
Acknowledge to HW that processing is complete on a given event queue */ +extern void falcon_nic_evq_ack(struct efhw_nic *nic, uint evq, /* evq id */ + uint rptr, /* new read pointer update */ + bool wakeup /* request a wakeup event if + ptr's != */ + ); + +extern void +falcon_nic_buffer_table_set_n(struct efhw_nic *nic, int buffer_id, + dma_addr_t dma_addr, uint bufsz, uint region, + int n_pages, int own_id); + +extern void falcon_nic_ipfilter_ctor(struct efhw_nic *nic); + +#endif /* __CI_EFHW_FALCON_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/falcon_hash.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/falcon_hash.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,58 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains API provided by efhw/falcon_hash.c file. + * Function declared in this file are not exported from the Linux + * sfc_resource driver. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
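+ *
+ * A minimal sketch of the intended probe sequence (illustrative only;
+ * entry_matches() is a placeholder, and the declarations appear below):
+ *
+ *   key   = falcon_hash_get_key(saddr, sport, daddr, dport, tcp, full);
+ *   hash1 = falcon_hash_function1(key, nfilters);
+ *   hash2 = falcon_hash_function2(key, nfilters);
+ *   for (k = 1, idx = hash1; !entry_matches(idx); k++)
+ *           idx = falcon_hash_iterator(hash1, hash2, k, nfilters);
+ *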
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFHW_FALCON_HASH_H__ +#define __CI_EFHW_FALCON_HASH_H__ + +/* All LE parameters */ +extern unsigned int +falcon_hash_get_key(unsigned int src_ip, unsigned int src_port, + unsigned int dest_ip, unsigned int dest_port, + int tcp, int full); + +unsigned int falcon_hash_function1(unsigned int key, unsigned int nfilters); + +extern unsigned int +falcon_hash_function2(unsigned int key, unsigned int nfitlers); + +extern unsigned int +falcon_hash_iterator(unsigned int hash1, unsigned int hash2, + unsigned int n_search, unsigned int nfilters); + +#endif /* __CI_EFHW_FALCON_HASH_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/hardware_sysdep.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/hardware_sysdep.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,84 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides version-independent Linux kernel API for header files + * with hardware-related definitions (in ci/driver/efab/hardware*). + * Only kernels >=2.6.9 are supported. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
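+ *
+ * The readq()/writeq() fallbacks defined below provide 64-bit MMIO access
+ * on kernels that lack them; a minimal sketch (illustrative only, 'kva'
+ * being the mapped control aperture and SOME_REG_OFST a placeholder
+ * register offset):
+ *
+ *   writeq(val, kva + SOME_REG_OFST);
+ *   mmiowb();
+ *   val = readq(kva + SOME_REG_OFST);
+ *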
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFHW_HARDWARE_LINUX_H__ +#define __CI_EFHW_HARDWARE_LINUX_H__ + +#include + +#ifdef __LITTLE_ENDIAN +#define EFHW_IS_LITTLE_ENDIAN +#elif __BIG_ENDIAN +#define EFHW_IS_BIG_ENDIAN +#else +#error Unknown endianness +#endif + +#ifndef mmiowb + #if defined(__i386__) || defined(__x86_64__) + #define mmiowb() + #elif defined(__ia64__) + #ifndef ia64_mfa + #define ia64_mfa() asm volatile ("mf.a" ::: "memory") + #endif + #define mmiowb ia64_mfa + #else + #error "Need definition for mmiowb()" + #endif +#endif + +typedef char *efhw_ioaddr_t; + +#ifndef readq +static inline uint64_t __readq(void __iomem *addr) +{ + return *(volatile uint64_t *)addr; +} +#define readq(x) __readq(x) +#endif + +#ifndef writeq +static inline void __writeq(uint64_t v, void __iomem *addr) +{ + *(volatile uint64_t *)addr = v; +} +#define writeq(val, addr) __writeq((val), (addr)) +#endif + +#endif /* __CI_EFHW_HARDWARE_LINUX_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/iopage.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/iopage.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,58 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains OS-independent API for allocating iopage types. + * The implementation of these functions is highly OS-dependent. + * This file is not designed for use outside of the SFC resource driver. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
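+ *
+ * A minimal allocation sketch (illustrative only; error handling and the
+ * surrounding code are trimmed):
+ *
+ *   efhw_iopage_t page;
+ *   if (efhw_iopage_alloc(nic, &page) < 0)
+ *           goto fail;
+ *   ... program efhw_iopage_dma_addr(&page) into the NIC ...
+ *   efhw_iopage_free(nic, &page);
+ *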
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_DRIVER_RESOURCE_IOPAGE_H__ +#define __CI_DRIVER_RESOURCE_IOPAGE_H__ + +#include + +/*-------------------------------------------------------------------- + * + * memory allocation + * + *--------------------------------------------------------------------*/ + +extern int efhw_iopage_alloc(struct efhw_nic *, efhw_iopage_t *p); +extern void efhw_iopage_free(struct efhw_nic *, efhw_iopage_t *p); + +extern int efhw_iopages_alloc(struct efhw_nic *, efhw_iopages_t *p, + unsigned order); +extern void efhw_iopages_free(struct efhw_nic *, efhw_iopages_t *p); + +#endif /* __CI_DRIVER_RESOURCE_IOPAGE_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/iopage_types.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/iopage_types.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,188 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides efhw_page_t and efhw_iopage_t for Linux kernel. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFHW_IOPAGE_LINUX_H__ +#define __CI_EFHW_IOPAGE_LINUX_H__ + +#include +#include +#include + +/*-------------------------------------------------------------------- + * + * efhw_page_t: A single page of memory. Directly mapped in the driver, + * and can be mapped to userlevel. + * + *--------------------------------------------------------------------*/ + +typedef struct { + unsigned long kva; +} efhw_page_t; + +static inline int efhw_page_alloc(efhw_page_t *p) +{ + p->kva = __get_free_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL); + return p->kva ? 0 : -ENOMEM; +} + +static inline int efhw_page_alloc_zeroed(efhw_page_t *p) +{ + p->kva = get_zeroed_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL); + return p->kva ? 
0 : -ENOMEM; +} + +static inline void efhw_page_free(efhw_page_t *p) +{ + free_page(p->kva); + EFHW_DO_DEBUG(memset(p, 0, sizeof(*p))); +} + +static inline char *efhw_page_ptr(efhw_page_t *p) +{ + return (char *)p->kva; +} + +static inline unsigned efhw_page_pfn(efhw_page_t *p) +{ + return (unsigned)(__pa(p->kva) >> PAGE_SHIFT); +} + +static inline void efhw_page_mark_invalid(efhw_page_t *p) +{ + p->kva = 0; +} + +static inline int efhw_page_is_valid(efhw_page_t *p) +{ + return p->kva != 0; +} + +static inline void efhw_page_init_from_va(efhw_page_t *p, void *va) +{ + p->kva = (unsigned long)va; +} + +/*-------------------------------------------------------------------- + * + * efhw_iopage_t: A single page of memory. Directly mapped in the driver, + * and can be mapped to userlevel. Can also be accessed by the NIC. + * + *--------------------------------------------------------------------*/ + +typedef struct { + efhw_page_t p; + dma_addr_t dma_addr; +} efhw_iopage_t; + +static inline dma_addr_t efhw_iopage_dma_addr(efhw_iopage_t *p) +{ + return p->dma_addr; +} + +#define efhw_iopage_ptr(iop) efhw_page_ptr(&(iop)->p) +#define efhw_iopage_pfn(iop) efhw_page_pfn(&(iop)->p) +#define efhw_iopage_mark_invalid(iop) efhw_page_mark_invalid(&(iop)->p) +#define efhw_iopage_is_valid(iop) efhw_page_is_valid(&(iop)->p) + +/*-------------------------------------------------------------------- + * + * efhw_iopages_t: A set of pages that are contiguous in physical memory. + * Directly mapped in the driver, and can be mapped to userlevel. Can also + * be accessed by the NIC. + * + * NB. The O/S may be unwilling to allocate many, or even any of these. So + * only use this type where the NIC really needs a physically contiguous + * buffer. + * + *--------------------------------------------------------------------*/ + +typedef struct { + caddr_t kva; + unsigned order; + dma_addr_t dma_addr; +} efhw_iopages_t; + +static inline caddr_t efhw_iopages_ptr(efhw_iopages_t *p) +{ + return p->kva; +} + +static inline unsigned efhw_iopages_pfn(efhw_iopages_t *p) +{ + return (unsigned)(__pa(p->kva) >> PAGE_SHIFT); +} + +static inline dma_addr_t efhw_iopages_dma_addr(efhw_iopages_t *p) +{ + return p->dma_addr; +} + +static inline unsigned efhw_iopages_size(efhw_iopages_t *p) +{ + return 1u << (p->order + PAGE_SHIFT); +} + +/* efhw_iopage_t <-> efhw_iopages_t conversions for handling physically + * contiguous allocations in iobufsets for iSCSI. This allows the + * essential information about contiguous allocations from + * efhw_iopages_alloc() to be saved away in the efhw_iopage_t array in an + * iobufset. (Changing the iobufset resource to use a union type would + * involve a lot of code changes, and make the iobufset's metadata larger + * which could be bad as it's supposed to fit into a single page on some + * platforms.) 
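+ *
+ * A minimal sketch of the round trip (illustrative only):
+ *
+ *   efhw_iopages_t pages;
+ *   efhw_iopage_t  page0;
+ *
+ *   efhw_iopages_alloc(nic, &pages, order);
+ *   efhw_iopage_init_from_iopages(&page0, &pages, 0);    save page 0
+ *   ...
+ *   efhw_iopages_init_from_iopage(&pages, &page0, order);
+ *   efhw_iopages_free(nic, &pages);
+ *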
+ */ +static inline void +efhw_iopage_init_from_iopages(efhw_iopage_t *iopage, + efhw_iopages_t *iopages, unsigned pageno) +{ + iopage->p.kva = ((unsigned long)efhw_iopages_ptr(iopages)) + + (pageno * PAGE_SIZE); + iopage->dma_addr = efhw_iopages_dma_addr(iopages) + + (pageno * PAGE_SIZE); +} + +static inline void +efhw_iopages_init_from_iopage(efhw_iopages_t *iopages, + efhw_iopage_t *iopage, unsigned order) +{ + iopages->kva = (caddr_t) efhw_iopage_ptr(iopage); + EFHW_ASSERT(iopages->kva); + iopages->order = order; + iopages->dma_addr = efhw_iopage_dma_addr(iopage); +} + +#endif /* __CI_EFHW_IOPAGE_LINUX_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/nic.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/nic.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,62 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains API provided by efhw/nic.c file. This file is not + * designed for use outside of the SFC resource driver. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFHW_NIC_H__ +#define __CI_EFHW_NIC_H__ + +#include +#include + + +/* Convert PCI info to device type. Returns false when device is not + * recognised. + */ +extern int efhw_device_type_init(struct efhw_device_type *dt, + int vendor_id, int device_id, int revision); + +/* Initialise fields that do not involve touching hardware. */ +extern void efhw_nic_init(struct efhw_nic *nic, unsigned flags, + unsigned options, struct efhw_device_type dev_type); + +/*! Destruct NIC resources */ +extern void efhw_nic_dtor(struct efhw_nic *nic); + +/*! 
Shutdown interrupts */ +extern void efhw_nic_close_interrupts(struct efhw_nic *nic); + +#endif /* __CI_EFHW_NIC_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/public.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/public.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,83 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides public API of efhw library exported from the SFC + * resource driver. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFHW_PUBLIC_H__ +#define __CI_EFHW_PUBLIC_H__ + +#include +#include + +/*! Returns true if we have some EtherFabric functional units - + whether configured or not */ +static inline int efhw_nic_have_functional_units(struct efhw_nic *nic) +{ + return nic->efhw_func != 0; +} + +/*! Returns true if the EtherFabric functional units have been configured */ +static inline int efhw_nic_have_hw(struct efhw_nic *nic) +{ + return efhw_nic_have_functional_units(nic) && (EFHW_KVA(nic) != 0); +} + +/*! 
Helper function to allocate the iobuffer needed by an eventq + * - it ensures the eventq has the correct alignment for the NIC + * + * \param rm Event-queue resource manager + * \param instance Event-queue instance (index) + * \param buf_bytes Requested size of eventq + * \return < 0 if iobuffer allocation fails + */ +int efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic, + struct eventq_resource_hardware *h, + int evq_instance, unsigned buf_bytes); + +extern void falcon_nic_set_rx_usr_buf_size(struct efhw_nic *, + int rx_usr_buf_size); + +extern void +falcon_nic_rx_filter_ctl_set(struct efhw_nic *nic, uint32_t tcp_full, + uint32_t tcp_wild, + uint32_t udp_full, uint32_t udp_wild); + +extern void +falcon_nic_rx_filter_ctl_get(struct efhw_nic *nic, uint32_t *tcp_full, + uint32_t *tcp_wild, + uint32_t *udp_full, uint32_t *udp_wild); + +#endif /* __CI_EFHW_PUBLIC_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/sysdep.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efhw/sysdep.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,72 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides version-independent Linux kernel API for efhw library. + * Only kernels >=2.6.9 are supported. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
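+ *
+ * The round_up() fallback defined below rounds up to a power-of-two
+ * boundary; a quick illustration (assuming PAGE_SIZE is 4096):
+ *
+ *   round_up(5000, PAGE_SIZE) == 8192
+ *   round_up(4096, PAGE_SIZE) == 4096
+ *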
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFHW_SYSDEP_LINUX_H__ +#define __CI_EFHW_SYSDEP_LINUX_H__ + +#include +#include +#include +#include +#include + +#include /* necessary for etherdevice.h on some kernels */ +#include + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21) +static inline int is_local_ether_addr(const u8 *addr) +{ + return (0x02 & addr[0]); +} +#endif + +typedef unsigned long irq_flags_t; + +#define spin_lock_destroy(l_) do {} while (0) + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) +#define HAS_NET_NAMESPACE +#endif + +/* Funny, but linux has round_up for x86 only, defined in + * x86-specific header */ +#ifndef round_up +#define round_up(x, y) (((x) + (y) - 1) & ~((y)-1)) +#endif + +#endif /* __CI_EFHW_SYSDEP_LINUX_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/buddy.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/buddy.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,69 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides private API for buddy allocator. This API is not + * designed for use outside of SFC resource driver. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFRM_BUDDY_H__ +#define __CI_EFRM_BUDDY_H__ + +#include + +/*! Comment? 
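+ * State of one buddy allocator instance, used to carve a power-of-two
+ * range (e.g. of NIC buffer-table entries) into naturally aligned blocks.
+ * A minimal usage sketch (illustrative only):
+ *
+ *   struct efrm_buddy_allocator b;
+ *   efrm_buddy_ctor(&b, order);          manages 2^order units
+ *   addr = efrm_buddy_alloc(&b, 3);      get a block of 2^3 units
+ *   efrm_buddy_free(&b, addr, 3);
+ *   efrm_buddy_dtor(&b);
+ *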
*/ +struct efrm_buddy_allocator { + struct list_head *free_lists; /* array[order+1] */ + struct list_head *links; /* array[1<order; +} + +int efrm_buddy_ctor(struct efrm_buddy_allocator *b, unsigned order); +void efrm_buddy_dtor(struct efrm_buddy_allocator *b); +int efrm_buddy_alloc(struct efrm_buddy_allocator *b, unsigned order); +void efrm_buddy_free(struct efrm_buddy_allocator *b, unsigned addr, + unsigned order); +void efrm_buddy_reserve_at_start(struct efrm_buddy_allocator *b, unsigned n); +void efrm_buddy_reserve_at_end(struct efrm_buddy_allocator *b, unsigned n); + +#endif /* __CI_EFRM_BUDDY_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/buffer_table.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/buffer_table.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,86 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides private buffer table API. This API is not designed + * for use outside of SFC resource driver. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFRM_BUFFER_TABLE_H__ +#define __CI_EFRM_BUFFER_TABLE_H__ + +#include + +/*-------------------------------------------------------------------- + * + * NIC's buffer table. + * + *--------------------------------------------------------------------*/ + +/*! Managed interface. */ + +/*! construct a managed buffer table object, allocated over a region of + * the NICs buffer table space + */ +extern int efrm_buffer_table_ctor(unsigned low, unsigned high); +/*! destructor for above */ +extern void efrm_buffer_table_dtor(void); + +/*! allocate a contiguous region of buffer table space */ +extern int efrm_buffer_table_alloc(unsigned order, + struct efhw_buffer_table_allocation *a); + +/*! current size of the buffer table. + * FIXME This function should be inline, but it is never used from + * the fast path, so let it as-is. */ +unsigned long efrm_buffer_table_size(void); + +/*-------------------------------------------------------------------- + * + * buffer table operations through the HW independent API + * + *--------------------------------------------------------------------*/ + +/*! 
free a previously allocated region of buffer table space */ +extern void efrm_buffer_table_free(struct efhw_buffer_table_allocation *a); + +/*! commit the update of a buffer table entry to every NIC */ +void efrm_buffer_table_commit(void); + +/*! set a given buffer table entry. [pa] should be the physical + address of pinned down memory. This function can only be called from + the char driver */ +void efrm_buffer_table_set(struct efhw_buffer_table_allocation *a, + unsigned i, dma_addr_t dma_addr, int owner); + +#endif /* __CI_EFRM_BUFFER_TABLE_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/debug.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/debug.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,78 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides debug-related API for efrm library using Linux kernel + * primitives. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFRM_DEBUG_LINUX_H__ +#define __CI_EFRM_DEBUG_LINUX_H__ + +#define EFRM_PRINTK_PREFIX "[sfc efrm] " + +#define EFRM_PRINTK(level, fmt, ...) \ + printk(level EFRM_PRINTK_PREFIX fmt "\n", __VA_ARGS__) + +/* Following macros should be used with non-zero format parameters + * due to __VA_ARGS__ limitations. Use "%s" with __FUNCTION__ if you can't + * find better parameters. */ +#define EFRM_ERR(fmt, ...) EFRM_PRINTK(KERN_ERR, fmt, __VA_ARGS__) +#define EFRM_WARN(fmt, ...) EFRM_PRINTK(KERN_WARNING, fmt, __VA_ARGS__) +#define EFRM_NOTICE(fmt, ...) EFRM_PRINTK(KERN_NOTICE, fmt, __VA_ARGS__) +#if 0 && !defined(NDEBUG) +#define EFRM_TRACE(fmt, ...) EFRM_PRINTK(KERN_DEBUG, fmt, __VA_ARGS__) +#else +#define EFRM_TRACE(fmt, ...) 
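+
+/* A minimal usage sketch for the logging macros above (illustrative
+ * only; identifiers such as 'instance' and 'rs' are placeholders): the
+ * format string must be followed by at least one argument because of the
+ * __VA_ARGS__ limitation noted above, e.g.
+ *
+ *   EFRM_ERR("%s: failed to allocate EVQ %d", __FUNCTION__, instance);
+ *   EFRM_TRACE("%s: ref count now %d", __FUNCTION__,
+ *              atomic_read(&rs->rs_ref_count));
+ */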
+#endif + +#ifndef NDEBUG +#define EFRM_ASSERT(cond) BUG_ON((cond) == 0) +#define _EFRM_ASSERT(cond, file, line) \ + do { \ + if (unlikely(!(cond))) { \ + EFRM_ERR("assertion \"%s\" failed at %s %d", \ + #cond, file, line); \ + BUG(); \ + } \ + } while (0) + +#define EFRM_DO_DEBUG(expr) expr +#define EFRM_VERIFY_EQ(expr, val) EFRM_ASSERT((expr) == (val)) +#else +#define EFRM_ASSERT(cond) +#define EFRM_DO_DEBUG(expr) +#define EFRM_VERIFY_EQ(expr, val) expr +#endif + +#endif /* __CI_EFRM_DEBUG_LINUX_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/driver_private.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/driver_private.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,86 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides private API of efrm library to be used from the SFC + * resource driver. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFRM_DRIVER_PRIVATE_H__ +#define __CI_EFRM_DRIVER_PRIVATE_H__ + +#include +#include + +/*-------------------------------------------------------------------- + * + * global variables + * + *--------------------------------------------------------------------*/ + +/* Internal structure for resource driver */ +extern struct efrm_resource_manager *efrm_rm_table[]; + +/*-------------------------------------------------------------------- + * + * efrm_nic_table handling + * + *--------------------------------------------------------------------*/ + +extern int efrm_driver_ctor(void); +extern int efrm_driver_dtor(void); +extern int efrm_driver_register_nic(struct efhw_nic *, int nic_index); +extern int efrm_driver_unregister_nic(struct efhw_nic *); + +/*-------------------------------------------------------------------- + * + * create/destroy resource managers + * + *--------------------------------------------------------------------*/ + +struct vi_resource_dimensions { + unsigned evq_int_min, evq_int_max; + unsigned evq_timer_min, evq_timer_max; + unsigned rxq_min, rxq_max; + unsigned txq_min, txq_max; +}; + +/*! 
Initialise resources */ +extern int +efrm_resources_init(const struct vi_resource_dimensions *, + int buffer_table_min, int buffer_table_max); + +/*! Tear down resources */ +extern void efrm_resources_fini(void); + +#endif /* __CI_EFRM_DRIVER_PRIVATE_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/filter.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/filter.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,147 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides public API for filter resource. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFRM_FILTER_H__ +#define __CI_EFRM_FILTER_H__ + +#include +#include +#include +#include + +/*! Comment? */ +struct filter_resource { + struct efrm_resource rs; + struct vi_resource *pt; + int filter_idx; + efrm_nic_set_t nic_set; +}; + +#define filter_resource(rs1) container_of((rs1), struct filter_resource, rs) + +/*! + * Allocate filter resource. + * + * \param vi_parent VI resource to use as parent. The function takes + * reference to the VI resource on success. + * \param frs_out pointer to return the new filter resource + * + * \return status code; if non-zero, frs_out is unchanged + */ +extern int +efrm_filter_resource_alloc(struct vi_resource *vi_parent, + struct filter_resource **frs_out); + +/* efrm_filter_resource_free should be called only if + * __efrm_resource_ref_count_zero() returned true. + * The easiest way is to call efrm_filter_resource_release() */ +void efrm_filter_resource_free(struct filter_resource *frs); +static inline void efrm_filter_resource_release(struct filter_resource *frs) +{ + unsigned id; + + EFRM_RESOURCE_ASSERT_VALID(&frs->rs, 0); + id = EFRM_RESOURCE_INSTANCE(frs->rs.rs_handle); + + if (atomic_dec_and_test(&frs->rs.rs_ref_count)) { + if (__efrm_resource_ref_count_zero(EFRM_RESOURCE_FILTER, id)) { + EFRM_ASSERT(EFRM_RESOURCE_INSTANCE(frs->rs.rs_handle) == + id); + efrm_filter_resource_free(frs); + } + } +} + +/*-------------------------------------------------------------------- + *! 
+ * Called to set/change the PT endpoint of a filter + * + * Example of use is TCP helper when it finds a wildcard IP filter + * needs to change which application it delivers traffic to + * + * \param frs filter resource + * \param pt_handle handle of new PT endpoint + * + * \return standard error codes + * + *--------------------------------------------------------------------*/ +extern int +efrm_filter_resource_set_ptresource(struct filter_resource *frs, + struct vi_resource *virs); + +extern int efrm_filter_resource_clear(struct filter_resource *frs); + +extern int __efrm_filter_resource_set(struct filter_resource *frs, int type, + unsigned saddr_be32, uint16_t sport_be16, + unsigned daddr_be32, uint16_t dport_be16); + +static inline int +efrm_filter_resource_tcp_set(struct filter_resource *frs, + unsigned saddr, uint16_t sport, + unsigned daddr, uint16_t dport) +{ + int type; + + EFRM_ASSERT((saddr && sport) || (!saddr && !sport)); + + type = + saddr ? EFHW_IP_FILTER_TYPE_TCP_FULL : + EFHW_IP_FILTER_TYPE_TCP_WILDCARD; + + return __efrm_filter_resource_set(frs, type, + saddr, sport, daddr, dport); +} + +static inline int +efrm_filter_resource_udp_set(struct filter_resource *frs, + unsigned saddr, uint16_t sport, + unsigned daddr, uint16_t dport) +{ + int type; + + EFRM_ASSERT((saddr && sport) || (!saddr && !sport)); + + type = + saddr ? EFHW_IP_FILTER_TYPE_UDP_FULL : + EFHW_IP_FILTER_TYPE_UDP_WILDCARD; + + return __efrm_filter_resource_set(frs, + type, saddr, sport, daddr, dport); +} + +#endif /* __CI_EFRM_FILTER_H__ */ +/*! \cidoxg_end */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/iobufset.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/iobufset.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,123 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides public API for iobufset resource. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFRM_IOBUFSET_H__ +#define __CI_EFRM_IOBUFSET_H__ + +#include + +/*! Iobufset resource structture. + * Users should not access the structure fields directly, but use the API + * below. 
+ * However, this structure should not be moved out of public headers, + * because part of API (ex. efrm_iobufset_dma_addr function) is inline and + * is used in the fast-path code. + */ +struct iobufset_resource { + struct efrm_resource rs; + struct vi_resource *evq; + struct efhw_buffer_table_allocation buf_tbl_alloc; + unsigned int faultonaccess; + unsigned int n_bufs; + unsigned int pages_per_contiguous_chunk; + unsigned order; + efhw_iopage_t bufs[1]; + /*!< up to n_bufs can follow this, so this must be the last member */ +}; + +#define iobufset_resource(rs1) \ + container_of((rs1), struct iobufset_resource, rs) + +/*! + * Allocate iobufset resource. + * + * \param vi_evq VI resource to use. The function takes + * reference to the VI resource on success. + * \param iobrs_out pointer to return the new filter resource + * + * \return status code; if non-zero, frs_out is unchanged + */ +extern int +efrm_iobufset_resource_alloc(int32_t n_pages, + int32_t pages_per_contiguous_chunk, + struct vi_resource *vi_evq, + bool phys_addr_mode, + uint32_t faultonaccess, + struct iobufset_resource **iobrs_out); + +/* efrm_iobufset_resource_free should be called only if + * __efrm_resource_ref_count_zero() returned true. + * The easiest way is to call efrm_iobufset_resource_release() */ +void efrm_iobufset_resource_free(struct iobufset_resource *rs); +static inline void +efrm_iobufset_resource_release(struct iobufset_resource *iobrs) +{ + unsigned id; + + EFRM_RESOURCE_ASSERT_VALID(&iobrs->rs, 0); + id = EFRM_RESOURCE_INSTANCE(iobrs->rs.rs_handle); + + if (atomic_dec_and_test(&iobrs->rs.rs_ref_count)) { + if (__efrm_resource_ref_count_zero(EFRM_RESOURCE_IOBUFSET, id)) + efrm_iobufset_resource_free(iobrs); + } +} + +static inline char * +efrm_iobufset_ptr(struct iobufset_resource *rs, unsigned offs) +{ + EFRM_ASSERT(offs < (unsigned)(rs->n_bufs << PAGE_SHIFT)); + return efhw_iopage_ptr(&rs->bufs[offs >> PAGE_SHIFT]) + + (offs & (PAGE_SIZE - 1)); +} + +static inline char *efrm_iobufset_page_ptr(struct iobufset_resource *rs, + unsigned page_i) +{ + EFRM_ASSERT(page_i < (unsigned)rs->n_bufs); + return efhw_iopage_ptr(&rs->bufs[page_i]); +} + +static inline dma_addr_t +efrm_iobufset_dma_addr(struct iobufset_resource *rs, unsigned offs) +{ + EFRM_ASSERT(offs < (unsigned)(rs->n_bufs << PAGE_SHIFT)); + return efhw_iopage_dma_addr(&rs->bufs[offs >> PAGE_SHIFT]) + + (offs & (PAGE_SIZE - 1)); +} + +#endif /* __CI_EFRM_IOBUFSET_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/nic_set.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/nic_set.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,104 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides public API for NIC sets. 
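+ *
+ * A NIC set is a small bitmask with one bit per possible NIC index.
+ * Minimal usage sketch (illustrative only; "nic_index" is a caller-chosen
+ * variable), using the accessors declared below:
+ *
+ *   efrm_nic_set_t set;
+ *   efrm_nic_set_clear(&set);
+ *   efrm_nic_set_write(&set, nic_index, true);
+ *   EFRM_ASSERT(efrm_nic_set_read(&set, nic_index));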
+ * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFRM_NIC_SET_H__ +#define __CI_EFRM_NIC_SET_H__ + +#include +#include +#include + +/*-------------------------------------------------------------------- + * + * efrm_nic_set_t - tracks which NICs something has been done on + * + *--------------------------------------------------------------------*/ + +/* Internal suructure of efrm_nic_set_t should not be referenced outside of + * this file. Add a new accessor if you should do it. */ +typedef struct { + uint32_t nics; +} efrm_nic_set_t; + +#if EFHW_MAX_NR_DEVS > 32 +#error change efrm_nic_set to handle EFHW_MAX_NR_DEVS number of devices +#endif + +static inline bool +efrm_nic_set_read(const efrm_nic_set_t *nic_set, unsigned index) +{ + EFRM_ASSERT(nic_set); + EFRM_ASSERT(index < EFHW_MAX_NR_DEVS && index < 32); + return (nic_set->nics & (1 << index)) ? true : false; +} + +static inline void +efrm_nic_set_write(efrm_nic_set_t *nic_set, unsigned index, bool value) +{ + EFRM_ASSERT(nic_set); + EFRM_ASSERT(index < EFHW_MAX_NR_DEVS && index < 32); + EFRM_ASSERT(value == false || value == true); + nic_set->nics = (nic_set->nics & (~(1 << index))) + (value << index); +} + +static inline void efrm_nic_set_clear(efrm_nic_set_t *nic_set) +{ + nic_set->nics = 0; +} + +static inline void efrm_nic_set_all(efrm_nic_set_t *nic_set) +{ + nic_set->nics = 0xffffffff; +} + +static inline bool efrm_nic_set_is_all_clear(efrm_nic_set_t *nic_set) +{ + return nic_set->nics == 0 ? true : false; +} + +#define EFRM_NIC_SET_FMT "%x" + +static inline uint32_t efrm_nic_set_pri_arg(efrm_nic_set_t *nic_set) +{ + return nic_set->nics; +} + +#define EFRM_FOR_EACH_NIC_INDEX_IN_SET(_set, _nic_i) \ + for ((_nic_i) = 0; (_nic_i) < EFHW_MAX_NR_DEVS; ++(_nic_i)) \ + if (efrm_nic_set_read((_set), (_nic_i))) + +#endif /* __CI_EFRM_NIC_SET_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/nic_table.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/nic_table.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,98 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides public API for NIC table. 
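+ *
+ * Minimal usage sketch (illustrative only): users take a reference on the
+ * table before dereferencing NIC pointers and drop it when done:
+ *
+ *   struct efhw_nic *nic;
+ *   efrm_nic_table_hold();
+ *   nic = efrm_nic_table.a_nic;
+ *   if (nic != NULL)
+ *           EFRM_TRACE("have a NIC, index %d", nic->index);
+ *   efrm_nic_table_rele();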
+ * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFRM_NIC_TABLE_H__ +#define __CI_EFRM_NIC_TABLE_H__ + +#include +#include + +/*-------------------------------------------------------------------- + * + * struct efrm_nic_table - top level driver object keeping all NICs - + * implemented in driver_object.c + * + *--------------------------------------------------------------------*/ + +/*! Comment? */ +struct efrm_nic_table { + /*! nics attached to this driver */ + struct efhw_nic *nic[EFHW_MAX_NR_DEVS]; + /*! pointer to an arbitrary struct efhw_nic if one exists; + * for code which does not care which NIC it wants but + * still needs one. Note you cannot assume nic[0] exists. */ + struct efhw_nic *a_nic; + uint32_t nic_count; /*!< number of nics attached to this driver */ + spinlock_t lock; /*!< lock for table modifications */ + atomic_t ref_count; /*!< refcount for users of nic table */ +}; + +/* Resource driver structures used by other drivers as well */ +extern struct efrm_nic_table efrm_nic_table; + +static inline void efrm_nic_table_hold(void) +{ + atomic_inc(&efrm_nic_table.ref_count); +} + +static inline void efrm_nic_table_rele(void) +{ + atomic_dec(&efrm_nic_table.ref_count); +} + +static inline int efrm_nic_table_held(void) +{ + return (atomic_read(&efrm_nic_table.ref_count) != 0); +} + +/* Run code block _x multiple times with variable nic set to each + * registered NIC in turn. + * DO NOT "break" out of this loop early. 
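+ * (The loop takes efrm_nic_table_hold() in its initialiser and only calls
+ * efrm_nic_table_rele() once the index runs off the end of the table, so
+ * leaving the loop early would leak the table reference.)
+ *
+ * Minimal usage sketch (illustrative only; "nic_i" and "nic" are
+ * caller-chosen names):
+ *
+ *   int nic_i;
+ *   struct efhw_nic *nic;
+ *   EFRM_FOR_EACH_NIC(nic_i, nic)
+ *           EFRM_TRACE("NIC %d is registered", nic_i);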
*/ +#define EFRM_FOR_EACH_NIC(_nic_i, _nic) \ + for ((_nic_i) = (efrm_nic_table_hold(), 0); \ + (_nic_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \ + (_nic_i)++) \ + if (((_nic) = efrm_nic_table.nic[_nic_i])) + +#define EFRM_FOR_EACH_NIC_IN_SET(_set, _i, _nic) \ + for ((_i) = (efrm_nic_table_hold(), 0); \ + (_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \ + ++(_i)) \ + if (((_nic) = efrm_nic_table.nic[_i]) && \ + efrm_nic_set_read((_set), (_i))) + +#endif /* __CI_EFRM_NIC_TABLE_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/private.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/private.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,141 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides private API of efrm library -- resource handling. + * This API is not designed for use outside of SFC resource driver. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFRM_PRIVATE_H__ +#define __CI_EFRM_PRIVATE_H__ + +#include +#include +#include +#include + +/*-------------------------------------------------------------------- + * + * create resource managers + * + *--------------------------------------------------------------------*/ + +/*! Create a resource manager for various types of resources + */ +extern int +efrm_create_iobufset_resource_manager(struct efrm_resource_manager **out); + +extern int +efrm_create_filter_resource_manager(struct efrm_resource_manager **out); + +extern int +efrm_create_vi_resource_manager(struct efrm_resource_manager **out, + const struct vi_resource_dimensions *); + +/*-------------------------------------------------------------------- + * + * efrm_resource_handle_t handling + * + *--------------------------------------------------------------------*/ + +/*! 
Initialize an area of memory to be used as a resource */ +static inline void efrm_resource_init(struct efrm_resource *rs, + int type, int instance) +{ + EFRM_ASSERT(instance >= 0); + EFRM_ASSERT(type >= 0 && type < EFRM_RESOURCE_NUM); + atomic_set(&rs->rs_ref_count, 1); + rs->rs_handle.handle = (type << 28u) | + (((unsigned)jiffies & 0xfff) << 16) | instance; +} + +/*-------------------------------------------------------------------- + * + * Instance pool management + * + *--------------------------------------------------------------------*/ + +/*! Allocate instance pool. Use kfifo_vfree to destroy it. */ +static inline int +efrm_kfifo_id_ctor(struct kfifo **ids_out, + unsigned int base, unsigned int limit, spinlock_t *lock) +{ + unsigned int i; + struct kfifo *ids; + unsigned char *buffer; + unsigned int size = roundup_pow_of_two((limit - base) * sizeof(int)); + + EFRM_ASSERT(base <= limit); + buffer = vmalloc(size); + ids = kfifo_init(buffer, size, GFP_KERNEL, lock); + if (IS_ERR(ids)) + return PTR_ERR(ids); + for (i = base; i < limit; i++) + EFRM_VERIFY_EQ(__kfifo_put(ids, (unsigned char *)&i, + sizeof(i)), sizeof(i)); + + *ids_out = ids; + return 0; +} + +/*-------------------------------------------------------------------- + * + * Various private functions + * + *--------------------------------------------------------------------*/ + +/*! Initialize the fields in the provided resource manager memory area + * \param rm The area of memory to be initialized + * \param dtor A method to destroy the resource manager + * \param name A Textual name for the resource manager + * \param type The type of resource managed + * \param initial_table_size Initial size of the ID table + * \param auto_destroy Destroy resource manager on driver onload iff true + * + * A default table size is provided if the value 0 is provided. + */ +extern int +efrm_resource_manager_ctor(struct efrm_resource_manager *rm, + void (*dtor)(struct efrm_resource_manager *), + const char *name, unsigned type, + int initial_table_size); + +extern void efrm_resource_manager_dtor(struct efrm_resource_manager *rm); + +/*! Insert a resource into table in the resource manager. + * + * Caller should free the resource if this function returns non-zero. + */ +extern int efrm_resource_manager_insert(struct efrm_resource *rs); + +#endif /* __CI_EFRM_PRIVATE_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/resource.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/resource.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,122 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides public interface of efrm library -- resource handling. 
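+ *
+ * Resources are reference counted.  The usual release pattern, shown in
+ * outline here and implemented for real by helpers such as
+ * efrm_filter_resource_release(), is:
+ *
+ *   if (atomic_dec_and_test(&rs->rs_ref_count)) {
+ *           if (__efrm_resource_ref_count_zero(type, instance))
+ *                   ...call the type-specific free function...
+ *   }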
+ * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFRM_RESOURCE_H__ +#define __CI_EFRM_RESOURCE_H__ + +/*-------------------------------------------------------------------- + * + * headers for type dependencies + * + *--------------------------------------------------------------------*/ + +#include +#include +#include +#include + +#ifndef __ci_driver__ +#error "Driver-only file" +#endif + +/*-------------------------------------------------------------------- + * + * struct efrm_resource - represents an allocated resource + * (eg. pinned pages of memory, or resource on a NIC) + * + *--------------------------------------------------------------------*/ + +/*! Representation of an allocated resource */ +struct efrm_resource { + atomic_t rs_ref_count; /*!< users count; see + * __efrm_resource_ref_count_zero() */ + efrm_resource_handle_t rs_handle; +}; + +/*-------------------------------------------------------------------- + * + * managed resource abstraction + * + *--------------------------------------------------------------------*/ + +/*! Factory for resources of a specific type */ +struct efrm_resource_manager { + const char *rm_name; /*!< human readable only */ + spinlock_t rm_lock; +#ifndef NDEBUG + unsigned rm_type; +#endif + int rm_resources; + int rm_resources_hiwat; + /*! table of allocated resources */ + struct efrm_resource **rm_table; + unsigned rm_table_size; + /** + * Destructor for the resource manager. Other resource managers + * might be already dead, although the system guarantees that + * managers are destructed in the order by which they were created + */ + void (*rm_dtor)(struct efrm_resource_manager *); +}; + +#ifdef NDEBUG +# define EFRM_RESOURCE_ASSERT_VALID(rs, rc_mbz) +# define EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm) +#else +/*! Check validity of resource and report on failure */ +extern void efrm_resource_assert_valid(struct efrm_resource *, + int rc_may_be_zero, + const char *file, int line); +# define EFRM_RESOURCE_ASSERT_VALID(rs, rc_mbz) \ + efrm_resource_assert_valid((rs), (rc_mbz), __FILE__, __LINE__) + +/*! Check validity of resource manager and report on failure */ +extern void efrm_resource_manager_assert_valid(struct efrm_resource_manager *, + const char *file, int line); +# define EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm) \ + efrm_resource_manager_assert_valid((rm), __FILE__, __LINE__) +#endif + +/*! 
Check the reference count on the resource provided and delete its + * handle it in its owning resource manager if the + * reference count has fallen to zero. + * + * Returns TRUE if the caller should really free the resource. + */ +extern bool __efrm_resource_ref_count_zero(unsigned type, unsigned instance); + +#endif /* __CI_EFRM_RESOURCE_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/resource_id.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/resource_id.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,104 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides public type and definitions resource handle, and the + * definitions of resource types. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_DRIVER_EFRM_RESOURCE_ID_H__ +#define __CI_DRIVER_EFRM_RESOURCE_ID_H__ + +/*********************************************************************** + * Resource handles + * + * Resource handles are intended for identifying resources at kernel + * level, within the context of a particular NIC. particularly because + * for some resource types, the low 16 bites correspond to hardware + * IDs. They were historically also used at user level, with a nonce + * stored in the bits 16 to 27 (inclusive), but that approach is + * deprecated (but sill alive!). + * + * The handle value 0 is used to mean "no resource". + * Identify resources within the context of a file descriptor at user + * level. + ***********************************************************************/ + +typedef struct efrm_resource_handle_s { + uint32_t handle; +} efrm_resource_handle_t; + +/* You may think these following functions should all have + * _HANDLE_ in their names, but really we are providing an abstract set + * of methods on a (hypothetical) efrm_resource_t object, with + * efrm_resource_handle_t being just the reference one holds to access + * the object (aka "this" or "self"). + */ + +/* Below I use inline instead of macros where possible in order to get + * more type checking help from the compiler; hopefully we'll never + * have to rewrite these to use #define as we've found some horrible + * compiler on which we cannot make static inline do the Right Thing (tm). 
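+ *
+ * For reference, the handle layout implied by the accessors below (and by
+ * efrm_resource_init() in the private header) is:
+ *
+ *   bits 28..31  resource type (EFRM_RESOURCE_*)
+ *   bits 16..27  nonce (historical, as described above)
+ *   bits  0..15  instance / hardware ID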
+ * + * For consistency and to avoid pointless change I spell these + * routines as macro names (CAPTILIZE_UNDERSCORED), which also serves + * to remind people they are compact and inlined. + */ + +#define EFRM_RESOURCE_FMT "[rs:%08x]" + +static inline unsigned EFRM_RESOURCE_PRI_ARG(efrm_resource_handle_t h) +{ + return (h.handle); +} + +static inline unsigned EFRM_RESOURCE_INSTANCE(efrm_resource_handle_t h) +{ + return (h.handle & 0x0000ffff); +} + +static inline unsigned EFRM_RESOURCE_TYPE(efrm_resource_handle_t h) +{ + return (h.handle & 0xf0000000) >> 28; +} + +/*********************************************************************** + * Resource type codes + ***********************************************************************/ + +#define EFRM_RESOURCE_IOBUFSET 0x0 +#define EFRM_RESOURCE_VI 0x1 +#define EFRM_RESOURCE_FILTER 0x2 +#define EFRM_RESOURCE_NUM 0x3 /* This isn't a resource! */ + +#define EFRM_RESOURCE_NAME(type) \ + ((type) == EFRM_RESOURCE_IOBUFSET? "IOBUFSET" : \ + (type) == EFRM_RESOURCE_VI? "VI" : \ + (type) == EFRM_RESOURCE_FILTER? "FILTER" : \ + "") + +#endif /* __CI_DRIVER_EFRM_RESOURCE_ID_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/sysdep.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/sysdep.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,54 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides Linux-like system-independent API for efrm library. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFRM_SYSDEP_H__ +#define __CI_EFRM_SYSDEP_H__ + +/* Spinlocks are defined in efhw/sysdep.h */ +#include + +#if defined(__linux__) && defined(__KERNEL__) + +# include + +#else + +# include + +#endif + +#endif /* __CI_EFRM_SYSDEP_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/sysdep_linux.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/sysdep_linux.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,248 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides version-independent Linux kernel API for efrm library. + * Only kernels >=2.6.9 are supported. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Kfifo API is partially stolen from linux-2.6.22/include/linux/list.h + * Copyright (C) 2004 Stelian Pop + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFRM_SYSDEP_LINUX_H__ +#define __CI_EFRM_SYSDEP_LINUX_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) +/* get roundup_pow_of_two(), which was in kernel.h in early kernel versions */ +#include +#endif + +/******************************************************************** + * + * List API + * + ********************************************************************/ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) +static inline void +list_replace_init(struct list_head *old, struct list_head *new) +{ + new->next = old->next; + new->next->prev = new; + new->prev = old->prev; + new->prev->next = new; + INIT_LIST_HEAD(old); +} +#endif + +static inline struct list_head *list_pop(struct list_head *list) +{ + struct list_head *link = list->next; + list_del(link); + return link; +} + +static inline struct list_head *list_pop_tail(struct list_head *list) +{ + struct list_head *link = list->prev; + list_del(link); + return link; +} + +/******************************************************************** + * + * Workqueue API + * + ********************************************************************/ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) +#define NEED_OLD_WORK_API + +/** + * The old and new work function prototypes just change + * the type of the pointer in the only argument, so it's + * safe to cast one function type to the other + */ +typedef void (*efrm_old_work_func_t) (void *p); + +#undef INIT_WORK +#define INIT_WORK(_work, _func) \ + do { \ + INIT_LIST_HEAD(&(_work)->entry); \ + (_work)->pending = 0; \ + PREPARE_WORK((_work), \ + (efrm_old_work_func_t) (_func), \ + (_work)); \ + } while (0) + +#endif + +/******************************************************************** + * + * Kfifo API + * + ********************************************************************/ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) + +#if !defined(RHEL_RELEASE_CODE) || (RHEL_RELEASE_CODE < 1029) +typedef unsigned gfp_t; +#endif + +#define HAS_NO_KFIFO + +struct kfifo { + unsigned char *buffer; /* the buffer holding the data */ + unsigned int size; /* the size of the allocated buffer */ + unsigned int in; /* data is added at offset (in % size) */ + unsigned int out; /* data is extracted from off. (out % size) */ + spinlock_t *lock; /* protects concurrent modifications */ +}; + +extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, + gfp_t gfp_mask, spinlock_t *lock); +extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, + spinlock_t *lock); +extern void kfifo_free(struct kfifo *fifo); +extern unsigned int __kfifo_put(struct kfifo *fifo, + unsigned char *buffer, unsigned int len); +extern unsigned int __kfifo_get(struct kfifo *fifo, + unsigned char *buffer, unsigned int len); + +/** + * kfifo_put - puts some data into the FIFO + * @fifo: the fifo to be used. + * @buffer: the data to be added. + * @len: the length of the data to be added. + * + * This function copies at most @len bytes from the @buffer into + * the FIFO depending on the free space, and returns the number of + * bytes copied. 
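+ *
+ * Illustrative sketch of how the resource driver recycles an integer ID
+ * (assuming "fifo" was built by something like efrm_kfifo_id_ctor()):
+ *
+ *   int id = 42;
+ *   kfifo_put(fifo, (unsigned char *)&id, sizeof(id));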
+ */ +static inline unsigned int +kfifo_put(struct kfifo *fifo, unsigned char *buffer, unsigned int len) +{ + unsigned long flags; + unsigned int ret; + + spin_lock_irqsave(fifo->lock, flags); + + ret = __kfifo_put(fifo, buffer, len); + + spin_unlock_irqrestore(fifo->lock, flags); + + return ret; +} + +/** + * kfifo_get - gets some data from the FIFO + * @fifo: the fifo to be used. + * @buffer: where the data must be copied. + * @len: the size of the destination buffer. + * + * This function copies at most @len bytes from the FIFO into the + * @buffer and returns the number of copied bytes. + */ +static inline unsigned int +kfifo_get(struct kfifo *fifo, unsigned char *buffer, unsigned int len) +{ + unsigned long flags; + unsigned int ret; + + spin_lock_irqsave(fifo->lock, flags); + + ret = __kfifo_get(fifo, buffer, len); + + /* + * optimization: if the FIFO is empty, set the indices to 0 + * so we don't wrap the next time + */ + if (fifo->in == fifo->out) + fifo->in = fifo->out = 0; + + spin_unlock_irqrestore(fifo->lock, flags); + + return ret; +} + +/** + * __kfifo_len - returns the number of bytes available in the FIFO, no locking version + * @fifo: the fifo to be used. + */ +static inline unsigned int __kfifo_len(struct kfifo *fifo) +{ + return fifo->in - fifo->out; +} + +/** + * kfifo_len - returns the number of bytes available in the FIFO + * @fifo: the fifo to be used. + */ +static inline unsigned int kfifo_len(struct kfifo *fifo) +{ + unsigned long flags; + unsigned int ret; + + spin_lock_irqsave(fifo->lock, flags); + + ret = __kfifo_len(fifo); + + spin_unlock_irqrestore(fifo->lock, flags); + + return ret; +} + +#else +#include +#endif + +static inline void kfifo_vfree(struct kfifo *fifo) +{ + vfree(fifo->buffer); + kfree(fifo); +} + +#endif /* __CI_EFRM_SYSDEP_LINUX_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,171 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains public API for VI resource. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFRM_VI_RESOURCE_H__ +#define __CI_EFRM_VI_RESOURCE_H__ + +#include +#include +#include + +struct vi_resource; + +/* Make these inline instead of macros for type checking */ +static inline struct vi_resource * +efrm_to_vi_resource(struct efrm_resource *rs) +{ + EFRM_ASSERT(EFRM_RESOURCE_TYPE(rs->rs_handle) == EFRM_RESOURCE_VI); + return (struct vi_resource *) rs; +} +static inline struct +efrm_resource *efrm_from_vi_resource(struct vi_resource *rs) +{ + return (struct efrm_resource *)rs; +} + +#define EFAB_VI_RESOURCE_INSTANCE(virs) \ + EFRM_RESOURCE_INSTANCE(efrm_from_vi_resource(virs)->rs_handle) + +#define EFAB_VI_RESOURCE_PRI_ARG(virs) \ + EFRM_RESOURCE_PRI_ARG(efrm_from_vi_resource(virs)->rs_handle) + +extern int +efrm_vi_resource_alloc(struct vi_resource *evq_virs, + uint16_t vi_flags, int32_t evq_capacity, + int32_t txq_capacity, int32_t rxq_capacity, + uint8_t tx_q_tag, uint8_t rx_q_tag, + struct vi_resource **virs_in_out, + uint32_t *out_io_mmap_bytes, + uint32_t *out_mem_mmap_bytes, + uint32_t *out_txq_capacity, + uint32_t *out_rxq_capacity); + +static inline void efrm_vi_resource_ref(struct vi_resource *virs) +{ + atomic_inc(&efrm_from_vi_resource(virs)->rs_ref_count); +} + +/* efrm_vi_resource_free should be called only if + * __efrm_resource_ref_count_zero() returned true. + * The easiest way is to call efrm_vi_resource_release() */ +extern void efrm_vi_resource_free(struct vi_resource *virs); +static inline void efrm_vi_resource_release(struct vi_resource *virs) +{ + unsigned id; + struct efrm_resource *rs = efrm_from_vi_resource(virs); + + id = EFRM_RESOURCE_INSTANCE(rs->rs_handle); + + if (atomic_dec_and_test(&rs->rs_ref_count)) { + if (__efrm_resource_ref_count_zero(EFRM_RESOURCE_VI, id)) { + EFRM_ASSERT(EFRM_RESOURCE_INSTANCE(rs->rs_handle) == + id); + efrm_vi_resource_free(virs); + } + } +} + +/*-------------------------------------------------------------------- + * + * eventq handling + * + *--------------------------------------------------------------------*/ + +/*! Reset an event queue and clear any associated timers */ +extern void efrm_eventq_reset(struct vi_resource *virs, int nic_index); + +/*! Register a kernel-level handler for the event queue. This function is + * called whenever a timer expires, or whenever the event queue is woken + * but no thread is blocked on it. + * + * This function returns -EBUSY if a callback is already installed. + * + * \param rs Event-queue resource + * \param handler Callback-handler + * \param arg Argument to pass to callback-handler + * \return Status code + */ +extern int +efrm_eventq_register_callback(struct vi_resource *rs, + void (*handler)(void *arg, int is_timeout, + struct efhw_nic *nic), + void *arg); + +/*! Kill the kernel-level callback. + * + * This function stops the timer from running and unregisters the callback + * function. It waits for any running timeout handlers to complete before + * returning. + * + * \param rs Event-queue resource + * \return Nothing + */ +extern void efrm_eventq_kill_callback(struct vi_resource *rs); + +/*! Ask the NIC to generate a wakeup when an event is next delivered. 
*/ +extern void efrm_eventq_request_wakeup(struct vi_resource *rs, + unsigned current_ptr, + unsigned nic_index); + +/*! Register a kernel-level handler for flush completions. + * \TODO Currently, it is unsafe to install a callback more than once. + * + * \param rs VI resource being flushed. + * \param handler Callback handler function. + * \param arg Argument to be passed to handler. + */ +extern void +efrm_vi_register_flush_callback(struct vi_resource *rs, + void (*handler)(void *), + void *arg); + +int efrm_vi_resource_flush_retry(struct vi_resource *virs); + +/*! Comment? */ +extern int efrm_pt_flush(struct vi_resource *); + +/*! Comment? */ +extern int efrm_pt_pace(struct vi_resource *, unsigned int val); + +uint32_t efrm_vi_rm_txq_bytes(struct vi_resource *virs + /*,struct efhw_nic *nic */ ); +uint32_t efrm_vi_rm_rxq_bytes(struct vi_resource *virs + /*,struct efhw_nic *nic */ ); +uint32_t efrm_vi_rm_evq_bytes(struct vi_resource *virs + /*,struct efhw_nic *nic */ ); + +#endif /* __CI_EFRM_VI_RESOURCE_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_manager.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_manager.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,182 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains type definitions for VI resource. These types + * may be used outside of the SFC resource driver, but such use is not + * recommended. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_DRIVER_EFAB_VI_RESOURCE_MANAGER_H__ +#define __CI_DRIVER_EFAB_VI_RESOURCE_MANAGER_H__ + +#include +#include +#include + +#define EFRM_VI_RM_DMA_QUEUE_COUNT 2 +#define EFRM_VI_RM_DMA_QUEUE_TX 0 +#define EFRM_VI_RM_DMA_QUEUE_RX 1 + +/** Numbers of bits which can be set in the evq_state member of + * vi_resource_evq_info. */ +enum { + /** This bit is set if a wakeup has been requested on the NIC. */ + VI_RESOURCE_EVQ_STATE_WAKEUP_PENDING, + /** This bit is set if the wakeup is valid for the sleeping + * process. */ + VI_RESOURCE_EVQ_STATE_CALLBACK_REGISTERED, + /** This bit is set if a wakeup or timeout event is currently being + * processed. */ + VI_RESOURCE_EVQ_STATE_BUSY, +}; +#define VI_RESOURCE_EVQ_STATE(X) \ + (((int32_t)1) << (VI_RESOURCE_EVQ_STATE_##X)) + +/** Information about an event queue. 
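+ * The evq_state member below is interpreted as a mask of
+ * VI_RESOURCE_EVQ_STATE() bits, e.g. (illustrative only; "evq_info" is a
+ * caller-chosen pointer name):
+ *
+ *   if (evq_info->evq_state & VI_RESOURCE_EVQ_STATE(BUSY))
+ *           ...a wakeup or timeout event is being processed...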
*/ +struct vi_resource_evq_info { + /** Flag bits indicating the state of wakeups. */ + unsigned long evq_state; + /** A pointer to the resource instance for this queue. This member + * is only valid if evq_state is non-zero or the resource is known + * to have a non-zero reference count. */ + struct vi_resource *evq_virs; +}; + +#ifdef __ci_ul_driver__ +#define EFRM_VI_USE_WORKQUEUE 0 +#else +#define EFRM_VI_USE_WORKQUEUE 1 +#endif + +/*! Global information for the VI resource manager. */ +struct vi_resource_manager { + struct efrm_resource_manager rm; + + struct kfifo *instances_with_timer; + int with_timer_base; + int with_timer_limit; + struct kfifo *instances_with_interrupt; + int with_interrupt_base; + int with_interrupt_limit; + + bool iscsi_dmaq_instance_is_free; + struct vi_resource_evq_info *evq_infos; + + /* We keep VI resources which need flushing on these lists. The VI + * is put on the outstanding list when the flush request is issued + * to the hardware and removed when the flush event arrives. The + * hardware can only handle a limited number of RX flush requests at + * once, so VIs are placed in the waiting list until the flush can + * be issued. Flushes can be requested by the client or internally + * by the VI resource manager. In the former case, the reference + * count must be non-zero for the duration of the flush and in the + * later case, the reference count must be zero. */ + struct list_head rx_flush_waiting_list; + struct list_head rx_flush_outstanding_list; + struct list_head tx_flush_outstanding_list; + int rx_flush_outstanding_count; + + /* once the flush has happened we push the close into the work queue + * so its OK on Windows to free the resources (Bug 3469). Resources + * on this list have zero reference count. + */ + struct list_head close_pending; + struct work_struct work_item; +#if EFRM_VI_USE_WORKQUEUE + struct workqueue_struct *workqueue; +#endif +}; + +struct vi_resource_nic_info { + struct eventq_resource_hardware evq_pages; +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + efhw_iopages_t dmaq_pages[EFRM_VI_RM_DMA_QUEUE_COUNT]; +#endif +}; + +struct vi_resource { + /* Some macros make the assumption that the struct efrm_resource is + * the first member of a struct vi_resource. */ + struct efrm_resource rs; + atomic_t evq_refs; /*!< Number of users of the event queue. 
*/ + + efrm_nic_set_t nic_set; + + uint32_t bar_mmap_bytes; + uint32_t mem_mmap_bytes; + + int32_t evq_capacity; + int32_t dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_COUNT]; + + uint8_t dmaq_tag[EFRM_VI_RM_DMA_QUEUE_COUNT]; + uint16_t flags; + + /* we keep PT endpoints that have been destroyed on a list + * until we have seen their TX and RX DMAQs flush complete + * (see Bug 1217) + */ + struct list_head rx_flush_link; + struct list_head tx_flush_link; + efrm_nic_set_t rx_flush_nic_set; + efrm_nic_set_t rx_flush_outstanding_nic_set; + efrm_nic_set_t tx_flush_nic_set; + uint64_t flush_time; + int flush_count; + + void (*flush_callback_fn)(void *); + void *flush_callback_arg; + + void (*evq_callback_fn) (void *arg, int is_timeout, + struct efhw_nic *nic); + void *evq_callback_arg; + + struct vi_resource *evq_virs; /*!< EVQ for DMA queues */ + +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + struct efhw_buffer_table_allocation + dmaq_buf_tbl_alloc[EFRM_VI_RM_DMA_QUEUE_COUNT]; +#endif + + struct vi_resource_nic_info nic_info[EFHW_MAX_NR_DEVS]; +}; + +#undef vi_resource +#define vi_resource(rs1) container_of((rs1), struct vi_resource, rs) + +static inline dma_addr_t +efrm_eventq_dma_addr(struct vi_resource *virs, uint32_t nic_index) +{ + struct eventq_resource_hardware *hw; + EFRM_ASSERT(efrm_nic_set_read(&virs->nic_set, nic_index)); + + hw = &(virs->nic_info[nic_index].evq_pages); + + return efhw_iopages_dma_addr(&(hw->iobuff)) + hw->iobuff_off; +} + +#endif /* __CI_DRIVER_EFAB_VI_RESOURCE_MANAGER_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_private.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/ci/efrm/vi_resource_private.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,83 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains private API for VI resource. The API is not designed + * to be used outside of the SFC resource driver. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __CI_EFRM_VI_RESOURCE_PRIVATE_H__ +#define __CI_EFRM_VI_RESOURCE_PRIVATE_H__ + +#include +#include + +extern struct vi_resource_manager *efrm_vi_manager; + +/*************************************************************************/ + +extern void efrm_vi_rm_delayed_free(struct work_struct *data); + +extern void efrm_vi_rm_salvage_flushed_vis(void); + +void efrm_vi_rm_free_flushed_resource(struct vi_resource *virs); + +void efrm_vi_rm_init_dmaq(struct vi_resource *virs, int queue_index, + struct efhw_nic *nic); + +static inline int +efrm_eventq_bytes(struct vi_resource *virs, uint32_t nic_index) +{ + EFRM_ASSERT(efrm_nic_set_read(&virs->nic_set, nic_index)); + + return efrm_vi_rm_evq_bytes(virs); +} + +static inline efhw_event_t * +efrm_eventq_base(struct vi_resource *virs, uint32_t nic_index) +{ + struct eventq_resource_hardware *hw; + + EFRM_ASSERT(efrm_nic_set_read(&virs->nic_set, nic_index)); + + hw = &(virs->nic_info[nic_index].evq_pages); + + return (efhw_event_t *) (efhw_iopages_ptr(&(hw->iobuff)) + + hw->iobuff_off); +} + +/*! Wakeup handler, see efhw_ev_handler_t for prototype */ +extern void efrm_handle_wakeup_event(struct efhw_nic *nic, efhw_event_t *ev); + +/*! Timeout handler, see efhw_ev_handler_t for prototype */ +extern void efrm_handle_timeout_event(struct efhw_nic *nic, efhw_event_t *ev); + +/*! DMA flush handler, see efhw_ev_handler_t for prototype */ +extern void efrm_handle_dmaq_flushed(struct efhw_nic *nic, int instance, + int rx_flush); + +#endif /* __CI_EFRM_VI_RESOURCE_PRIVATE_H__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/driver_object.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/driver_object.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,174 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains support for the global driver variables. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include +#include + +/* We use #define rather than static inline here so that the Windows + * "prefast" compiler can see its own locking primitive when these + * two function are used (and then perform extra checking where they + * are used) + * + * Both macros operate on an irq_flags_t +*/ + +#define efrm_driver_lock(irqlock_state) \ + spin_lock_irqsave(&efrm_nic_table.lock, irqlock_state) + +#define efrm_driver_unlock(irqlock_state) \ + spin_unlock_irqrestore(&efrm_nic_table.lock, \ + irqlock_state); + +/* These routines are all methods on the architecturally singleton + global variables: efrm_nic_table, efrm_rm_table. + + I hope we never find a driver model that does not allow global + structure variables :) (but that would break almost every driver I've + ever seen). +*/ + +/*! Exported driver state */ +struct efrm_nic_table efrm_nic_table; +EXPORT_SYMBOL(efrm_nic_table); + +/* Internal table with resource managers. + * We'd like to not export it, but we are still using efrm_rm_table + * in the char driver. So, it is declared in the private header with + * a purpose. */ +struct efrm_resource_manager *efrm_rm_table[EFRM_RESOURCE_NUM]; +EXPORT_SYMBOL(efrm_rm_table); + +int efrm_driver_ctor(void) +{ + memset(&efrm_nic_table, 0, sizeof(efrm_nic_table)); + memset(&efrm_rm_table, 0, sizeof(efrm_rm_table)); + + spin_lock_init(&efrm_nic_table.lock); + + EFRM_TRACE("%s: driver created", __FUNCTION__); + return 0; +} + +int efrm_driver_dtor(void) +{ + EFRM_ASSERT(!efrm_nic_table_held()); + + spin_lock_destroy(&efrm_nic_table.lock); + EFRM_TRACE("%s: driver deleted", __FUNCTION__); + return 0; +} + +int efrm_driver_register_nic(struct efhw_nic *nic, int nic_index) +{ + int rc = 0; + irq_flags_t lock_flags; + + EFRM_ASSERT(nic_index >= 0); + + efrm_driver_lock(lock_flags); + + if (efrm_nic_table_held()) { + EFRM_WARN("%s: driver object is in use", __FUNCTION__); + rc = -EBUSY; + goto done; + } + + if (efrm_nic_table.nic_count == EFHW_MAX_NR_DEVS) { + EFRM_WARN("%s: filled up NIC table size %d", __FUNCTION__, + EFHW_MAX_NR_DEVS); + rc = -E2BIG; + goto done; + } + + EFRM_ASSERT(efrm_nic_table.nic[nic_index] == NULL); + efrm_nic_table.nic[nic_index] = nic; + nic->index = nic_index; + + if (efrm_nic_table.a_nic == NULL) + efrm_nic_table.a_nic = nic; + + efrm_nic_table.nic_count++; + efrm_driver_unlock(lock_flags); + return rc; + +done: + efrm_driver_unlock(lock_flags); + return rc; +} + +int efrm_driver_unregister_nic(struct efhw_nic *nic) +{ + int rc = 0; + int nic_index = nic->index; + irq_flags_t lock_flags; + + EFRM_ASSERT(nic_index >= 0); + + efrm_driver_lock(lock_flags); + + if (efrm_nic_table_held()) { + EFRM_WARN("%s: driver object is in use", __FUNCTION__); + rc = -EBUSY; + goto done; + } + + EFRM_ASSERT(efrm_nic_table.nic[nic_index] == nic); + + nic->index = -1; + efrm_nic_table.nic[nic_index] = NULL; + + --efrm_nic_table.nic_count; + + if (efrm_nic_table.a_nic == nic) { + if (efrm_nic_table.nic_count == 0) { + efrm_nic_table.a_nic = NULL; + } else { + for (nic_index = 0; nic_index < EFHW_MAX_NR_DEVS; + nic_index++) { + if (efrm_nic_table.nic[nic_index] != NULL) + efrm_nic_table.a_nic = + efrm_nic_table.nic[nic_index]; + } + EFRM_ASSERT(efrm_nic_table.a_nic); + } + } + +done: + 
efrm_driver_unlock(lock_flags); + return rc; +} Index: head-2008-03-17/drivers/net/sfc/sfc_resource/driverlink_new.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/driverlink_new.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,290 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains driverlink code which interacts with the sfc network + * driver. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include "linux_resource_internal.h" +#include "driverlink_api.h" +#include "kernel_compat.h" +#include + +#include +#include + +/* The DL driver and associated calls */ +static int efrm_dl_probe(struct efx_dl_device *efrm_dev, + const struct net_device *net_dev, + const struct efx_dl_device_info *dev_info, + const char *silicon_rev); + +static void efrm_dl_remove(struct efx_dl_device *efrm_dev); + +static void efrm_dl_reset_suspend(struct efx_dl_device *efrm_dev); + +static void efrm_dl_reset_resume(struct efx_dl_device *efrm_dev, int ok); + +static void efrm_dl_mtu_changed(struct efx_dl_device *, int); +static void efrm_dl_event_falcon(struct efx_dl_device *efx_dev, void *p_event); + +static struct efx_dl_driver efrm_dl_driver = { + .name = "resource", + .probe = efrm_dl_probe, + .remove = efrm_dl_remove, + .reset_suspend = efrm_dl_reset_suspend, + .reset_resume = efrm_dl_reset_resume +}; + +static void +init_vi_resource_dimensions(struct vi_resource_dimensions *rd, + const struct efx_dl_falcon_resources *res) +{ + rd->evq_timer_min = res->evq_timer_min; + rd->evq_timer_max = res->evq_timer_max; + rd->evq_int_min = res->evq_int_min; + rd->evq_int_max = res->evq_int_max; + rd->rxq_min = res->rxq_min; + rd->rxq_max = res->rxq_max; + rd->txq_min = res->txq_min; + rd->txq_max = res->txq_max; + EFRM_TRACE + ("Using evq_int(%d-%d) evq_timer(%d-%d) RXQ(%d-%d) TXQ(%d-%d)", + res->evq_int_min, res->evq_int_max, res->evq_timer_min, + res->evq_timer_max, res->rxq_min, res->rxq_max, res->txq_min, + res->txq_max); +} + +#if defined(EFX_NOT_UPSTREAM) +/* We have a module parameter that can tell us to only load the char driver + * for 1 NIC (if there are multiple NICs in the system), and if so which one. + * This tells us the PCI bus and slot of the NIC to load for, or -1 to just + * load on all NICs (the default). 
+ * Value is a hex number in the format + * bbbbss + * where: + * bbbb - PCI bus number + * ss - PCI slot number + */ +unsigned int only_NIC = -1; + +/** @ingroup module_params */ +module_param(only_NIC, uint, 0444); +MODULE_PARM_DESC(only_NIC, + "Initialise sfc_resource driver for one NIC only, " + "with specified PCI bus and slot"); +#endif + +static int +efrm_dl_probe(struct efx_dl_device *efrm_dev, + const struct net_device *net_dev, + const struct efx_dl_device_info *dev_info, + const char *silicon_rev) +{ + struct vi_resource_dimensions res_dim; + struct efx_dl_falcon_resources *res; + struct linux_efhw_nic *lnic; + struct pci_dev *dev; + struct efhw_nic *nic; + unsigned probe_flags = 0; + int rc; + + efrm_dev->priv = NULL; + + efx_dl_for_each_device_info_matching(dev_info, EFX_DL_FALCON_RESOURCES, + struct efx_dl_falcon_resources, + hdr, res) { + /* break out, leaving res pointing at the falcon resources */ + break; + } + + if (res == NULL) { + EFRM_ERR("%s: Unable to find falcon driverlink resources", + __FUNCTION__); + return -EINVAL; + } + + if (res->flags & EFX_DL_FALCON_USE_MSI) + probe_flags |= NIC_FLAG_TRY_MSI; + + dev = efrm_dev->pci_dev; + if (res->flags & EFX_DL_FALCON_DUAL_FUNC) { + unsigned vendor = dev->vendor; + EFRM_ASSERT(dev->bus != NULL); + dev = NULL; + +#if defined(EFX_NOT_UPSTREAM) + if (only_NIC != -1 && + (efrm_dev->pci_dev->bus->number != + ((only_NIC >> 8) & 0xFFFF) + || PCI_SLOT(efrm_dev->pci_dev->devfn) != + (only_NIC & 0xFF))) { + EFRM_NOTICE("Hiding char device %x:%x", + efrm_dev->pci_dev->bus->number, + PCI_SLOT(efrm_dev->pci_dev->devfn)); + return -ENODEV; + } +#endif + + while ((dev = pci_get_device(vendor, FALCON_S_DEVID, dev)) + != NULL) { + EFRM_ASSERT(dev->bus != NULL); + /* With PCIe (since it's point to point) + * the slot ID is usually 0 and + * the bus ID changes NIC to NIC, so we really + * need to check both. */ + if (PCI_SLOT(dev->devfn) == + PCI_SLOT(efrm_dev->pci_dev->devfn) + && dev->bus->number == + efrm_dev->pci_dev->bus->number) + break; + } + if (dev == NULL) { + EFRM_ERR("%s: Unable to find falcon secondary " + "PCI device.", __FUNCTION__); + return -ENODEV; + } + pci_dev_put(dev); + } + + init_vi_resource_dimensions(&res_dim, res); + + rc = efrm_nic_add(dev, probe_flags, net_dev->dev_addr, &lnic, + res->biu_lock, + res->buffer_table_min, res->buffer_table_max, + &res_dim); + if (rc != 0) + return rc; + + nic = &lnic->nic; + nic->mtu = net_dev->mtu + ETH_HLEN; + nic->net_driver_dev = efrm_dev; + nic->ifindex = net_dev->ifindex; +#ifdef HAS_NET_NAMESPACE + nic->nd_net = net_dev->nd_net; +#endif + efrm_dev->priv = nic; + + /* Register a callback so we're told when MTU changes. + * We dynamically allocate efx_dl_callbacks, because + * the callbacks that we want depends on the NIC type. + */ + lnic->dl_callbacks = + kmalloc(sizeof(struct efx_dl_callbacks), GFP_KERNEL); + if (!lnic->dl_callbacks) { + EFRM_ERR("Out of memory (%s)", __FUNCTION__); + efrm_nic_del(lnic); + return -ENOMEM; + } + memset(lnic->dl_callbacks, 0, sizeof(*lnic->dl_callbacks)); + lnic->dl_callbacks->mtu_changed = efrm_dl_mtu_changed; + + if ((res->flags & EFX_DL_FALCON_DUAL_FUNC) == 0) { + /* Net driver receives all management events. + * Register a callback to receive the ones + * we're interested in. 
*/ + lnic->dl_callbacks->event = efrm_dl_event_falcon; + } + + rc = efx_dl_register_callbacks(efrm_dev, lnic->dl_callbacks); + if (rc < 0) { + EFRM_ERR("%s: efx_dl_register_callbacks failed (%d)", + __FUNCTION__, rc); + kfree(lnic->dl_callbacks); + efrm_nic_del(lnic); + return rc; + } + + return 0; +} + +/* When we unregister ourselves on module removal, this function will be + * called for all the devices we claimed */ +static void efrm_dl_remove(struct efx_dl_device *efrm_dev) +{ + struct efhw_nic *nic = efrm_dev->priv; + struct linux_efhw_nic *lnic = linux_efhw_nic(nic); + EFRM_TRACE("%s called", __FUNCTION__); + if (lnic->dl_callbacks) { + efx_dl_unregister_callbacks(efrm_dev, lnic->dl_callbacks); + kfree(lnic->dl_callbacks); + } + if (efrm_dev->priv) + efrm_nic_del(lnic); + EFRM_TRACE("%s OK", __FUNCTION__); +} + +static void efrm_dl_reset_suspend(struct efx_dl_device *efrm_dev) +{ + EFRM_NOTICE("%s:", __FUNCTION__); +} + +static void efrm_dl_reset_resume(struct efx_dl_device *efrm_dev, int ok) +{ + EFRM_NOTICE("%s: ok=%d", __FUNCTION__, ok); +} + +int efrm_driverlink_register(void) +{ + EFRM_TRACE("%s:", __FUNCTION__); + return efx_dl_register_driver(&efrm_dl_driver); +} + +void efrm_driverlink_unregister(void) +{ + EFRM_TRACE("%s:", __FUNCTION__); + efx_dl_unregister_driver(&efrm_dl_driver); +} + +static void efrm_dl_mtu_changed(struct efx_dl_device *efx_dev, int mtu) +{ + struct efhw_nic *nic = efx_dev->priv; + + ASSERT_RTNL(); /* Since we're looking at efx_dl_device::port_net_dev */ + + EFRM_TRACE("%s: old=%d new=%d", __FUNCTION__, nic->mtu, mtu + ETH_HLEN); + /* If this happened we must have agreed to it above */ + nic->mtu = mtu + ETH_HLEN; +} + +static void efrm_dl_event_falcon(struct efx_dl_device *efx_dev, void *p_event) +{ + struct efhw_nic *nic = efx_dev->priv; + struct linux_efhw_nic *lnic = linux_efhw_nic(nic); + efhw_event_t *ev = p_event; + + switch (FALCON_EVENT_CODE(ev)) { + case FALCON_EVENT_CODE_CHAR: + falcon_handle_char_event(nic, lnic->ev_handlers, ev); + break; + default: + EFRM_WARN("%s: unknown event type=%x", __FUNCTION__, + (unsigned)FALCON_EVENT_CODE(ev)); + break; + } +} Index: head-2008-03-17/drivers/net/sfc/sfc_resource/efx_vi_shm.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/efx_vi_shm.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,701 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides implementation of EFX VI API, used from Xen + * acceleration driver. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include "linux_resource_internal.h" +#include +#include +#include +#include +#include +#include +#include "kernel_compat.h" + +#if EFX_VI_STATIC_FILTERS +struct filter_list_t { + struct filter_list_t *next; + struct filter_resource *fres; +}; +#endif + +struct efx_vi_state { + struct vi_resource *vi_res; + + int nic_index; + + void (*callback_fn)(void *arg, int is_timeout); + void *callback_arg; + + struct completion flush_completion; + +#if EFX_VI_STATIC_FILTERS + struct filter_list_t fres[EFX_VI_STATIC_FILTERS]; + struct filter_list_t *free_fres; + struct filter_list_t *used_fres; +#endif +}; + +static void efx_vi_flush_complete(void *state_void) +{ + struct efx_vi_state *state = (struct efx_vi_state *)state_void; + + complete(&state->flush_completion); +} + +static inline int alloc_ep(struct efx_vi_state *state) +{ + int rc; + + rc = efrm_vi_resource_alloc(NULL, EFHW_VI_JUMBO_EN, + efx_vi_eventq_size, + FALCON_DMA_Q_DEFAULT_TX_SIZE, + FALCON_DMA_Q_DEFAULT_RX_SIZE, + 0, 0, &state->vi_res, NULL, NULL, NULL, + NULL); + if (rc < 0) { + EFRM_ERR("%s: ERROR efrm_vi_resource_alloc error %d", + __FUNCTION__, rc); + return rc; + } + + efrm_vi_register_flush_callback(state->vi_res, &efx_vi_flush_complete, + (void *)state); + + return 0; +} + +static int free_ep(struct efx_vi_state *efx_state) +{ + efrm_vi_resource_release(efx_state->vi_res); + + return 0; +} + +#if EFX_VI_STATIC_FILTERS +static int efx_vi_alloc_static_filters(struct efx_vi_state *efx_state) +{ + int i; + int rc; + + efx_state->free_fres = efx_state->used_fres = NULL; + + for (i = 0; i < EFX_VI_STATIC_FILTERS; i++) { + rc = efrm_filter_resource_alloc(efx_state->vi_res, + &efx_state->fres[i].fres); + if (rc < 0) { + EFRM_ERR("%s: efrm_filter_resource_alloc failed: %d", + __FUNCTION__, rc); + while (i > 0) { + i--; + efrm_filter_resource_release(efx_state-> + fres[i].fres); + } + efx_state->free_fres = NULL; + return rc; + } + efx_state->fres[i].next = efx_state->free_fres; + efx_state->free_fres = &efx_state->fres[i]; + } + + return 0; +} +#endif + +int efx_vi_alloc(struct efx_vi_state **vih_out, int nic_index) +{ + struct efx_vi_state *efx_state; + int rc; + + BUG_ON(nic_index < 0 || nic_index >= EFHW_MAX_NR_DEVS); + + efx_state = kmalloc(sizeof(struct efx_vi_state), GFP_KERNEL); + + if (!efx_state) { + EFRM_ERR("%s: failed to allocate memory for efx_vi_state", + __FUNCTION__); + rc = -ENOMEM; + goto fail; + } + + efx_state->nic_index = nic_index; + init_completion(&efx_state->flush_completion); + + /* basically allocate_pt_endpoint() */ + rc = alloc_ep(efx_state); + if (rc) { + EFRM_ERR("%s: alloc_ep failed: %d", __FUNCTION__, rc); + goto fail_no_pt; + } +#if EFX_VI_STATIC_FILTERS + /* Statically allocate a set of filter resources - removes the + restriction on not being able to use efx_vi_filter() from + in_atomic() */ + rc = efx_vi_alloc_static_filters(efx_state); + if (rc) + goto fail_no_filters; +#endif + + *vih_out = efx_state; + + return 0; +#if EFX_VI_STATIC_FILTERS +fail_no_filters: + free_ep(efx_state); +#endif +fail_no_pt: + kfree(efx_state); +fail: + return rc; +} +EXPORT_SYMBOL(efx_vi_alloc); + +void efx_vi_free(struct efx_vi_state *vih) +{ + struct efx_vi_state *efx_state = vih; + + /* TODO flush dma channels, init dma 
queues?. See ef_free_vnic() */ +#if EFX_VI_STATIC_FILTERS + int i; + + for (i = 0; i < EFX_VI_STATIC_FILTERS; i++) + efrm_filter_resource_release(efx_state->fres[i].fres); +#endif + + if (efx_state->vi_res) + free_ep(efx_state); + + kfree(efx_state); +} +EXPORT_SYMBOL(efx_vi_free); + +void efx_vi_reset(struct efx_vi_state *vih) +{ + struct efx_vi_state *efx_state = vih; + + efrm_pt_flush(efx_state->vi_res); + + while (wait_for_completion_timeout(&efx_state->flush_completion, HZ) + == 0) + efrm_vi_resource_flush_retry(efx_state->vi_res); + + /* Bosch the eventq */ + efrm_eventq_reset(efx_state->vi_res, 0); + return; +} +EXPORT_SYMBOL(efx_vi_reset); + +static void +efx_vi_eventq_callback(void *context, int is_timeout, struct efhw_nic *nic) +{ + struct efx_vi_state *efx_state = (struct efx_vi_state *)context; + + EFRM_ASSERT(efx_state->callback_fn); + + return efx_state->callback_fn(efx_state->callback_arg, is_timeout); +} + +int +efx_vi_eventq_register_callback(struct efx_vi_state *vih, + void (*callback)(void *context, int is_timeout), + void *context) +{ + struct efx_vi_state *efx_state = vih; + + efx_state->callback_fn = callback; + efx_state->callback_arg = context; + + /* Register the eventq timeout event callback */ + efrm_eventq_register_callback(efx_state->vi_res, + efx_vi_eventq_callback, efx_state); + + return 0; +} +EXPORT_SYMBOL(efx_vi_eventq_register_callback); + +int efx_vi_eventq_kill_callback(struct efx_vi_state *vih) +{ + struct efx_vi_state *efx_state = vih; + + if (efx_state->vi_res->evq_callback_fn) + efrm_eventq_kill_callback(efx_state->vi_res); + + efx_state->callback_fn = NULL; + efx_state->callback_arg = NULL; + + return 0; +} +EXPORT_SYMBOL(efx_vi_eventq_kill_callback); + +struct efx_vi_dma_map_state { + struct efhw_buffer_table_allocation bt_handle; + int n_pages; + dma_addr_t *dma_addrs; +}; + +int +efx_vi_dma_map_pages(struct efx_vi_state *vih, struct page **pages, + int n_pages, struct efx_vi_dma_map_state **dmh_out) +{ + struct efx_vi_state *efx_state = vih; + int order = fls(n_pages - 1), rc, i, evq_id; + dma_addr_t dma_addr; + struct efx_vi_dma_map_state *dm_state; + + if (n_pages != (1 << order)) { + EFRM_WARN("%s: Can only allocate buffers in power of 2 " + "sizes (not %d)", __FUNCTION__, n_pages); + return -EINVAL; + } + + dm_state = kmalloc(sizeof(struct efx_vi_dma_map_state), GFP_KERNEL); + if (!dm_state) + return -ENOMEM; + + dm_state->dma_addrs = kmalloc(sizeof(dma_addr_t) * n_pages, + GFP_KERNEL); + if (!dm_state->dma_addrs) { + kfree(dm_state); + return -ENOMEM; + } + + rc = efrm_buffer_table_alloc(order, &dm_state->bt_handle); + if (rc < 0) { + kfree(dm_state->dma_addrs); + kfree(dm_state); + return rc; + } + + evq_id = EFRM_RESOURCE_INSTANCE(efx_state->vi_res->rs.rs_handle); + for (i = 0; i < n_pages; i++) { + /* TODO do we need to get_page() here ? 
*/ + + dma_addr = pci_map_page + (linux_efhw_nic(efrm_nic_table.nic[efx_state->nic_index])-> + pci_dev, pages[i], 0, PAGE_SIZE, PCI_DMA_TODEVICE); + + efrm_buffer_table_set(&dm_state->bt_handle, i, dma_addr, + evq_id); + + dm_state->dma_addrs[i] = dma_addr; + + /* Would be nice to not have to call commit each time, but + * comment says there are hardware restrictions on how often + * you can go without it, so do this to be safe */ + efrm_buffer_table_commit(); + } + + dm_state->n_pages = n_pages; + + *dmh_out = dm_state; + + return 0; +} +EXPORT_SYMBOL(efx_vi_dma_map_pages); + +/* Function needed as Xen can't get pages for grants in dom0, but can + get dma address */ +int +efx_vi_dma_map_addrs(struct efx_vi_state *vih, + unsigned long long *bus_dev_addrs, + int n_pages, struct efx_vi_dma_map_state **dmh_out) +{ + struct efx_vi_state *efx_state = vih; + int order = fls(n_pages - 1), rc, i, evq_id; + dma_addr_t dma_addr; + struct efx_vi_dma_map_state *dm_state; + + if (n_pages != (1 << order)) { + EFRM_WARN("%s: Can only allocate buffers in power of 2 " + "sizes (not %d)", __FUNCTION__, n_pages); + return -EINVAL; + } + + dm_state = kmalloc(sizeof(struct efx_vi_dma_map_state), GFP_KERNEL); + if (!dm_state) + return -ENOMEM; + + dm_state->dma_addrs = kmalloc(sizeof(dma_addr_t) * n_pages, + GFP_KERNEL); + if (!dm_state->dma_addrs) { + kfree(dm_state); + return -ENOMEM; + } + + rc = efrm_buffer_table_alloc(order, &dm_state->bt_handle); + if (rc < 0) { + kfree(dm_state->dma_addrs); + kfree(dm_state); + return rc; + } + + evq_id = EFRM_RESOURCE_INSTANCE(efx_state->vi_res->rs.rs_handle); +#if 0 + EFRM_WARN("%s: mapping %d pages to evq %d, bt_ids %d-%d\n", + __FUNCTION__, n_pages, evq_id, + dm_state->bt_handle.base, + dm_state->bt_handle.base + n_pages); +#endif + for (i = 0; i < n_pages; i++) { + + dma_addr = (dma_addr_t)bus_dev_addrs[i]; + + efrm_buffer_table_set(&dm_state->bt_handle, i, dma_addr, + evq_id); + + dm_state->dma_addrs[i] = dma_addr; + + /* Would be nice to not have to call commit each time, but + * comment says there are hardware restrictions on how often + * you can go without it, so do this to be safe */ + efrm_buffer_table_commit(); + } + + dm_state->n_pages = n_pages; + + *dmh_out = dm_state; + + return 0; +} +EXPORT_SYMBOL(efx_vi_dma_map_addrs); + +void +efx_vi_dma_unmap_pages(struct efx_vi_state *vih, + struct efx_vi_dma_map_state *dmh) +{ + struct efx_vi_state *efx_state = vih; + struct efx_vi_dma_map_state *dm_state = + (struct efx_vi_dma_map_state *)dmh; + int i; + + efrm_buffer_table_free(&dm_state->bt_handle); + + for (i = 0; i < dm_state->n_pages; ++i) + pci_unmap_page(linux_efhw_nic + (efrm_nic_table.nic[efx_state->nic_index])->pci_dev, + dm_state->dma_addrs[i], PAGE_SIZE, PCI_DMA_TODEVICE); + + kfree(dm_state->dma_addrs); + kfree(dm_state); + + return; +} +EXPORT_SYMBOL(efx_vi_dma_unmap_pages); + +void +efx_vi_dma_unmap_addrs(struct efx_vi_state *vih, + struct efx_vi_dma_map_state *dmh) +{ + struct efx_vi_dma_map_state *dm_state = + (struct efx_vi_dma_map_state *)dmh; + + efrm_buffer_table_free(&dm_state->bt_handle); + + kfree(dm_state->dma_addrs); + kfree(dm_state); + + return; +} +EXPORT_SYMBOL(efx_vi_dma_unmap_addrs); + +unsigned +efx_vi_dma_get_map_addr(struct efx_vi_state *vih, + struct efx_vi_dma_map_state *dmh) +{ + struct efx_vi_dma_map_state *dm_state = + (struct efx_vi_dma_map_state *)dmh; + + return EFHW_BUFFER_ADDR(dm_state->bt_handle.base, 0); +} +EXPORT_SYMBOL(efx_vi_dma_get_map_addr); + +#if EFX_VI_STATIC_FILTERS +static int +get_filter(struct 
efx_vi_state *efx_state, + efrm_resource_handle_t pthandle, struct filter_resource **fres_out) +{ + struct filter_list_t *flist; + if (efx_state->free_fres == NULL) + return -ENOMEM; + else { + flist = efx_state->free_fres; + efx_state->free_fres = flist->next; + flist->next = efx_state->used_fres; + efx_state->used_fres = flist; + *fres_out = flist->fres; + return 0; + } +} +#endif + +static void +release_filter(struct efx_vi_state *efx_state, struct filter_resource *fres) +{ +#if EFX_VI_STATIC_FILTERS + struct filter_list_t *flist = efx_state->used_fres, *prev = NULL; + while (flist) { + if (flist->fres == fres) { + if (prev) + prev->next = flist->next; + else + efx_state->used_fres = flist->next; + flist->next = efx_state->free_fres; + efx_state->free_fres = flist; + return; + } + prev = flist; + flist = flist->next; + } + EFRM_ERR("%s: couldn't find filter", __FUNCTION__); +#else + return efrm_filter_resource_release(fres); +#endif +} + +int +efx_vi_filter(struct efx_vi_state *vih, int protocol, + unsigned ip_addr_be32, int port_le16, + struct filter_resource_t **fh_out) +{ + struct efx_vi_state *efx_state = vih; + struct filter_resource *frs; + int rc; + +#if EFX_VI_STATIC_FILTERS + rc = get_filter(efx_state, efx_state->vi_res->rs.rs_handle, &frs); +#else + rc = efrm_filter_resource_alloc(efx_state->vi_res, &frs); +#endif + if (rc < 0) + return rc; + + /* Add the hardware filter. We pass in the source port and address + * as 0 (wildcard) to minimise the number of filters needed. */ + if (protocol == IPPROTO_TCP) { + rc = efrm_filter_resource_tcp_set(frs, 0, 0, ip_addr_be32, + port_le16); + } else { + rc = efrm_filter_resource_udp_set(frs, 0, 0, ip_addr_be32, + port_le16); + } + + *fh_out = (struct filter_resource_t *)frs; + + return rc; +} +EXPORT_SYMBOL(efx_vi_filter); + +int +efx_vi_filter_stop(struct efx_vi_state *vih, struct filter_resource_t *fh) +{ + struct efx_vi_state *efx_state = vih; + struct filter_resource *frs = (struct filter_resource *)fh; + int rc; + + rc = efrm_filter_resource_clear(frs); + release_filter(efx_state, frs); + + return rc; +} +EXPORT_SYMBOL(efx_vi_filter_stop); + +int +efx_vi_hw_resource_get_virt(struct efx_vi_state *vih, + struct efx_vi_hw_resource_metadata *mdata, + struct efx_vi_hw_resource *hw_res_array, + int *length) +{ + EFRM_NOTICE("%s: TODO!", __FUNCTION__); + + return 0; +} +EXPORT_SYMBOL(efx_vi_hw_resource_get_virt); + +#if defined(__CI_HARDWARE_CONFIG_FALCON__) +int +efx_vi_hw_resource_get_phys(struct efx_vi_state *vih, + struct efx_vi_hw_resource_metadata *mdata, + struct efx_vi_hw_resource *hw_res_array, + int *length) +{ + struct efx_vi_state *efx_state = vih; + int i, ni = efx_state->nic_index; + struct linux_efhw_nic *lnic = linux_efhw_nic(efrm_nic_table.nic[ni]); + unsigned long phys = lnic->ctr_ap_pci_addr; + struct efrm_resource *ep_res = &efx_state->vi_res->rs; + unsigned ep_mmap_bytes; + + if (*length < EFX_VI_HW_RESOURCE_MAXSIZE) + return -EINVAL; + + mdata->version = 0; + + mdata->nic_arch = efrm_nic_table.nic[ni]->devtype.arch; + mdata->nic_variant = efrm_nic_table.nic[ni]->devtype.variant; + mdata->nic_revision = efrm_nic_table.nic[ni]->devtype.revision; + + mdata->evq_order = + efx_state->vi_res->nic_info[ni].evq_pages.iobuff.order; + mdata->evq_offs = efx_state->vi_res->nic_info[ni].evq_pages.iobuff_off; + mdata->evq_capacity = efx_vi_eventq_size; + mdata->instance = EFRM_RESOURCE_INSTANCE(ep_res->rs_handle); + mdata->rx_capacity = FALCON_DMA_Q_DEFAULT_RX_SIZE; + mdata->tx_capacity = FALCON_DMA_Q_DEFAULT_TX_SIZE; + + 
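+	/* Each hw_res_array[] entry filled in below describes one hardware
+	 * resource handed back to the caller: the TX and RX DMA queue kernel
+	 * mappings, the event-queue timer and read-pointer registers, the
+	 * event-queue memory and the doorbell page. */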
ep_mmap_bytes = FALCON_DMA_Q_DEFAULT_MMAP; + EFRM_ASSERT(ep_mmap_bytes == PAGE_SIZE * 2); + +#ifndef NDEBUG + { + /* Sanity about doorbells */ + unsigned long tx_dma_page_addr, rx_dma_page_addr; + + /* get rx doorbell address */ + rx_dma_page_addr = + phys + falcon_rx_dma_page_addr(mdata->instance); + /* get tx doorbell address */ + tx_dma_page_addr = + phys + falcon_tx_dma_page_addr(mdata->instance); + + /* Check the lower bits of the TX doorbell will be + * consistent. */ + EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST & + FALCON_DMA_PAGE_MASK) == + (TX_DESC_UPD_REG_PAGE123K_OFST & + FALCON_DMA_PAGE_MASK)); + + /* Check the lower bits of the RX doorbell will be + * consistent. */ + EFRM_ASSERT((RX_DESC_UPD_REG_PAGE4_OFST & + FALCON_DMA_PAGE_MASK) == + (RX_DESC_UPD_REG_PAGE123K_OFST & + FALCON_DMA_PAGE_MASK)); + + /* Check that the doorbells will be in the same page. */ + EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST & PAGE_MASK) == + (RX_DESC_UPD_REG_PAGE4_OFST & PAGE_MASK)); + + /* Check that the doorbells are in the same page. */ + EFRM_ASSERT((tx_dma_page_addr & PAGE_MASK) == + (rx_dma_page_addr & PAGE_MASK)); + + /* Check that the TX doorbell offset is correct. */ + EFRM_ASSERT((TX_DESC_UPD_REG_PAGE4_OFST & ~PAGE_MASK) == + (tx_dma_page_addr & ~PAGE_MASK)); + + /* Check that the RX doorbell offset is correct. */ + EFRM_ASSERT((RX_DESC_UPD_REG_PAGE4_OFST & ~PAGE_MASK) == + (rx_dma_page_addr & ~PAGE_MASK)); + } +#endif + + i = 0; + hw_res_array[i].type = EFX_VI_HW_RESOURCE_TXDMAQ; + hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL; + hw_res_array[i].more_to_follow = 0; + hw_res_array[i].length = PAGE_SIZE; + hw_res_array[i].address = + (unsigned long)efx_state->vi_res->nic_info[ni]. + dmaq_pages[EFRM_VI_RM_DMA_QUEUE_TX].kva; + + i++; + hw_res_array[i].type = EFX_VI_HW_RESOURCE_RXDMAQ; + hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL; + hw_res_array[i].more_to_follow = 0; + hw_res_array[i].length = PAGE_SIZE; + hw_res_array[i].address = + (unsigned long)efx_state->vi_res->nic_info[ni]. 
+ dmaq_pages[EFRM_VI_RM_DMA_QUEUE_RX].kva; + + /* NB EFX_VI_HW_RESOURCE_TXBELL not used on Falcon */ + /* NB EFX_VI_HW_RESOURCE_RXBELL not used on Falcon */ + + i++; + hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQTIMER; + hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL; + hw_res_array[i].more_to_follow = 0; + hw_res_array[i].length = PAGE_SIZE; + hw_res_array[i].address = + (unsigned long)phys + falcon_timer_page_addr(mdata->instance); + + /* NB EFX_VI_HW_RESOURCE_EVQPTR not used on Falcon */ + + i++; + switch (efrm_nic_table.nic[ni]->devtype.variant) { + case 'A': + hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQRPTR; + hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL; + hw_res_array[i].more_to_follow = 0; + hw_res_array[i].length = PAGE_SIZE; + hw_res_array[i].address = (unsigned long)phys + + EVQ_RPTR_REG_OFST + + (FALCON_REGISTER128 * mdata->instance); + break; + case 'B': + hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQRPTR_OFFSET; + hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL; + hw_res_array[i].more_to_follow = 0; + hw_res_array[i].length = PAGE_SIZE; + hw_res_array[i].address = + (unsigned long)FALCON_EVQ_RPTR_REG_P0; + break; + default: + EFRM_ASSERT(0); + break; + } + + i++; + hw_res_array[i].type = EFX_VI_HW_RESOURCE_EVQMEMKVA; + hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_IOBUFFER; + hw_res_array[i].more_to_follow = 0; + hw_res_array[i].length = PAGE_SIZE; + hw_res_array[i].address = (unsigned long)efx_state->vi_res-> + nic_info[ni].evq_pages.iobuff.kva; + + i++; + hw_res_array[i].type = EFX_VI_HW_RESOURCE_BELLPAGE; + hw_res_array[i].mem_type = EFX_VI_HW_RESOURCE_PERIPHERAL; + hw_res_array[i].more_to_follow = 0; + hw_res_array[i].length = PAGE_SIZE; + hw_res_array[i].address = + (unsigned long)(phys + + falcon_tx_dma_page_addr(mdata->instance)) + >> PAGE_SHIFT; + + i++; + + EFRM_ASSERT(i <= *length); + + *length = i; + + return 0; +} +EXPORT_SYMBOL(efx_vi_hw_resource_get_phys); +#endif Index: head-2008-03-17/drivers/net/sfc/sfc_resource/eventq.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/eventq.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,320 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains event queue support. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include +#include +#include +#include +#include + +#define KEVENTQ_MAGIC 0x07111974 + +/*! Helper function to allocate the iobuffer needed by an eventq + * - it ensures the eventq has the correct alignment for the NIC + * + * \param rm Event-queue resource manager + * \param instance Event-queue instance (index) + * \param buf_bytes Requested size of eventq + * \return < 0 if iobuffer allocation fails + */ +int +efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic, + struct eventq_resource_hardware *h, + int evq_instance, unsigned buf_bytes) +{ + unsigned int page_order; + int rc; + + /* Allocate an iobuffer. */ + page_order = get_order(buf_bytes); + + h->iobuff_off = 0; + + EFHW_TRACE("allocating eventq size %x", + 1u << (page_order + PAGE_SHIFT)); + rc = efhw_iopages_alloc(nic, &h->iobuff, page_order); + if (rc < 0) { + EFHW_WARN("%s: failed to allocate %u pages", + __FUNCTION__, 1u << page_order); + return rc; + } + + /* Set the eventq pages to match EFHW_CLEAR_EVENT() */ + if (EFHW_CLEAR_EVENT_VALUE) + memset(efhw_iopages_ptr(&h->iobuff) + h->iobuff_off, + EFHW_CLEAR_EVENT_VALUE, (1u << page_order) * PAGE_SIZE); + + EFHW_TRACE("%s: allocated %u pages", __FUNCTION__, 1u << (page_order)); + + /* For Falcon the NIC is programmed with the base buffer address of a + * contiguous region of buffer space. This means that larger than a + * PAGE event queues can be expected to allocate even when the host's + * physical memory is fragmented */ + EFHW_ASSERT(efhw_nic_have_hw(nic)); + EFHW_ASSERT(page_order <= h->buf_tbl_alloc.order); + + /* Initialise the buffer table entries. */ + falcon_nic_buffer_table_set_n(nic, h->buf_tbl_alloc.base, + efhw_iopages_dma_addr(&h->iobuff) + + h->iobuff_off, EFHW_NIC_PAGE_SIZE, 0, + 1 << page_order, 0); + + if (evq_instance >= FALCON_EVQ_TBL_RESERVED) + falcon_nic_buffer_table_confirm(nic); + return 0; +} + +/********************************************************************** + * Kernel event queue management. + */ + +/* Values for [struct efhw_keventq::lock] field. */ +#define KEVQ_UNLOCKED 0 +#define KEVQ_LOCKED 1 +#define KEVQ_RECHECK 2 + +int +efhw_keventq_ctor(struct efhw_nic *nic, int instance, + struct efhw_keventq *evq, + struct efhw_ev_handler *ev_handlers) +{ + int rc; + unsigned buf_bytes = evq->hw.capacity * sizeof(efhw_event_t); + + evq->instance = instance; + evq->ev_handlers = ev_handlers; + + /* allocate an IObuffer for the eventq */ + rc = efhw_nic_event_queue_alloc_iobuffer(nic, &evq->hw, evq->instance, + buf_bytes); + if (rc < 0) + return rc; + + /* Zero the timer-value for this queue. + AND Tell the nic about the event queue. 
*/ + efhw_nic_event_queue_enable(nic, evq->instance, evq->hw.capacity, + efhw_iopages_dma_addr(&evq->hw.iobuff) + + evq->hw.iobuff_off, + evq->hw.buf_tbl_alloc.base); + + evq->lock = KEVQ_UNLOCKED; + evq->evq_base = efhw_iopages_ptr(&evq->hw.iobuff) + evq->hw.iobuff_off; + evq->evq_ptr = 0; + evq->evq_mask = (evq->hw.capacity * sizeof(efhw_event_t)) - 1u; + + EFHW_TRACE("%s: [%d] base=%p end=%p", __FUNCTION__, evq->instance, + evq->evq_base, evq->evq_base + buf_bytes); + + return 0; +} + +void efhw_keventq_dtor(struct efhw_nic *nic, struct efhw_keventq *evq) +{ + EFHW_ASSERT(evq); + + EFHW_TRACE("%s: [%d]", __FUNCTION__, evq->instance); + + /* Zero the timer-value for this queue. + And Tell NIC to stop using this event queue. */ + efhw_nic_event_queue_disable(nic, evq->instance, 0); + + /* free the pages used by the eventq itself */ + efhw_iopages_free(nic, &evq->hw.iobuff); +} + +void +efhw_handle_txdmaq_flushed(struct efhw_nic *nic, struct efhw_ev_handler *h, + efhw_event_t *evp) +{ + int instance = (int)FALCON_EVENT_TX_FLUSH_Q_ID(evp); + EFHW_TRACE("%s: instance=%d", __FUNCTION__, instance); + + if (!h->dmaq_flushed_fn) { + EFHW_WARN("%s: no handler registered", __FUNCTION__); + return; + } + + h->dmaq_flushed_fn(nic, instance, false); +} + +void +efhw_handle_rxdmaq_flushed(struct efhw_nic *nic, struct efhw_ev_handler *h, + efhw_event_t *evp) +{ + int instance = (int)FALCON_EVENT_RX_FLUSH_Q_ID(evp); + EFHW_TRACE("%s: instance=%d", __FUNCTION__, instance); + + if (!h->dmaq_flushed_fn) { + EFHW_WARN("%s: no handler registered", __FUNCTION__); + return; + } + + h->dmaq_flushed_fn(nic, instance, true); +} + +void +efhw_handle_wakeup_event(struct efhw_nic *nic, struct efhw_ev_handler *h, + efhw_event_t *evp) +{ + if (!h->wakeup_fn) { + EFHW_WARN("%s: no handler registered", __FUNCTION__); + return; + } + + h->wakeup_fn(nic, evp); +} + +void +efhw_handle_timeout_event(struct efhw_nic *nic, struct efhw_ev_handler *h, + efhw_event_t *evp) +{ + if (!h->timeout_fn) { + EFHW_WARN("%s: no handler registered", __FUNCTION__); + return; + } + + h->timeout_fn(nic, evp); +} + +/********************************************************************** + * Kernel event queue event handling. + */ + +int efhw_keventq_poll(struct efhw_nic *nic, struct efhw_keventq *q) +{ + efhw_event_t *ev; + int l, count = 0; + + EFHW_ASSERT(nic); + EFHW_ASSERT(q); + EFHW_ASSERT(q->ev_handlers); + + /* Acquire the lock, or mark the queue as needing re-checking. 
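+	 * The lock is a small three-state machine over the KEVQ_* values:
+	 * a poller moves UNLOCKED -> LOCKED with cmpxchg and processes
+	 * events; a caller that finds the queue LOCKED marks it RECHECK and
+	 * returns, so only one thread ever polls; before releasing, the
+	 * active poller turns RECHECK back into LOCKED and loops again, and
+	 * only drops to UNLOCKED once no re-check has been requested.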
*/ + for (;;) { + l = q->lock; + if (l == KEVQ_UNLOCKED) { + if ((int)cmpxchg(&q->lock, l, KEVQ_LOCKED) == l) + break; + } else if (l == KEVQ_LOCKED) { + if ((int)cmpxchg(&q->lock, l, KEVQ_RECHECK) == l) + return 0; + } else { /* already marked for re-checking */ + EFHW_ASSERT(l == KEVQ_RECHECK); + return 0; + } + } + + if (unlikely(EFHW_EVENT_OVERFLOW(q, q))) + goto overflow; + + ev = EFHW_EVENT_PTR(q, q, 0); + +#ifndef NDEBUG + if (!EFHW_IS_EVENT(ev)) + EFHW_TRACE("%s: %d NO EVENTS!", __FUNCTION__, q->instance); +#endif + + for (;;) { + /* Convention for return codes for handlers is: + ** 0 - no error, event consumed + ** 1 - no error, event not consumed + ** -ve - error, event not consumed + */ + if (likely(EFHW_IS_EVENT(ev))) { + count++; + + switch (FALCON_EVENT_CODE(ev)) { + +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + case FALCON_EVENT_CODE_CHAR: + falcon_handle_char_event(nic, q->ev_handlers, + ev); + break; +#endif + + default: + EFHW_ERR("efhw_keventq_poll: [%d] UNEXPECTED " + "EVENT:"FALCON_EVENT_FMT, + q->instance, + FALCON_EVENT_PRI_ARG(*ev)); + } + + EFHW_CLEAR_EVENT(ev); + EFHW_EVENTQ_NEXT(q); + + ev = EFHW_EVENT_PTR(q, q, 0); + } else { + /* No events left. Release the lock (checking if we + * need to re-poll to avoid race). */ + l = q->lock; + if (l == KEVQ_LOCKED) { + if ((int)cmpxchg(&q->lock, l, KEVQ_UNLOCKED) + == l) { + EFHW_TRACE + ("efhw_keventq_poll: %d clean exit", + q->instance); + goto clean_exit; + } + } + + /* Potentially more work to do. */ + l = q->lock; + EFHW_ASSERT(l == KEVQ_RECHECK); + EFHW_TEST((int)cmpxchg(&q->lock, l, KEVQ_LOCKED) == l); + EFHW_TRACE("efhw_keventq_poll: %d re-poll required", + q->instance); + } + } + + /* shouldn't get here */ + EFHW_ASSERT(0); + +overflow: + /* ?? Oh dear. Should we poll everything that could have possibly + ** happened? Or merely cry out in anguish... + */ + EFHW_WARN("efhw_keventq_poll: %d ***** OVERFLOW nic %d *****", + q->instance, nic->index); + + q->lock = KEVQ_UNLOCKED; + return count; + +clean_exit: +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + /* Ack the processed events so that this event queue can potentially + raise interrupts again */ + falcon_nic_evq_ack(nic, q->instance, + (EFHW_EVENT_OFFSET(q, q, 0) / sizeof(efhw_event_t)), + false); +#endif + return count; +} Index: head-2008-03-17/drivers/net/sfc/sfc_resource/falcon.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/falcon.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,2758 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains Falcon hardware support. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +/*---------------------------------------------------------------------------- + * + * Workarounds and options + * + *---------------------------------------------------------------------------*/ + +/* on for debug builds */ +#ifndef NDEBUG +# define FALCON_FULL_FILTER_CACHE 1 /* complete SW shadow of filter tbl */ +# define FALCON_VERIFY_FILTERS 0 +#else /* Also adds duplicate filter check */ +# define FALCON_FULL_FILTER_CACHE 1 /* keep this on for some security */ +# define FALCON_VERIFY_FILTERS 0 +#endif + +/* options */ +#define RX_FILTER_CTL_SRCH_LIMIT_TCP_FULL 8 /* default search limit */ +#define RX_FILTER_CTL_SRCH_LIMIT_TCP_WILD 8 /* default search limit */ +#define RX_FILTER_CTL_SRCH_LIMIT_UDP_FULL 8 /* default search limit */ +#define RX_FILTER_CTL_SRCH_LIMIT_UDP_WILD 8 /* default search limit */ +#define RX_FILTER_CTL_SRCH_FUDGE_WILD 3 /* increase the search limit */ +#define RX_FILTER_CTL_SRCH_FUDGE_FULL 1 /* increase the search limit */ + +#define FALCON_MAC_SET_TYPE_BY_SPEED 1 + +/* FIXME: We should detect mode at runtime. */ +#define FALCON_BUFFER_TABLE_FULL_MODE 1 + +/*---------------------------------------------------------------------------- + * + * Debug Macros + * + *---------------------------------------------------------------------------*/ + +#ifndef __KERNEL__ +#define _DEBUG_SYM_ extern +#else +#define _DEBUG_SYM_ static inline +#endif + + /*---------------------------------------------------------------------------- + * + * Macros and forward declarations + * + *--------------------------------------------------------------------------*/ + +#define FALCON_REGION_NUM 4 /* number of supported memory regions */ + +#define FALCON_BUFFER_TBL_HALF_BYTES 4 +#define FALCON_BUFFER_TBL_FULL_BYTES 8 + +/* Shadow buffer table - hack for testing only */ +#if FALCON_BUFFER_TABLE_FULL_MODE == 0 +# define FALCON_USE_SHADOW_BUFFER_TABLE 1 +#else +# define FALCON_USE_SHADOW_BUFFER_TABLE 0 +#endif + +#if FALCON_USE_SHADOW_BUFFER_TABLE +static uint64_t _falcon_buffer_table[FALCON_BUFFER_TBL_NUM]; +#endif + +/*---------------------------------------------------------------------------- + * + * Header assertion checks + * + *---------------------------------------------------------------------------*/ + +#define FALCON_ASSERT_VALID() /* nothing yet */ + +/* Falcon has a 128bit register model but most registers have useful + defaults or only implement a small number of bits. Some registers + can be programmed 32bits UNLOCKED all others should be interlocked + against other threads within the same protection domain. + + Aim is for software to perform the minimum number of writes and + also to minimise the read-modify-write activity (which generally + indicates a lack of clarity in the use model). + + Registers which are programmed in this module are listed below + together with the method of access. Care must be taken to ensure + remain adequate if the register spec changes. 
+ + All 128bits programmed + FALCON_BUFFER_TBL_HALF + RX_FILTER_TBL + TX_DESC_PTR_TBL + RX_DESC_PTR_TBL + DRV_EV_REG + + All 64bits programmed + FALCON_BUFFER_TBL_FULL + + 32 bits are programmed (UNLOCKED) + EVQ_RPTR_REG + + Low 64bits programmed remainder are written with a random number + RX_DC_CFG_REG + TX_DC_CFG_REG + SRM_RX_DC_CFG_REG + SRM_TX_DC_CFG_REG + BUF_TBL_CFG_REG + BUF_TBL_UPD_REG + SRM_UPD_EVQ_REG + EVQ_PTR_TBL + TIMER_CMD_REG + TX_PACE_TBL + FATAL_INTR_REG + INT_EN_REG (When enabling interrupts) + TX_FLUSH_DESCQ_REG + RX_FLUSH_DESCQ + + Read Modify Write on low 32bits remainder are written with a random number + INT_EN_REG (When sending a driver interrupt) + DRIVER_REGX + + Read Modify Write on low 64bits remainder are written with a random number + SRM_CFG_REG_OFST + RX_CFG_REG_OFST + RX_FILTER_CTL_REG + + Read Modify Write on full 128bits + TXDP_RESERVED_REG (aka TXDP_UNDOCUMENTED) + TX_CFG_REG + +*/ + +/*---------------------------------------------------------------------------- + * + * Filters static data + * + *---------------------------------------------------------------------------*/ + +/* Defaults are set here to support dma.c */ +static unsigned tcp_full_srch_limit = RX_FILTER_CTL_SRCH_LIMIT_TCP_FULL; +static unsigned tcp_wild_srch_limit = RX_FILTER_CTL_SRCH_LIMIT_TCP_WILD; +static unsigned udp_full_srch_limit = RX_FILTER_CTL_SRCH_LIMIT_UDP_FULL; +static unsigned udp_wild_srch_limit = RX_FILTER_CTL_SRCH_LIMIT_UDP_WILD; + +#if FALCON_VERIFY_FILTERS +static void _falcon_nic_ipfilter_sanity(struct efhw_nic *nic); +#endif + +/*---------------------------------------------------------------------------- + * + * Filters low-level register interface + * + *---------------------------------------------------------------------------*/ + +/* Build the filter entry */ +static void +_falcon_nic_ipfilter_build(struct efhw_nic *nic, + int tcp, int full, int rss_b0, int scat_b0, + uint filter_i, uint dmaq_id, + unsigned saddr_le32, unsigned sport_le16, + unsigned daddr_le32, unsigned dport_le16, + uint64_t *q0, uint64_t *q1) +{ + uint64_t v1, v2, v3, v4; + int type = tcp << 4 | full; + + v4 = (((!tcp) << __DW4(TCP_UDP_1_LBN)) | + (dmaq_id << __DW4(RXQ_ID_1_LBN))); + + switch (nic->devtype.variant) { + case 'A': + EFHW_ASSERT(!rss_b0); + break; + case 'B': + v4 |= scat_b0 << __DW4(SCATTER_EN_1_B0_LBN); + v4 |= rss_b0 << __DW4(RSS_EN_1_B0_LBN); + break; + default: + EFHW_ASSERT(0); + break; + } + + v3 = daddr_le32; + + switch (type) { + + case 0x11: /* TCP_FULL */ + case 0x01: /* UDP_FULL */ + v2 = ((dport_le16 << __DW2(DEST_PORT_TCP_1_LBN)) | + (__HIGH(saddr_le32, SRC_IP_1_LBN, SRC_IP_1_WIDTH))); + v1 = ((__LOW(saddr_le32, SRC_IP_1_LBN, SRC_IP_1_WIDTH)) | + (sport_le16 << SRC_TCP_DEST_UDP_1_LBN)); + break; + + case 0x10: /* TCP_WILD */ + v2 = ((uint64_t) dport_le16 << __DW2(DEST_PORT_TCP_1_LBN)); + v1 = 0; + break; + + case 0x00: /* UDP_WILD */ + v2 = 0; + v1 = ((uint64_t) dport_le16 << SRC_TCP_DEST_UDP_0_LBN); + break; + + default: + EFHW_ASSERT(0); + v2 = 0; + v1 = 0; + } + + *q0 = (v2 << 32) | v1; + *q1 = (v4 << 32) | v3; +} + +static void +_falcon_nic_ipfilter_set(struct efhw_nic *nic, int tcp, + int full, int rss_b0, int scat_b0, + uint filter_i, uint dmaq_id, + unsigned saddr_le32, unsigned sport_le16, + unsigned daddr_le32, unsigned dport_le16) +{ + uint64_t q0, q1; + + /* wish you wouldn't do this */ + EFHW_BUILD_ASSERT(RX_FILTER_TBL1_OFST == + RX_FILTER_TBL0_OFST + FALCON_REGISTER128); + EFHW_BUILD_ASSERT(TCP_UDP_1_LBN == TCP_UDP_0_LBN); + 
EFHW_BUILD_ASSERT(RXQ_ID_1_LBN == RXQ_ID_0_LBN); + EFHW_BUILD_ASSERT(DEST_IP_1_LBN == DEST_IP_0_LBN); + EFHW_BUILD_ASSERT(DEST_PORT_TCP_1_LBN == DEST_PORT_TCP_0_LBN); + EFHW_BUILD_ASSERT(SRC_IP_1_LBN == SRC_IP_0_LBN); + EFHW_BUILD_ASSERT(SRC_TCP_DEST_UDP_1_LBN == SRC_TCP_DEST_UDP_0_LBN); + EFHW_BUILD_ASSERT(SCATTER_EN_1_B0_LBN == SCATTER_EN_0_B0_LBN); + EFHW_BUILD_ASSERT(RSS_EN_1_B0_LBN == RSS_EN_0_B0_LBN); + + EFHW_BUILD_ASSERT(TCP_UDP_1_WIDTH == TCP_UDP_0_WIDTH); + EFHW_BUILD_ASSERT(RXQ_ID_1_WIDTH == RXQ_ID_0_WIDTH); + EFHW_BUILD_ASSERT(DEST_IP_1_WIDTH == DEST_IP_0_WIDTH); + EFHW_BUILD_ASSERT(DEST_PORT_TCP_1_WIDTH == DEST_PORT_TCP_0_WIDTH); + EFHW_BUILD_ASSERT(SRC_IP_1_WIDTH == SRC_IP_0_WIDTH); + EFHW_BUILD_ASSERT(SRC_TCP_DEST_UDP_1_WIDTH == SRC_TCP_DEST_UDP_0_WIDTH); + EFHW_BUILD_ASSERT(SCATTER_EN_1_B0_WIDTH == SCATTER_EN_0_B0_WIDTH); + EFHW_BUILD_ASSERT(RSS_EN_1_B0_WIDTH == RSS_EN_0_B0_WIDTH); + + /* TODO: Use filter table 1 as well */ + ulong offset = RX_FILTER_TBL0_OFST + filter_i * 2 * FALCON_REGISTER128; + + EFHW_TRACE("%s[%x]: offset=%lx", __FUNCTION__, filter_i, offset); + + EFHW_TRACE("%s[%x]: filter %d tcp %d full %d src=%x:%x dest=%x:%x%s%s", + __FUNCTION__, filter_i, tcp, full, dmaq_id, + saddr_le32, sport_le16, daddr_le32, dport_le16, + rss_b0 ? " RSS" : "", scat_b0 ? " SCAT" : ""); + + EFHW_ASSERT(filter_i < nic->filter_tbl_size); + + /* dword 4 */ + __DW4CHCK(TCP_UDP_1_LBN, TCP_UDP_1_WIDTH); + __DW4CHCK(RXQ_ID_1_LBN, RXQ_ID_1_WIDTH); + + __RANGECHCK(tcp, TCP_UDP_1_WIDTH); + __RANGECHCK(dmaq_id, RXQ_ID_1_WIDTH); + + /* dword 3 */ + __DW3CHCK(DEST_IP_1_LBN, DEST_IP_1_WIDTH); + __RANGECHCK(daddr_le32, DEST_IP_1_WIDTH); + + /* dword 2 */ + __DW2CHCK(DEST_PORT_TCP_1_LBN, DEST_PORT_TCP_1_WIDTH); + __LWCHK(SRC_IP_1_LBN, SRC_IP_1_WIDTH); + __RANGECHCK(saddr_le32, SRC_IP_1_WIDTH); + + /* dword 1 */ + __DWCHCK(SRC_TCP_DEST_UDP_1_LBN, SRC_TCP_DEST_UDP_1_WIDTH); + __RANGECHCK(sport_le16, SRC_TCP_DEST_UDP_1_WIDTH); + __RANGECHCK(dport_le16, SRC_TCP_DEST_UDP_1_WIDTH); + + /* Falcon requires 128 bit atomic access for this register */ + _falcon_nic_ipfilter_build(nic, tcp, full, rss_b0, scat_b0, + filter_i, dmaq_id, saddr_le32, sport_le16, + daddr_le32, dport_le16, &q0, &q1); + + EFHW_TRACE("%s[%x]@%p+%lx: %" PRIx64 " %" PRIx64, __FUNCTION__, + filter_i, EFHW_KVA(nic), offset, q0, q1); + + falcon_write_qq(EFHW_KVA(nic) + offset, q0, q1); + mmiowb(); + +#if FALCON_VERIFY_FILTERS + { + uint64_t q0read, q1read; + + /* Read a different entry first - entry BIU flushed shadow */ + falcon_read_qq(EFHW_KVA(nic) + offset+0x10, &q0read, &q1read); + falcon_read_qq(EFHW_KVA(nic) + offset, &q0read, &q1read); + EFHW_ASSERT(q0read == q0); + EFHW_ASSERT(q1read == q1); + + _falcon_nic_ipfilter_sanity(nic); + } +#endif +} + +static void _falcon_nic_ipfilter_clear(struct efhw_nic *nic, uint filter_i) +{ + /* TODO: Use filter table 1 as well */ + ulong offset = RX_FILTER_TBL0_OFST + filter_i * 2 * FALCON_REGISTER128; + + EFHW_ASSERT(filter_i < nic->filter_tbl_size); + + EFHW_TRACE("%s[%x]", __FUNCTION__, filter_i); + + /* Falcon requires 128 bit atomic access for this register */ + falcon_write_qq(EFHW_KVA(nic) + offset, 0, 0); + mmiowb(); +#if FALCON_VERIFY_FILTERS + { + uint64_t q0read, q1read; + + /* Read a different entry first - entry BIU flushed shadow */ + falcon_read_qq(EFHW_KVA(nic) + offset+0x10, &q0read, &q1read); + falcon_read_qq(EFHW_KVA(nic) + offset, &q0read, &q1read); + EFHW_ASSERT(q0read == 0); + EFHW_ASSERT(q1read == 0); + + _falcon_nic_ipfilter_sanity(nic); + } +#endif +} + 
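+/* The filter helpers above key their behaviour off a 4-bit type value
+ * built as (tcp << 4 | full).  The sketch below spells that mapping out;
+ * it is illustrative only (the helper name is made up, hence #if 0) and
+ * is not part of the driver.
+ */
+#if 0
+static const char *falcon_ipfilter_type_name(int tcp, int full)
+{
+	switch (tcp << 4 | full) {
+	case 0x11: return "TCP_FULL";	/* src/dst address and ports match */
+	case 0x10: return "TCP_WILD";	/* destination address/port only */
+	case 0x01: return "UDP_FULL";
+	case 0x00: return "UDP_WILD";
+	default:   return "invalid";
+	}
+}
+#endif
+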
+/*---------------------------------------------------------------------------- + * + * DMAQ low-level register interface + * + *---------------------------------------------------------------------------*/ + +static unsigned dmaq_sizes[] = { + 512, + EFHW_1K, + EFHW_2K, + EFHW_4K, +}; + +#define N_DMAQ_SIZES (sizeof(dmaq_sizes) / sizeof(dmaq_sizes[0])) + +static inline ulong falcon_dma_tx_q_offset(struct efhw_nic *nic, unsigned dmaq) +{ + EFHW_ASSERT(dmaq < FALCON_DMAQ_NUM); + return TX_DESC_PTR_TBL_OFST + dmaq * FALCON_REGISTER128; +} + +static inline uint falcon_dma_tx_q_size_index(uint dmaq_size) +{ + uint i; + + /* size must be one of the various options, otherwise we assert */ + for (i = 0; i < N_DMAQ_SIZES; i++) { + if (dmaq_size == dmaq_sizes[i]) + break; + } + EFHW_ASSERT(i < N_DMAQ_SIZES); + return i; +} + +static void +falcon_dmaq_tx_q_init(struct efhw_nic *nic, + uint dmaq, uint evq_id, uint own_id, + uint tag, uint dmaq_size, uint buf_idx, uint flags) +{ + FALCON_LOCK_DECL; + uint index, desc_type; + uint64_t val1, val2, val3; + ulong offset; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + + /* Q attributes */ + int iscsi_hdig_en = ((flags & EFHW_VI_ISCSI_TX_HDIG_EN) != 0); + int iscsi_ddig_en = ((flags & EFHW_VI_ISCSI_TX_DDIG_EN) != 0); + int csum_ip_dis = ((flags & EFHW_VI_TX_IP_CSUM_DIS) != 0); + int csum_tcp_dis = ((flags & EFHW_VI_TX_TCPUDP_CSUM_DIS) != 0); + int non_ip_drop_dis = ((flags & EFHW_VI_TX_TCPUDP_ONLY) == 0); + + /* initialise the TX descriptor queue pointer table */ + + /* NB physical vs buffer addressing is determined by the Queue ID. */ + + offset = falcon_dma_tx_q_offset(nic, dmaq); + index = falcon_dma_tx_q_size_index(dmaq_size); + + /* allow VI flag to override this queue's descriptor type */ + desc_type = (flags & EFHW_VI_TX_PHYS_ADDR_EN) ? 0 : 1; + + /* bug9403: It is dangerous to allow buffer-addressed queues to + * have owner_id=0. 
*/ + EFHW_ASSERT((own_id > 0) || desc_type == 0); + + /* dword 1 */ + __DWCHCK(TX_DESCQ_FLUSH_LBN, TX_DESCQ_FLUSH_WIDTH); + __DWCHCK(TX_DESCQ_TYPE_LBN, TX_DESCQ_TYPE_WIDTH); + __DWCHCK(TX_DESCQ_SIZE_LBN, TX_DESCQ_SIZE_WIDTH); + __DWCHCK(TX_DESCQ_LABEL_LBN, TX_DESCQ_LABEL_WIDTH); + __DWCHCK(TX_DESCQ_OWNER_ID_LBN, TX_DESCQ_OWNER_ID_WIDTH); + + __LWCHK(TX_DESCQ_EVQ_ID_LBN, TX_DESCQ_EVQ_ID_WIDTH); + + __RANGECHCK(1, TX_DESCQ_FLUSH_WIDTH); + __RANGECHCK(desc_type, TX_DESCQ_TYPE_WIDTH); + __RANGECHCK(index, TX_DESCQ_SIZE_WIDTH); + __RANGECHCK(tag, TX_DESCQ_LABEL_WIDTH); + __RANGECHCK(own_id, TX_DESCQ_OWNER_ID_WIDTH); + __RANGECHCK(evq_id, TX_DESCQ_EVQ_ID_WIDTH); + + val1 = ((desc_type << TX_DESCQ_TYPE_LBN) | + (index << TX_DESCQ_SIZE_LBN) | + (tag << TX_DESCQ_LABEL_LBN) | + (own_id << TX_DESCQ_OWNER_ID_LBN) | + (__LOW(evq_id, TX_DESCQ_EVQ_ID_LBN, TX_DESCQ_EVQ_ID_WIDTH))); + + /* dword 2 */ + __DW2CHCK(TX_DESCQ_BUF_BASE_ID_LBN, TX_DESCQ_BUF_BASE_ID_WIDTH); + __RANGECHCK(buf_idx, TX_DESCQ_BUF_BASE_ID_WIDTH); + + val2 = ((__HIGH(evq_id, TX_DESCQ_EVQ_ID_LBN, TX_DESCQ_EVQ_ID_WIDTH)) | + (buf_idx << __DW2(TX_DESCQ_BUF_BASE_ID_LBN))); + + /* dword 3 */ + __DW3CHCK(TX_ISCSI_HDIG_EN_LBN, TX_ISCSI_HDIG_EN_WIDTH); + __DW3CHCK(TX_ISCSI_DDIG_EN_LBN, TX_ISCSI_DDIG_EN_WIDTH); + __RANGECHCK(iscsi_hdig_en, TX_ISCSI_HDIG_EN_WIDTH); + __RANGECHCK(iscsi_ddig_en, TX_ISCSI_DDIG_EN_WIDTH); + + val3 = ((iscsi_hdig_en << __DW3(TX_ISCSI_HDIG_EN_LBN)) | + (iscsi_ddig_en << __DW3(TX_ISCSI_DDIG_EN_LBN)) | + (1 << __DW3(TX_DESCQ_EN_LBN))); /* queue enable bit */ + + switch (nic->devtype.variant) { + case 'B': + __DW3CHCK(TX_NON_IP_DROP_DIS_B0_LBN, + TX_NON_IP_DROP_DIS_B0_WIDTH); + __DW3CHCK(TX_IP_CHKSM_DIS_B0_LBN, TX_IP_CHKSM_DIS_B0_WIDTH); + __DW3CHCK(TX_TCP_CHKSM_DIS_B0_LBN, TX_TCP_CHKSM_DIS_B0_WIDTH); + + val3 |= ((non_ip_drop_dis << __DW3(TX_NON_IP_DROP_DIS_B0_LBN))| + (csum_ip_dis << __DW3(TX_IP_CHKSM_DIS_B0_LBN)) | + (csum_tcp_dis << __DW3(TX_TCP_CHKSM_DIS_B0_LBN))); + break; + case 'A': + if (csum_ip_dis || csum_tcp_dis || !non_ip_drop_dis) + EFHW_WARN + ("%s: bad settings for A1 csum_ip_dis=%d " + "csum_tcp_dis=%d non_ip_drop_dis=%d", + __FUNCTION__, csum_ip_dis, + csum_tcp_dis, non_ip_drop_dis); + break; + default: + EFHW_ASSERT(0); + break; + } + + EFHW_TRACE("%s: txq %x evq %u tag %x id %x buf %x " + "%x:%x:%x->%" PRIx64 ":%" PRIx64 ":%" PRIx64, + __FUNCTION__, + dmaq, evq_id, tag, own_id, buf_idx, dmaq_size, + iscsi_hdig_en, iscsi_ddig_en, val1, val2, val3); + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_write_qq(efhw_kva + offset, ((val2 << 32) | val1), val3); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); + return; +} + +static inline ulong +falcon_dma_rx_q_offset(struct efhw_nic *nic, unsigned dmaq) +{ + EFHW_ASSERT(dmaq < FALCON_DMAQ_NUM); + return RX_DESC_PTR_TBL_OFST + dmaq * FALCON_REGISTER128; +} + +static void +falcon_dmaq_rx_q_init(struct efhw_nic *nic, + uint dmaq, uint evq_id, uint own_id, + uint tag, uint dmaq_size, uint buf_idx, uint flags) +{ + FALCON_LOCK_DECL; + uint i, desc_type = 1; + uint64_t val1, val2, val3; + ulong offset; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + + /* Q attributes */ +#if BUG5762_WORKAROUND + int jumbo = 1; /* Queues must not have mixed types */ +#else + int jumbo = ((flags & EFHW_VI_JUMBO_EN) != 0); +#endif + int iscsi_hdig_en = ((flags & EFHW_VI_ISCSI_RX_HDIG_EN) != 0); + int iscsi_ddig_en = ((flags & EFHW_VI_ISCSI_RX_DDIG_EN) != 0); + + /* initialise the TX descriptor queue pointer table */ + offset = 
falcon_dma_rx_q_offset(nic, dmaq); + + /* size must be one of the various options, otherwise we assert */ + for (i = 0; i < N_DMAQ_SIZES; i++) { + if (dmaq_size == dmaq_sizes[i]) + break; + } + EFHW_ASSERT(i < N_DMAQ_SIZES); + + /* allow VI flag to override this queue's descriptor type */ + desc_type = (flags & EFHW_VI_RX_PHYS_ADDR_EN) ? 0 : 1; + + /* bug9403: It is dangerous to allow buffer-addressed queues to have + * owner_id=0 */ + EFHW_ASSERT((own_id > 0) || desc_type == 0); + + /* dword 1 */ + __DWCHCK(RX_DESCQ_EN_LBN, RX_DESCQ_EN_WIDTH); + __DWCHCK(RX_DESCQ_JUMBO_LBN, RX_DESCQ_JUMBO_WIDTH); + __DWCHCK(RX_DESCQ_TYPE_LBN, RX_DESCQ_TYPE_WIDTH); + __DWCHCK(RX_DESCQ_SIZE_LBN, RX_DESCQ_SIZE_WIDTH); + __DWCHCK(RX_DESCQ_LABEL_LBN, RX_DESCQ_LABEL_WIDTH); + __DWCHCK(RX_DESCQ_OWNER_ID_LBN, RX_DESCQ_OWNER_ID_WIDTH); + + __LWCHK(RX_DESCQ_EVQ_ID_LBN, RX_DESCQ_EVQ_ID_WIDTH); + + __RANGECHCK(1, RX_DESCQ_EN_WIDTH); + __RANGECHCK(jumbo, RX_DESCQ_JUMBO_WIDTH); + __RANGECHCK(desc_type, RX_DESCQ_TYPE_WIDTH); + __RANGECHCK(i, RX_DESCQ_SIZE_WIDTH); + __RANGECHCK(tag, RX_DESCQ_LABEL_WIDTH); + __RANGECHCK(own_id, RX_DESCQ_OWNER_ID_WIDTH); + __RANGECHCK(evq_id, RX_DESCQ_EVQ_ID_WIDTH); + + val1 = ((1 << RX_DESCQ_EN_LBN) | + (jumbo << RX_DESCQ_JUMBO_LBN) | + (desc_type << RX_DESCQ_TYPE_LBN) | + (i << RX_DESCQ_SIZE_LBN) | + (tag << RX_DESCQ_LABEL_LBN) | + (own_id << RX_DESCQ_OWNER_ID_LBN) | + (__LOW(evq_id, RX_DESCQ_EVQ_ID_LBN, RX_DESCQ_EVQ_ID_WIDTH))); + + /* dword 2 */ + __DW2CHCK(RX_DESCQ_BUF_BASE_ID_LBN, RX_DESCQ_BUF_BASE_ID_WIDTH); + __RANGECHCK(buf_idx, RX_DESCQ_BUF_BASE_ID_WIDTH); + + val2 = ((__HIGH(evq_id, RX_DESCQ_EVQ_ID_LBN, RX_DESCQ_EVQ_ID_WIDTH)) | + (buf_idx << __DW2(RX_DESCQ_BUF_BASE_ID_LBN))); + + /* dword 3 */ + __DW3CHCK(RX_ISCSI_HDIG_EN_LBN, RX_ISCSI_HDIG_EN_WIDTH); + __DW3CHCK(RX_ISCSI_DDIG_EN_LBN, RX_ISCSI_DDIG_EN_WIDTH); + __RANGECHCK(iscsi_hdig_en, RX_ISCSI_HDIG_EN_WIDTH); + __RANGECHCK(iscsi_ddig_en, RX_ISCSI_DDIG_EN_WIDTH); + + val3 = (iscsi_hdig_en << __DW3(RX_ISCSI_HDIG_EN_LBN)) | + (iscsi_ddig_en << __DW3(RX_ISCSI_DDIG_EN_LBN)); + + EFHW_TRACE("%s: rxq %x evq %u tag %x id %x buf %x %s " + "%x:%x:%x -> %" PRIx64 ":%" PRIx64 ":%" PRIx64, + __FUNCTION__, + dmaq, evq_id, tag, own_id, buf_idx, + jumbo ? 
"jumbo" : "normal", dmaq_size, + iscsi_hdig_en, iscsi_ddig_en, val1, val2, val3); + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_write_qq(efhw_kva + offset, ((val2 << 32) | val1), val3); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); + return; +} + +static void falcon_dmaq_tx_q_disable(struct efhw_nic *nic, uint dmaq) +{ + FALCON_LOCK_DECL; + uint64_t val1, val2, val3; + ulong offset; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + + /* initialise the TX descriptor queue pointer table */ + + offset = falcon_dma_tx_q_offset(nic, dmaq); + + /* dword 1 */ + __DWCHCK(TX_DESCQ_TYPE_LBN, TX_DESCQ_TYPE_WIDTH); + + val1 = ((uint64_t) 1 << TX_DESCQ_TYPE_LBN); + + /* dword 2 */ + val2 = 0; + + /* dword 3 */ + val3 = (0 << __DW3(TX_DESCQ_EN_LBN)); /* queue enable bit */ + + EFHW_TRACE("%s: %x->%" PRIx64 ":%" PRIx64 ":%" PRIx64, + __FUNCTION__, dmaq, val1, val2, val3); + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_write_qq(efhw_kva + offset, ((val2 << 32) | val1), val3); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); + return; +} + +static void falcon_dmaq_rx_q_disable(struct efhw_nic *nic, uint dmaq) +{ + FALCON_LOCK_DECL; + uint64_t val1, val2, val3; + ulong offset; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + + /* initialise the TX descriptor queue pointer table */ + offset = falcon_dma_rx_q_offset(nic, dmaq); + + /* dword 1 */ + __DWCHCK(RX_DESCQ_EN_LBN, RX_DESCQ_EN_WIDTH); + __DWCHCK(RX_DESCQ_TYPE_LBN, RX_DESCQ_TYPE_WIDTH); + + val1 = ((0 << RX_DESCQ_EN_LBN) | (1 << RX_DESCQ_TYPE_LBN)); + + /* dword 2 */ + val2 = 0; + + /* dword 3 */ + val3 = 0; + + EFHW_TRACE("falcon_dmaq_rx_q_disable: %x->%" + PRIx64 ":%" PRIx64 ":%" PRIx64, + dmaq, val1, val2, val3); + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_write_qq(efhw_kva + offset, ((val2 << 32) | val1), val3); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); + return; +} + + +/*---------------------------------------------------------------------------- + * + * Buffer Table low-level register interface + * + *---------------------------------------------------------------------------*/ + +/*! Convert a (potentially) 64-bit physical address to 32-bits. Every use +** of this function is a place where we're not 64-bit clean. +*/ +static inline uint32_t dma_addr_to_u32(dma_addr_t addr) +{ + /* Top bits had better be zero! 
*/ + EFHW_ASSERT(addr == (addr & 0xffffffff)); + return (uint32_t) addr; +} + +static inline uint32_t +falcon_nic_buffer_table_entry32_mk(dma_addr_t dma_addr, int own_id) +{ + uint32_t dma_addr32 = FALCON_BUFFER_4K_PAGE(dma_addr_to_u32(dma_addr)); + + /* don't do this to me */ + EFHW_BUILD_ASSERT(BUF_ADR_HBUF_ODD_LBN == BUF_ADR_HBUF_EVEN_LBN + 32); + EFHW_BUILD_ASSERT(BUF_OWNER_ID_HBUF_ODD_LBN == + BUF_OWNER_ID_HBUF_EVEN_LBN + 32); + + EFHW_BUILD_ASSERT(BUF_OWNER_ID_HBUF_ODD_WIDTH == + BUF_OWNER_ID_HBUF_EVEN_WIDTH); + EFHW_BUILD_ASSERT(BUF_ADR_HBUF_ODD_WIDTH == BUF_ADR_HBUF_EVEN_WIDTH); + + __DWCHCK(BUF_ADR_HBUF_EVEN_LBN, BUF_ADR_HBUF_EVEN_WIDTH); + __DWCHCK(BUF_OWNER_ID_HBUF_EVEN_LBN, BUF_OWNER_ID_HBUF_EVEN_WIDTH); + + __RANGECHCK(dma_addr32, BUF_ADR_HBUF_EVEN_WIDTH); + __RANGECHCK(own_id, BUF_OWNER_ID_HBUF_EVEN_WIDTH); + + return ((dma_addr32 << BUF_ADR_HBUF_EVEN_LBN) | + (own_id << BUF_OWNER_ID_HBUF_EVEN_LBN)); +} + +static inline uint64_t +falcon_nic_buffer_table_entry64_mk(dma_addr_t dma_addr, + int bufsz, /* bytes */ + int region, int own_id) +{ + __DW2CHCK(IP_DAT_BUF_SIZE_LBN, IP_DAT_BUF_SIZE_WIDTH); + __DW2CHCK(BUF_ADR_REGION_LBN, BUF_ADR_REGION_WIDTH); + __LWCHK(BUF_ADR_FBUF_LBN, BUF_ADR_FBUF_WIDTH); + __DWCHCK(BUF_OWNER_ID_FBUF_LBN, BUF_OWNER_ID_FBUF_WIDTH); + + EFHW_ASSERT((bufsz == EFHW_4K) || (bufsz == EFHW_8K)); + + dma_addr = (dma_addr >> 12) & __FALCON_MASK64(BUF_ADR_FBUF_WIDTH); + + __RANGECHCK(dma_addr, BUF_ADR_FBUF_WIDTH); + __RANGECHCK(1, IP_DAT_BUF_SIZE_WIDTH); + __RANGECHCK(region, BUF_ADR_REGION_WIDTH); + __RANGECHCK(own_id, BUF_OWNER_ID_FBUF_WIDTH); + + return (((uint64_t) (bufsz == EFHW_8K) << IP_DAT_BUF_SIZE_LBN) | + ((uint64_t) region << BUF_ADR_REGION_LBN) | + ((uint64_t) dma_addr << BUF_ADR_FBUF_LBN) | + ((uint64_t) own_id << BUF_OWNER_ID_FBUF_LBN)); +} + +static inline void +_falcon_nic_buffer_table_set32(struct efhw_nic *nic, + dma_addr_t dma_addr, uint bufsz, + uint region, /* not used */ + int own_id, int buffer_id) +{ + /* programming the half table needs to be done in pairs. */ + uint64_t entry, val, shift; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + efhw_ioaddr_t offset; + + EFHW_BUILD_ASSERT(BUF_ADR_HBUF_ODD_LBN == BUF_ADR_HBUF_EVEN_LBN + 32); + EFHW_BUILD_ASSERT(BUF_OWNER_ID_HBUF_ODD_LBN == + BUF_OWNER_ID_HBUF_EVEN_LBN + 32); + + shift = (buffer_id & 1) ? 32 : 0; + + offset = (efhw_kva + BUF_HALF_TBL_OFST + + ((buffer_id & ~1) * FALCON_BUFFER_TBL_HALF_BYTES)); + + entry = falcon_nic_buffer_table_entry32_mk(dma_addr_to_u32(dma_addr), + own_id); + +#if FALCON_USE_SHADOW_BUFFER_TABLE + val = _falcon_buffer_table[buffer_id & ~1]; +#else + /* This will not work unless we've completed + * the buffer table updates */ + falcon_read_q(offset, &val); +#endif + val &= ~(((uint64_t) 0xffffffff) << shift); + val |= (entry << shift); + + EFHW_TRACE("%s[%x]: " ci_dma_addr_fmt ":%x:%" PRIx64 "->%x = %" + PRIx64, __FUNCTION__, buffer_id, dma_addr, own_id, entry, + (unsigned)(offset - efhw_kva), val); + + /* Falcon requires that access to this register is serialised */ + falcon_write_q(offset, val); + + /* NB. No mmiowb(). Caller should do that e.g by calling commit */ + +#if FALCON_USE_SHADOW_BUFFER_TABLE + _falcon_buffer_table[buffer_id & ~1] = val; +#endif + + /* Confirm the entry if the event queues haven't been set up. 
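+	 * Without an interrupt handler there is no event queue to deliver the
+	 * buffer-table update-done event, so read the entry back (with the
+	 * bounded udelay() poll below) until the write has taken effect.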
*/ + if (!nic->irq_handler) { + uint64_t new_val; + int count = 0; + while (1) { + mmiowb(); + falcon_read_q(offset, &new_val); + if (new_val == val) + break; + count++; + if (count > 1000) { + EFHW_WARN("%s: poll Timeout", __FUNCTION__); + break; + } + udelay(1); + } + } +} + +static inline void +_falcon_nic_buffer_table_set64(struct efhw_nic *nic, + dma_addr_t dma_addr, uint bufsz, + uint region, int own_id, int buffer_id) +{ + efhw_ioaddr_t offset; + uint64_t entry; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + + EFHW_ASSERT(region < FALCON_REGION_NUM); + + EFHW_ASSERT((bufsz == EFHW_4K) || + (bufsz == EFHW_8K && FALCON_BUFFER_TABLE_FULL_MODE)); + + offset = (efhw_kva + BUF_FULL_TBL_OFST + + (buffer_id * FALCON_BUFFER_TBL_FULL_BYTES)); + + entry = falcon_nic_buffer_table_entry64_mk(dma_addr, bufsz, region, + own_id); + + EFHW_TRACE("%s[%x]: " ci_dma_addr_fmt + ":bufsz=%x:region=%x:ownid=%x", + __FUNCTION__, buffer_id, dma_addr, bufsz, region, own_id); + + EFHW_TRACE("%s: BUF[%x]:NIC[%x]->%" PRIx64, + __FUNCTION__, buffer_id, + (unsigned int)(offset - efhw_kva), entry); + + /* Falcon requires that access to this register is serialised */ + falcon_write_q(offset, entry); + + /* NB. No mmiowb(). Caller should do that e.g by calling commit */ + + /* Confirm the entry if the event queues haven't been set up. */ + if (!nic->irq_handler) { + uint64_t new_entry; + int count = 0; + while (1) { + mmiowb(); + falcon_read_q(offset, &new_entry); + if (new_entry == entry) + return; + count++; + if (count > 1000) { + EFHW_WARN("%s: poll Timeout waiting for " + "value %"PRIx64 + " (last was %"PRIx64")", + __FUNCTION__, entry, new_entry); + break; + } + udelay(1); + } + } +} + +#if FALCON_BUFFER_TABLE_FULL_MODE +#define _falcon_nic_buffer_table_set _falcon_nic_buffer_table_set64 +#else +#define _falcon_nic_buffer_table_set _falcon_nic_buffer_table_set32 +#endif + +static inline void _falcon_nic_buffer_table_commit(struct efhw_nic *nic) +{ + /* MUST be called holding the FALCON_LOCK */ + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + uint64_t cmd; + + EFHW_BUILD_ASSERT(BUF_TBL_UPD_REG_KER_OFST == BUF_TBL_UPD_REG_OFST); + + __DW2CHCK(BUF_UPD_CMD_LBN, BUF_UPD_CMD_WIDTH); + __RANGECHCK(1, BUF_UPD_CMD_WIDTH); + + cmd = ((uint64_t) 1 << BUF_UPD_CMD_LBN); + + /* Falcon requires 128 bit atomic access for this register */ + falcon_write_qq(efhw_kva + BUF_TBL_UPD_REG_OFST, + cmd, FALCON_ATOMIC_UPD_REG); + mmiowb(); + + nic->buf_commit_outstanding++; + EFHW_TRACE("COMMIT REQ out=%d", nic->buf_commit_outstanding); +} + +static void falcon_nic_buffer_table_commit(struct efhw_nic *nic) +{ + /* nothing to do */ +} + +static inline void +_falcon_nic_buffer_table_clear(struct efhw_nic *nic, int buffer_id, int num) +{ + uint64_t cmd; + uint64_t start_id = buffer_id; + uint64_t end_id = buffer_id + num - 1; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + + efhw_ioaddr_t offset = (efhw_kva + BUF_TBL_UPD_REG_OFST); + + EFHW_BUILD_ASSERT(BUF_TBL_UPD_REG_KER_OFST == BUF_TBL_UPD_REG_OFST); + +#if !FALCON_BUFFER_TABLE_FULL_MODE + /* buffer_ids in half buffer mode reference pairs of buffers */ + EFHW_ASSERT(buffer_id % 1 == 0); + EFHW_ASSERT(num % 1 == 0); + start_id = start_id >> 1; + end_id = end_id >> 1; +#endif + + EFHW_ASSERT(num >= 1); + + __DWCHCK(BUF_CLR_START_ID_LBN, BUF_CLR_START_ID_WIDTH); + __DW2CHCK(BUF_CLR_END_ID_LBN, BUF_CLR_END_ID_WIDTH); + + __DW2CHCK(BUF_CLR_CMD_LBN, BUF_CLR_CMD_WIDTH); + __RANGECHCK(1, BUF_CLR_CMD_WIDTH); + + __RANGECHCK(start_id, BUF_CLR_START_ID_WIDTH); + __RANGECHCK(end_id, BUF_CLR_END_ID_WIDTH); + + 
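+	/* A single buffer-table update command clears the whole
+	 * [start_id, end_id] range; completion is reported via a buffer-table
+	 * update-done event, hence the buf_commit_outstanding accounting
+	 * below. */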
cmd = (((uint64_t) 1 << BUF_CLR_CMD_LBN) | + (start_id << BUF_CLR_START_ID_LBN) | + (end_id << BUF_CLR_END_ID_LBN)); + + /* Falcon requires 128 bit atomic access for this register */ + falcon_write_qq(offset, cmd, FALCON_ATOMIC_UPD_REG); + mmiowb(); + + nic->buf_commit_outstanding++; + EFHW_TRACE("COMMIT CLEAR out=%d", nic->buf_commit_outstanding); +} + +/*---------------------------------------------------------------------------- + * + * Events low-level register interface + * + *---------------------------------------------------------------------------*/ + +static unsigned eventq_sizes[] = { + 512, + EFHW_1K, + EFHW_2K, + EFHW_4K, + EFHW_8K, + EFHW_16K, + EFHW_32K +}; + +#define N_EVENTQ_SIZES (sizeof(eventq_sizes) / sizeof(eventq_sizes[0])) + +static inline void falcon_nic_srm_upd_evq(struct efhw_nic *nic, int evq) +{ + /* set up the eventq which will receive events from the SRAM module. + * i.e buffer table updates and clears, TX and RX aperture table + * updates */ + + FALCON_LOCK_DECL; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + + EFHW_BUILD_ASSERT(SRM_UPD_EVQ_REG_OFST == SRM_UPD_EVQ_REG_KER_OFST); + + EFHW_ASSERT((evq == FALCON_EVQ_KERNEL0) || (evq == FALCON_EVQ_CHAR) || + (evq == FALCON_EVQ_NONIRQ)); + + __DWCHCK(SRM_UPD_EVQ_ID_LBN, SRM_UPD_EVQ_ID_WIDTH); + __RANGECHCK(evq, SRM_UPD_EVQ_ID_WIDTH); + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_write_qq(efhw_kva + SRM_UPD_EVQ_REG_OFST, + ((uint64_t) evq << SRM_UPD_EVQ_ID_LBN), + FALCON_ATOMIC_SRPM_UDP_EVQ_REG); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); +} + +static inline void +falcon_nic_evq_ptr_tbl(struct efhw_nic *nic, + uint evq, /* evq id */ + uint enable, /* 1 to enable, 0 to disable */ + uint buf_base_id,/* Buffer table base for EVQ */ + uint evq_size /* Number of events */ ) +{ + FALCON_LOCK_DECL; + uint i, val; + ulong offset; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + + /* size must be one of the various options, otherwise we assert */ + for (i = 0; i < N_EVENTQ_SIZES; i++) { + if (evq_size <= eventq_sizes[i]) + break; + } + EFHW_ASSERT(i < N_EVENTQ_SIZES); + + __DWCHCK(EVQ_BUF_BASE_ID_LBN, EVQ_BUF_BASE_ID_WIDTH); + __DWCHCK(EVQ_SIZE_LBN, EVQ_SIZE_WIDTH); + __DWCHCK(EVQ_EN_LBN, EVQ_EN_WIDTH); + + __RANGECHCK(i, EVQ_SIZE_WIDTH); + __RANGECHCK(buf_base_id, EVQ_BUF_BASE_ID_WIDTH); + __RANGECHCK(1, EVQ_EN_WIDTH); + + /* if !enable then only evq needs to be correct, although valid + * values need to be passed in for other arguments to prevent + * assertions */ + + val = ((i << EVQ_SIZE_LBN) | (buf_base_id << EVQ_BUF_BASE_ID_LBN) | + (enable ? 
(1 << EVQ_EN_LBN) : 0)); + + EFHW_ASSERT(evq < FALCON_EVQ_TBL_NUM); + + offset = EVQ_PTR_TBL_CHAR_OFST; + offset += evq * FALCON_REGISTER128; + + EFHW_TRACE("%s: evq %u en=%x:buf=%x:size=%x->%x at %lx", + __FUNCTION__, evq, enable, buf_base_id, evq_size, val, + offset); + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_write_qq(efhw_kva + offset, val, FALCON_ATOMIC_PTR_TBL_REG); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); + + /* caller must wait for an update done event before writing any more + table entries */ + + return; +} + +void +falcon_nic_evq_ack(struct efhw_nic *nic, + uint evq, /* evq id */ + uint rptr, /* new read pointer update */ + bool wakeup /* request a wakeup event if ptr's != */ + ) +{ + uint val; + ulong offset; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + + EFHW_BUILD_ASSERT(FALCON_EVQ_CHAR == 4); + + __DWCHCK(EVQ_RPTR_LBN, EVQ_RPTR_WIDTH); + __RANGECHCK(rptr, EVQ_RPTR_WIDTH); + + val = (rptr << EVQ_RPTR_LBN); + + EFHW_ASSERT(evq < FALCON_EVQ_TBL_NUM); + + if (evq < FALCON_EVQ_CHAR) { + offset = EVQ_RPTR_REG_KER_OFST; + offset += evq * FALCON_REGISTER128; + + EFHW_ASSERT(!wakeup); /* don't try this at home */ + } else { + offset = EVQ_RPTR_REG_OFST + (FALCON_EVQ_CHAR * + FALCON_REGISTER128); + offset += (evq - FALCON_EVQ_CHAR) * FALCON_REGISTER128; + + /* nothing to do for interruptless event queues which do + * not want a wakeup */ + if (evq != FALCON_EVQ_CHAR && !wakeup) + return; + } + + EFHW_TRACE("%s: %x %x %x->%x", __FUNCTION__, evq, rptr, wakeup, val); + + writel(val, efhw_kva + offset); + mmiowb(); +} + +/*---------------------------------------------------------------------------- + * + * Helper for evq mapping + * + * idx = 0 && char => hw eventq[4] + * idx = 0 && net => hw eventq[0] + * 0 < idx < 5 => hw eventq[idx] (5 is non-interrupting) + * + * + *---------------------------------------------------------------------------*/ + +int falcon_idx_to_evq(struct efhw_nic *nic, uint idx) +{ + EFHW_BUILD_ASSERT(FALCON_EVQ_CHAR == 4); + EFHW_ASSERT(idx <= FALCON_EVQ_NONIRQ); + return (idx > 0) ? 
idx : FALCON_EVQ_CHAR; +} + +static inline int falcon_evq_is_interrupting(struct efhw_nic *nic, uint idx) +{ + EFHW_BUILD_ASSERT(FALCON_EVQ_CHAR == 4); + EFHW_ASSERT(idx <= FALCON_EVQ_NONIRQ); + + /* only the first CHAR driver event queue is interrupting */ + return (idx == FALCON_EVQ_CHAR); +} + +static inline void +falcon_drv_ev(struct efhw_nic *nic, uint64_t data, uint qid) +{ + FALCON_LOCK_DECL; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + + /* send an event from one driver to the other */ + EFHW_BUILD_ASSERT(DRV_EV_REG_KER_OFST == DRV_EV_REG_OFST); + EFHW_BUILD_ASSERT(DRV_EV_DATA_LBN == 0); + EFHW_BUILD_ASSERT(DRV_EV_DATA_WIDTH == 64); + EFHW_BUILD_ASSERT(DRV_EV_QID_LBN == 64); + EFHW_BUILD_ASSERT(DRV_EV_QID_WIDTH == 12); + + FALCON_LOCK_LOCK(nic); + falcon_write_qq(efhw_kva + DRV_EV_REG_OFST, data, qid); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); +} + +_DEBUG_SYM_ void +falcon_timer_cmd(struct efhw_nic *nic, + uint evq, /* timer id */ + uint mode, /* mode bits */ + uint countdown /* counting value to set */ ) +{ + FALCON_LOCK_DECL; + uint val; + ulong offset; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + + EFHW_BUILD_ASSERT(TIMER_VAL_LBN == 0); + + __DWCHCK(TIMER_MODE_LBN, TIMER_MODE_WIDTH); + __DWCHCK(TIMER_VAL_LBN, TIMER_VAL_WIDTH); + + __RANGECHCK(mode, TIMER_MODE_WIDTH); + __RANGECHCK(countdown, TIMER_VAL_WIDTH); + + val = ((mode << TIMER_MODE_LBN) | (countdown << TIMER_VAL_LBN)); + + if (evq < FALCON_EVQ_CHAR) { + offset = TIMER_CMD_REG_KER_OFST; + offset += evq * EFHW_8K; /* PAGE mapped register */ + } else { + offset = TIMER_TBL_OFST; + offset += evq * FALCON_REGISTER128; + } + EFHW_ASSERT(evq < FALCON_EVQ_TBL_NUM); + + EFHW_TRACE("%s: evq %u mode %x (%s) time %x -> %08x", + __FUNCTION__, evq, mode, + mode == 0 ? "DISABLE" : + mode == 1 ? "IMMED" : + mode == 2 ? (evq < 5 ? "HOLDOFF" : "RX_TRIG") : + "", countdown, val); + + /* Falcon requires 128 bit atomic access for this register when + * accessed from the driver. User access to timers is paged mapped + */ + FALCON_LOCK_LOCK(nic); + falcon_write_qq(efhw_kva + offset, val, FALCON_ATOMIC_TIMER_CMD_REG); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); + return; +} + +/*-------------------------------------------------------------------- + * + * Rate pacing - Low level interface + * + *--------------------------------------------------------------------*/ +void falcon_nic_pace(struct efhw_nic *nic, uint dmaq, uint pace) +{ + /* Pace specified in 2^(units of microseconds). This is the minimum + additional delay imposed over and above the IPG. 
+ + Pacing only available on the virtual interfaces + */ + FALCON_LOCK_DECL; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + ulong offset; + + if (pace > 20) + pace = 20; /* maxm supported value */ + + __DWCHCK(TX_PACE_LBN, TX_PACE_WIDTH); + __RANGECHCK(pace, TX_PACE_WIDTH); + + switch (nic->devtype.variant) { + case 'A': + EFHW_ASSERT(dmaq >= TX_PACE_TBL_FIRST_QUEUE_A1); + offset = TX_PACE_TBL_A1_OFST; + offset += (dmaq - TX_PACE_TBL_FIRST_QUEUE_A1) * 16; + break; + case 'B': + /* Would be nice to assert this, but as dmaq is unsigned and + * TX_PACE_TBL_FIRST_QUEUE_B0 is 0, it makes no sense + * EFHW_ASSERT(dmaq >= TX_PACE_TBL_FIRST_QUEUE_B0); + */ + offset = TX_PACE_TBL_B0_OFST; + offset += (dmaq - TX_PACE_TBL_FIRST_QUEUE_B0) * 16; + break; + default: + EFHW_ASSERT(0); + offset = 0; + break; + } + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_write_qq(efhw_kva + offset, pace, FALCON_ATOMIC_PACE_REG); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); + + EFHW_TRACE("%s: txq %d offset=%lx pace=2^%x", + __FUNCTION__, dmaq, offset, pace); +} + +/*-------------------------------------------------------------------- + * + * Interrupt - Low level interface + * + *--------------------------------------------------------------------*/ + +static void falcon_nic_handle_fatal_int(struct efhw_nic *nic) +{ + FALCON_LOCK_DECL; + efhw_ioaddr_t offset; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + uint64_t val; + + offset = (efhw_kva + FATAL_INTR_REG_OFST); + + /* Falcon requires 32 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + val = readl(offset); + FALCON_LOCK_UNLOCK(nic); + + /* ?? BUG3249 - need to disable illegal address interrupt */ + /* ?? BUG3114 - need to backport interrupt storm protection code */ + EFHW_ERR("fatal interrupt: %s%s%s%s%s%s%s%s%s%s%s%s[%" PRIx64 "]", + val & (1 << PCI_BUSERR_INT_CHAR_LBN) ? "PCI-bus-error " : "", + val & (1 << SRAM_OOB_INT_CHAR_LBN) ? "SRAM-oob " : "", + val & (1 << BUFID_OOB_INT_CHAR_LBN) ? "bufid-oob " : "", + val & (1 << MEM_PERR_INT_CHAR_LBN) ? "int-parity " : "", + val & (1 << RBUF_OWN_INT_CHAR_LBN) ? "rx-bufid-own " : "", + val & (1 << TBUF_OWN_INT_CHAR_LBN) ? "tx-bufid-own " : "", + val & (1 << RDESCQ_OWN_INT_CHAR_LBN) ? "rx-desc-own " : "", + val & (1 << TDESCQ_OWN_INT_CHAR_LBN) ? "tx-desc-own " : "", + val & (1 << EVQ_OWN_INT_CHAR_LBN) ? "evq-own " : "", + val & (1 << EVFF_OFLO_INT_CHAR_LBN) ? "evq-fifo " : "", + val & (1 << ILL_ADR_INT_CHAR_LBN) ? "ill-addr " : "", + val & (1 << SRM_PERR_INT_CHAR_LBN) ? 
"sram-parity " : "", val); +} + +static void falcon_nic_interrupt_hw_enable(struct efhw_nic *nic) +{ + FALCON_LOCK_DECL; + uint val; + efhw_ioaddr_t offset; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + + EFHW_BUILD_ASSERT(DRV_INT_EN_CHAR_WIDTH == 1); + + if (nic->flags & NIC_FLAG_NO_INTERRUPT) + return; + + offset = (efhw_kva + INT_EN_REG_CHAR_OFST); + val = 1 << DRV_INT_EN_CHAR_LBN; + + EFHW_NOTICE("%s: %x -> %x", __FUNCTION__, (int)(offset - efhw_kva), + val); + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_write_qq(offset, val, FALCON_ATOMIC_INT_EN_REG); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); +} + +static void falcon_nic_interrupt_hw_disable(struct efhw_nic *nic) +{ + FALCON_LOCK_DECL; + efhw_ioaddr_t offset; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + + EFHW_BUILD_ASSERT(SRAM_PERR_INT_KER_WIDTH == 1); + EFHW_BUILD_ASSERT(DRV_INT_EN_KER_LBN == 0); + EFHW_BUILD_ASSERT(SRAM_PERR_INT_CHAR_WIDTH == 1); + EFHW_BUILD_ASSERT(DRV_INT_EN_CHAR_LBN == 0); + EFHW_BUILD_ASSERT(SRAM_PERR_INT_KER_LBN == SRAM_PERR_INT_CHAR_LBN); + EFHW_BUILD_ASSERT(DRV_INT_EN_KER_LBN == DRV_INT_EN_CHAR_LBN); + + if (nic->flags & NIC_FLAG_NO_INTERRUPT) + return; + + offset = (efhw_kva + INT_EN_REG_CHAR_OFST); + + EFHW_NOTICE("%s: %x -> 0", __FUNCTION__, (int)(offset - efhw_kva)); + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_write_qq(offset, 0, FALCON_ATOMIC_INT_EN_REG); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); +} + +#ifndef __ci_ul_driver__ + +static void falcon_nic_irq_addr_set(struct efhw_nic *nic, dma_addr_t dma_addr) +{ + FALCON_LOCK_DECL; + efhw_ioaddr_t offset; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + + offset = (efhw_kva + INT_ADR_REG_CHAR_OFST); + + EFHW_NOTICE("%s: %x -> " DMA_ADDR_T_FMT, __FUNCTION__, + (int)(offset - efhw_kva), dma_addr); + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_write_qq(offset, dma_addr, FALCON_ATOMIC_INT_ADR_REG); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); +} + +#endif + + +/*-------------------------------------------------------------------- + * + * RXDP - low level interface + * + *--------------------------------------------------------------------*/ + +void +falcon_nic_set_rx_usr_buf_size(struct efhw_nic *nic, int usr_buf_bytes) +{ + FALCON_LOCK_DECL; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + uint64_t val, val2, usr_buf_size = usr_buf_bytes / 32; + int rubs_lbn, rubs_width, roec_lbn; + + EFHW_BUILD_ASSERT(RX_CFG_REG_OFST == RX_CFG_REG_KER_OFST); + + switch (nic->devtype.variant) { + default: + EFHW_ASSERT(0); + /* Fall-through to avoid compiler warnings. 
*/ + case 'A': + rubs_lbn = RX_USR_BUF_SIZE_A1_LBN; + rubs_width = RX_USR_BUF_SIZE_A1_WIDTH; + roec_lbn = RX_OWNERR_CTL_A1_LBN; + break; + case 'B': + rubs_lbn = RX_USR_BUF_SIZE_B0_LBN; + rubs_width = RX_USR_BUF_SIZE_B0_WIDTH; + roec_lbn = RX_OWNERR_CTL_B0_LBN; + break; + } + + __DWCHCK(rubs_lbn, rubs_width); + __QWCHCK(roec_lbn, 1); + __RANGECHCK(usr_buf_size, rubs_width); + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_read_qq(efhw_kva + RX_CFG_REG_OFST, &val, &val2); + + val &= ~((__FALCON_MASK64(rubs_width)) << rubs_lbn); + val |= (usr_buf_size << rubs_lbn); + + /* shouldn't be needed for a production driver */ + val |= ((uint64_t) 1 << roec_lbn); + + falcon_write_qq(efhw_kva + RX_CFG_REG_OFST, val, val2); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); +} +EXPORT_SYMBOL(falcon_nic_set_rx_usr_buf_size); + +void +falcon_nic_rx_filter_ctl_get(struct efhw_nic *nic, uint32_t *tcp_full, + uint32_t *tcp_wild, + uint32_t *udp_full, uint32_t *udp_wild) +{ + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + FALCON_LOCK_DECL; + uint64_t val; + + FALCON_LOCK_LOCK(nic); + falcon_read_q(efhw_kva + RX_FILTER_CTL_REG_OFST, &val); + FALCON_LOCK_UNLOCK(nic); + + *tcp_full = (uint32_t)((val >> TCP_FULL_SRCH_LIMIT_LBN) & + (__FALCON_MASK64(TCP_FULL_SRCH_LIMIT_WIDTH))); + + *tcp_wild = (uint32_t)((val >> TCP_WILD_SRCH_LIMIT_LBN) & + (__FALCON_MASK64(TCP_WILD_SRCH_LIMIT_WIDTH))); + + *udp_full = (uint32_t)((val >> UDP_FULL_SRCH_LIMIT_LBN) & + (__FALCON_MASK64(UDP_FULL_SRCH_LIMIT_WIDTH))); + + *udp_wild = (uint32_t)((val >> UDP_WILD_SRCH_LIMIT_LBN) & + (__FALCON_MASK64(UDP_WILD_SRCH_LIMIT_WIDTH))); +} +EXPORT_SYMBOL(falcon_nic_rx_filter_ctl_get); + +void +falcon_nic_rx_filter_ctl_set(struct efhw_nic *nic, uint32_t tcp_full, + uint32_t tcp_wild, + uint32_t udp_full, uint32_t udp_wild) +{ + uint64_t val, val2; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + FALCON_LOCK_DECL; + + EFHW_ASSERT(tcp_full < nic->filter_tbl_size); + EFHW_ASSERT(tcp_wild < nic->filter_tbl_size); + EFHW_ASSERT(udp_full < nic->filter_tbl_size); + EFHW_ASSERT(udp_wild < nic->filter_tbl_size); + + /* until we implement a dynamic scaling of search limits we wish to + * maintain the same limits set up by default in the net driver + * when we initialize the char driver */ + tcp_full_srch_limit = tcp_full; + tcp_wild_srch_limit = tcp_wild; + udp_full_srch_limit = udp_full; + udp_wild_srch_limit = udp_wild; + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_read_qq(efhw_kva + RX_FILTER_CTL_REG_OFST, &val, &val2); + + /* Search limits */ + val &= ~((__FALCON_MASK64(TCP_FULL_SRCH_LIMIT_WIDTH)) + << TCP_FULL_SRCH_LIMIT_LBN); + + val |= ((uint64_t)tcp_full + RX_FILTER_CTL_SRCH_FUDGE_FULL) + << TCP_FULL_SRCH_LIMIT_LBN; + + val &= ~((__FALCON_MASK64(TCP_WILD_SRCH_LIMIT_WIDTH)) + << TCP_WILD_SRCH_LIMIT_LBN); + + val |= ((uint64_t)tcp_wild + RX_FILTER_CTL_SRCH_FUDGE_WILD) + << TCP_WILD_SRCH_LIMIT_LBN; + + val &= ~((__FALCON_MASK64(UDP_FULL_SRCH_LIMIT_WIDTH)) + << UDP_FULL_SRCH_LIMIT_LBN); + + val |= ((uint64_t)udp_full + RX_FILTER_CTL_SRCH_FUDGE_FULL) + << UDP_FULL_SRCH_LIMIT_LBN; + + val &= ~((__FALCON_MASK64(UDP_WILD_SRCH_LIMIT_WIDTH)) + << UDP_WILD_SRCH_LIMIT_LBN); + + val |= ((uint64_t)udp_wild + RX_FILTER_CTL_SRCH_FUDGE_WILD) + << UDP_WILD_SRCH_LIMIT_LBN; + + falcon_write_qq(efhw_kva + RX_FILTER_CTL_REG_OFST, val, val2); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); +} +EXPORT_SYMBOL(falcon_nic_rx_filter_ctl_set); + 
+/*-------------------------------------------------------------------- + * + * TXDP - low level interface + * + *--------------------------------------------------------------------*/ + +_DEBUG_SYM_ void falcon_nic_tx_cfg(struct efhw_nic *nic, int unlocked) +{ + FALCON_LOCK_DECL; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + uint64_t val1, val2; + + EFHW_BUILD_ASSERT(TX_CFG_REG_OFST == TX_CFG_REG_KER_OFST); + __DWCHCK(TX_OWNERR_CTL_LBN, TX_OWNERR_CTL_WIDTH); + __DWCHCK(TX_NON_IP_DROP_DIS_LBN, TX_NON_IP_DROP_DIS_WIDTH); + + FALCON_LOCK_LOCK(nic); + falcon_read_qq(efhw_kva + TX_CFG_REG_OFST, &val1, &val2); + + /* Will flag fatal interrupts on owner id errors. This should not be + on for production code because there is otherwise a denial of + serivce attack possible */ + val1 |= (1 << TX_OWNERR_CTL_LBN); + + /* Setup user queue TCP/UDP only packet security */ + if (unlocked) + val1 |= (1 << TX_NON_IP_DROP_DIS_LBN); + else + val1 &= ~(1 << TX_NON_IP_DROP_DIS_LBN); + + falcon_write_qq(efhw_kva + TX_CFG_REG_OFST, val1, val2); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); +} + +/*-------------------------------------------------------------------- + * + * Random thresholds - Low level interface (Would like these to be op + * defaults wherever possible) + * + *--------------------------------------------------------------------*/ + +static void falcon_nic_pace_cfg(struct efhw_nic *nic) +{ + FALCON_LOCK_DECL; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + unsigned offset = 0; + uint64_t val; + + val = 0xa81682; /* !!!! */ + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + switch (nic->devtype.variant) { + case 'A': offset = TX_PACE_REG_A1_OFST; break; + case 'B': offset = TX_PACE_REG_B0_OFST; break; + default: EFHW_ASSERT(0); break; + } + falcon_write_qq(efhw_kva + offset, val, 0); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); +} + +/********************************************************************** + * Supporting modules. ************************************************ + **********************************************************************/ + +/*-------------------------------------------------------------------- + * + * Filter support + * + *--------------------------------------------------------------------*/ + +/*! \TODO this table should be per nic */ +struct falcon_cached_ipfilter { +#if FALCON_FULL_FILTER_CACHE + unsigned dmaq; + unsigned saddr_le32; + unsigned daddr_le32; + unsigned sport_le16; + unsigned dport_le16; + unsigned tcp:1; + unsigned full:1; + unsigned rss_b0:1; + unsigned scat_b0:1; +#endif + unsigned addr_valid:1; + +}; + + +/* TODO: Dynamically allocate this and store in struct efhw_nic. 
*/ +static struct falcon_cached_ipfilter + host_ipfilter_cache[EFHW_MAX_NR_DEVS][FALCON_FILTER_TBL_NUM]; + + +static inline void host_ipfilter_cache_init(struct efhw_nic *nic) +{ + memset(host_ipfilter_cache[nic->index], 0, + sizeof(host_ipfilter_cache[0][0]) * nic->filter_tbl_size); +} + +static inline int host_ipfilter_cache_active(struct efhw_nic *nic, uint idx) +{ + EFHW_ASSERT(nic->index < EFHW_MAX_NR_DEVS); + EFHW_ASSERT(idx < nic->filter_tbl_size); + + return (host_ipfilter_cache[nic->index][idx].addr_valid); + +} + +static inline void host_ipfilter_cache_flush(struct efhw_nic *nic, uint idx) +{ + EFHW_ASSERT(nic->index < EFHW_MAX_NR_DEVS); + EFHW_ASSERT(idx < nic->filter_tbl_size); + + memset(&host_ipfilter_cache[nic->index][idx], 0, + sizeof(struct falcon_cached_ipfilter)); + mmiowb(); +} + +static inline void +host_ipfilter_cache_set_addr(struct efhw_nic *nic, uint idx, uint dmaq, + unsigned tcp, unsigned full, + unsigned rss_b0, unsigned scat_b0, + unsigned saddr_le32, unsigned sport_le16, + unsigned daddr_le32, unsigned dport_le16) +{ + unsigned nic_i = nic->index; + + EFHW_ASSERT(nic_i < EFHW_MAX_NR_DEVS); + EFHW_ASSERT(idx < nic->filter_tbl_size); + EFHW_ASSERT(!host_ipfilter_cache[nic_i][idx].addr_valid); + + __RANGECHCK(sport_le16, SRC_TCP_DEST_UDP_1_WIDTH); + __RANGECHCK(dport_le16, SRC_TCP_DEST_UDP_1_WIDTH); + +#if FALCON_FULL_FILTER_CACHE + host_ipfilter_cache[nic_i][idx].dmaq = dmaq; + host_ipfilter_cache[nic_i][idx].saddr_le32 = saddr_le32; + host_ipfilter_cache[nic_i][idx].daddr_le32 = daddr_le32; + host_ipfilter_cache[nic_i][idx].sport_le16 = sport_le16; + host_ipfilter_cache[nic_i][idx].dport_le16 = dport_le16; + host_ipfilter_cache[nic_i][idx].tcp = tcp; + host_ipfilter_cache[nic_i][idx].full = full; + host_ipfilter_cache[nic_i][idx].rss_b0 = rss_b0; + host_ipfilter_cache[nic_i][idx].scat_b0 = scat_b0; +#endif + host_ipfilter_cache[nic_i][idx].addr_valid = 1; + mmiowb(); +} + +#if FALCON_VERIFY_FILTERS +/* Check that all active filters still exist by reading from H/W */ +static void _falcon_nic_ipfilter_sanity(struct efhw_nic *nic) +{ + unsigned i; + struct falcon_cached_ipfilter *f; + uint64_t q0_expect, q1_expect, q0_got, q1_got; + + for (i = 0; i < nic->filter_tbl_size; i++) { + f = host_ipfilter_cache[nic->index] + i; + if (!f->addr_valid) + continue; + + _falcon_nic_ipfilter_build(nic, f->tcp, f->full, + f->rss_b0, f->scat_b0, i, f->dmaq, + f->saddr_le32, f->sport_le16, + f->daddr_le32, f->dport_le16, + &q0_expect, &q1_expect); + + falcon_read_qq(EFHW_KVA(nic) + RX_FILTER_TBL0_OFST + + i * 2 * FALCON_REGISTER128, + &q0_got, &q1_got); + + if ((q0_got != q0_expect) || (q1_got != q1_expect)) { + falcon_write_qq(EFHW_KVA(nic) + 0x300, + q0_got, q1_got); + EFHW_ERR("ERROR: RX-filter[%d][%d] was " + "%"PRIx64":%" PRIx64" expected " + "%"PRIx64":%"PRIx64, + nic->index, i, q0_got, q1_got, + q0_expect, q1_expect); + } + } +} +#endif /* FALCON_VERIFY_FILTERS */ + +#if FALCON_FULL_FILTER_CACHE +static inline int +host_ipfilter_cache_check_not(uint nic, uint idx, int tcp, int full, + unsigned saddr_le32, unsigned sport_le16, + unsigned daddr_le32, unsigned dport_le16) +{ + return ((host_ipfilter_cache[nic][idx].saddr_le32 != saddr_le32) || + (host_ipfilter_cache[nic][idx].daddr_le32 != daddr_le32) || + (host_ipfilter_cache[nic][idx].sport_le16 != sport_le16) || + (host_ipfilter_cache[nic][idx].dport_le16 != dport_le16) || + (host_ipfilter_cache[nic][idx].tcp != tcp) || + (host_ipfilter_cache[nic][idx].full != full)); +} +#endif + +#define 
host_ipfilter_cache_saddr_le32(nic, idx) \ + host_ipfilter_cache[nic][idx].saddr_le32 +#define host_ipfilter_cache_daddr_le32(nic, idx) \ + host_ipfilter_cache[nic][idx].daddr_le32 +#define host_ipfilter_cache_sport_le16(nic, idx) \ + host_ipfilter_cache[nic][idx].sport_le16 +#define host_ipfilter_cache_dport_le16(nic, idx) \ + host_ipfilter_cache[nic][idx].dport_le16 +#define host_ipfilter_cache_tcp(nic, idx) \ + host_ipfilter_cache[nic][idx].tcp +#define host_ipfilter_cache_full(nic, idx) \ + host_ipfilter_cache[nic][idx].full + +/********************************************************************** + * Implementation of the HAL. ******************************************** + **********************************************************************/ + +/*---------------------------------------------------------------------------- + * + * Initialisation and configuration discovery + * + *---------------------------------------------------------------------------*/ + +#ifdef __ci_ul_driver__ + +static int falcon_nic_init_irq_channel(struct efhw_nic *nic, int enable) +{ + EFHW_ERR("%s: not implemented for ul driver", __FUNCTION__); + return -EOPNOTSUPP; +} + +#else + +static int falcon_nic_init_irq_channel(struct efhw_nic *nic, int enable) +{ + /* create a buffer for the irq channel */ + int rc; + + if (enable) { + rc = efhw_iopage_alloc(nic, &nic->irq_iobuff); + if (rc < 0) + return rc; + + falcon_nic_irq_addr_set(nic, + efhw_iopage_dma_addr(&nic->irq_iobuff)); + } else { + if (efhw_iopage_is_valid(&nic->irq_iobuff)) + efhw_iopage_free(nic, &nic->irq_iobuff); + + efhw_iopage_mark_invalid(&nic->irq_iobuff); + falcon_nic_irq_addr_set(nic, 0); + } + + EFHW_TRACE("%s: " ci_dma_addr_fmt " %sable", __FUNCTION__, + efhw_iopage_dma_addr(&nic->irq_iobuff), enable ? + "en" : "dis"); + + return 0; +} + +#endif + +static void falcon_nic_close_hardware(struct efhw_nic *nic) +{ + /* check we are in possession of some hardware */ + if (!efhw_nic_have_hw(nic)) + return; + + falcon_nic_init_irq_channel(nic, 0); + + EFHW_NOTICE("%s:", __FUNCTION__); +} + +#ifdef __ci_ul_driver__ +extern +#else +static +#endif +int falcon_nic_get_mac_config(struct efhw_nic *nic) +{ + efhw_ioaddr_t efhw_kva = nic->bar_ioaddr; + int is_mac_type_1g; + uint32_t strap, altera; + uint64_t rx_cfg, r; + + altera = readl(efhw_kva + ALTERA_BUILD_REG_OFST); + strap = readl(efhw_kva + STRAP_REG_KER_OFST) & 0x7; + + switch (nic->devtype.variant) { + case 'A': + if ((altera & 0x0fff0000) == 0x1130000) { + strap = 2; /* FPGA - PCI-X 2G */ + } else if ((altera & 0x00ff0000) == 0x140000) { + /* should be 114 */ + strap = 4; /* FPGA - PCI-X 4G */ + } else if (strap < 2 || strap > 5) { + EFHW_ERR("Invalid strap option %d altera_buid_ver=%x", + strap, altera); + return -EINVAL; + } + is_mac_type_1g = (0 != (strap & 2)); + break; + case 'B': + /* Runtime check that the hardware and software agree about + * the size of the RXFIFO. Write binary 11 across the left + * most bit, and assert we get 1 back. 
+ */ + r = 1LL << RX_TOEP_TCP_SUPPRESS_B0_LBN; + r |= (r << 1); + + /* Save the original value */ + falcon_read_q(efhw_kva + RX_CFG_REG_OFST, &rx_cfg); + + /* Write and ready the dummy value */ + falcon_write_qq(efhw_kva + RX_CFG_REG_OFST, r, 0); + falcon_read_q(efhw_kva + RX_CFG_REG_OFST, &r); + + /* Restore the original value */ + falcon_write_qq(efhw_kva + RX_CFG_REG_OFST, rx_cfg, 0); + + if (r != (1LL << RX_TOEP_TCP_SUPPRESS_B0_LBN)) { + EFHW_ERR("The FPGA build (%x) RXFIFO size does not " + "match the software", altera); + return -EINVAL; + } + is_mac_type_1g = (0 != (strap & 2)); +#if FALCON_MAC_SET_TYPE_BY_SPEED + /* Check the selected strap pins against the MAC speed - + * and adjust if necessary. + */ + { + int speed; + speed = readl(efhw_kva + MAC0_CTRL_REG_OFST) & 0x3; + is_mac_type_1g = (speed <= 2); + } +#endif + break; + default: + EFHW_ASSERT(0); + is_mac_type_1g = 0; + break; + } + + nic->fpga_version = altera; + + /* We can now set the MAC type correctly based on the strap pins. */ + if (is_mac_type_1g) { + nic->flags &= ~NIC_FLAG_10G; + } else { + /* strap & 4 must be set according to checks above */ + nic->flags |= NIC_FLAG_10G; + } + EFHW_NOTICE("Board has %s MAC: strap=%d", + 0 != (nic->flags & NIC_FLAG_10G) ? "10G" : "1G", strap); + return 0; +} + +static int +falcon_nic_init_hardware(struct efhw_nic *nic, + struct efhw_ev_handler *ev_handlers, + const uint8_t *mac_addr) +{ + int rc; + + /* header sanity checks */ + FALCON_ASSERT_VALID(); + + rc = falcon_nic_get_mac_config(nic); + if (rc < 0) + return rc; + + /* Initialise supporting modules */ + falcon_nic_ipfilter_ctor(nic); + +#if FALCON_USE_SHADOW_BUFFER_TABLE + CI_ZERO_ARRAY(_falcon_buffer_table, FALCON_BUFFER_TBL_NUM); +#endif + + /* Initialise the top level hardware blocks */ + memcpy(nic->mac_addr, mac_addr, ETH_ALEN); + + EFHW_TRACE("%s:", __FUNCTION__); + + /* nic.c:efhw_nic_init marks all the interrupt units as unused. + + ?? TODO we should be able to request the non-interrupting event + queue and the net driver's (for a net driver that is using libefhw) + additional RSS queues here. + + Result would be that that net driver could call + nic.c:efhw_nic_allocate_common_hardware_resources() and that the + IFDEF FALCON's can be removed from + nic.c:efhw_nic_allocate_common_hardware_resources() + */ + nic->irq_unit[0] = INT_EN_REG_CHAR_OFST; + + /***************************************************************** + * The rest of this function deals with initialization of the NICs + * hardware (as opposed to the initialization of the + * struct efhw_nic data structure */ + + /* char driver grabs SRM events onto the non interrupting + * event queue */ + falcon_nic_srm_upd_evq(nic, FALCON_EVQ_NONIRQ); + + /* RXDP tweaks */ + + /* ?? bug2396 rx_cfg should be ok so long as the net driver + * always pushes buffers big enough for the link MTU */ + + /* set the RX buffer cutoff size to be the same as PAGE_SIZE. + * Use this value when we think that there will be a lot of + * jumbo frames. + * + * The default value 1600 is useful when packets are small, + * but would means that jumbo frame RX queues would need more + * descriptors pushing */ + falcon_nic_set_rx_usr_buf_size(nic, FALCON_RX_USR_BUF_SIZE); + + /* TXDP tweaks */ + /* ?? bug2396 looks ok */ + falcon_nic_tx_cfg(nic, /*unlocked(for non-UDP/TCP)= */ 0); + falcon_nic_pace_cfg(nic); + + /* ?? 
bug2396 + * netdriver must load first or else must RMW this register */ + falcon_nic_rx_filter_ctl_set(nic, RX_FILTER_CTL_SRCH_LIMIT_TCP_FULL, + RX_FILTER_CTL_SRCH_LIMIT_TCP_WILD, + RX_FILTER_CTL_SRCH_LIMIT_UDP_FULL, + RX_FILTER_CTL_SRCH_LIMIT_UDP_WILD); + + if (!(nic->flags & NIC_FLAG_NO_INTERRUPT)) { + rc = efhw_keventq_ctor(nic, FALCON_EVQ_CHAR, &nic->evq[0], + ev_handlers); + if (rc < 0) { + EFHW_ERR("%s: efhw_keventq_ctor() failed (%d) evq=%d", + __FUNCTION__, rc, FALCON_EVQ_CHAR); + return rc; + } + } + rc = efhw_keventq_ctor(nic, FALCON_EVQ_NONIRQ, + &nic->evq[FALCON_EVQ_NONIRQ], NULL); + if (rc < 0) { + EFHW_ERR("%s: efhw_keventq_ctor() failed (%d) evq=%d", + __FUNCTION__, rc, FALCON_EVQ_NONIRQ); + return rc; + } + + /* allocate IRQ channel */ + rc = falcon_nic_init_irq_channel(nic, 1); + /* ignore failure at user-level for eftest */ + if ((rc < 0) && !(nic->options & NIC_OPT_EFTEST)) + return rc; + + return 0; +} + +/*-------------------------------------------------------------------- + * + * Interrupt + * + *--------------------------------------------------------------------*/ + +static void +falcon_nic_interrupt_enable(struct efhw_nic *nic, unsigned idx) +{ + int evq; + + if (idx || (nic->flags & NIC_FLAG_NO_INTERRUPT)) + return; + + /* Enable driver interrupts */ + EFHW_NOTICE("%s: enable master interrupt", __FUNCTION__); + falcon_nic_interrupt_hw_enable(nic); + + /* An interrupting eventq must start of day ack its read pointer */ + evq = falcon_idx_to_evq(nic, idx); + + if (falcon_evq_is_interrupting(nic, evq)) { + struct efhw_keventq *q = &nic->evq[idx]; + unsigned rdptr = + EFHW_EVENT_OFFSET(q, q, 1) / sizeof(efhw_event_t); + falcon_nic_evq_ack(nic, evq, rdptr, false); + EFHW_NOTICE("%s: ACK evq[%d]:%x", __FUNCTION__, evq, rdptr); + } +} + +static void falcon_nic_interrupt_disable(struct efhw_nic *nic, uint idx) +{ + /* NB. No need to check for NIC_FLAG_NO_INTERRUPT, as + ** falcon_nic_interrupt_hw_disable() will do it. */ + if (idx) + return; + falcon_nic_interrupt_hw_disable(nic); +} + +static void +falcon_nic_set_interrupt_moderation(struct efhw_nic *nic, uint idx, + uint32_t val) +{ + falcon_timer_cmd(nic, falcon_idx_to_evq(nic, idx), + TIMER_MODE_INT_HLDOFF, val / 5); +} + +static inline void legacy_irq_ack(struct efhw_nic *nic) +{ + EFHW_ASSERT(!(nic->flags & NIC_FLAG_NO_INTERRUPT)); + + if (!(nic->flags & NIC_FLAG_MSI)) { + writel(1, EFHW_KVA(nic) + INT_ACK_REG_CHAR_A1_OFST); + mmiowb(); + /* ?? FIXME: We should be doing a read here to ensure IRQ is + * thoroughly acked before we return from ISR. */ + } +} + +static int falcon_nic_interrupt(struct efhw_nic *nic) +{ + volatile uint32_t *syserr_ptr = + (uint32_t *) efhw_iopage_ptr(&nic->irq_iobuff); + int handled = 0; + int done_ack = 0; + + EFHW_ASSERT(!(nic->flags & NIC_FLAG_NO_INTERRUPT)); + EFHW_ASSERT(syserr_ptr); + + /* FIFO fill level interrupt - just log it. */ + if (unlikely(*(syserr_ptr + (DW0_OFST / 4)))) { + EFHW_WARN("%s: *** FIFO *** %x", __FUNCTION__, + *(syserr_ptr + (DW0_OFST / 4))); + *(syserr_ptr + (DW0_OFST / 4)) = 0; + handled++; + } + + /* Fatal interrupts. */ + if (unlikely(*(syserr_ptr + (DW2_OFST / 4)))) { + *(syserr_ptr + (DW2_OFST / 4)) = 0; + falcon_nic_handle_fatal_int(nic); + handled++; + } + + /* Event queue interrupt. For legacy interrupts we have to check + * that the interrupt is for us, because it could be shared. */ + if (*(syserr_ptr + (DW1_OFST / 4))) { + *(syserr_ptr + (DW1_OFST / 4)) = 0; + /* ACK must come before callback to handler fn. 
*/ + legacy_irq_ack(nic); + done_ack = 1; + handled++; + if (nic->irq_handler) + nic->irq_handler(nic, 0); + } + + if (unlikely(!done_ack)) { + if (!handled) + /* Shared interrupt line (hopefully). */ + return 0; + legacy_irq_ack(nic); + } + + EFHW_TRACE("%s: handled %d", __FUNCTION__, handled); + return 1; +} + +/*-------------------------------------------------------------------- + * + * Event Management - and SW event posting + * + *--------------------------------------------------------------------*/ + +static void +falcon_nic_event_queue_enable(struct efhw_nic *nic, uint evq, uint evq_size, + dma_addr_t q_base_addr, /* not used */ + uint buf_base_id) +{ + EFHW_ASSERT(nic); + + /*!\ TODO we can be more efficient if we know whether or not there + * is a timer attached */ + falcon_timer_cmd(nic, evq, 0 /* disable */ , 0); + + falcon_nic_evq_ptr_tbl(nic, evq, 1, buf_base_id, evq_size); + EFHW_TRACE("%s: enable evq %u size %u", __FUNCTION__, evq, evq_size); +} + +static void +falcon_nic_event_queue_disable(struct efhw_nic *nic, uint evq, int timer_only) +{ + EFHW_ASSERT(nic); + + /*!\ TODO we can be more efficient if we know whether or not there + * is a timer attached */ + falcon_timer_cmd(nic, evq, 0 /* disable */ , 0); + + if (!timer_only) + falcon_nic_evq_ptr_tbl(nic, evq, 0, 0, 0); + EFHW_TRACE("%s: disenable evq %u", __FUNCTION__, evq); +} + +static void +falcon_nic_wakeup_request(struct efhw_nic *nic, dma_addr_t q_base_addr, + int next_i, int evq) +{ + EFHW_ASSERT(evq > FALCON_EVQ_CHAR); + falcon_nic_evq_ack(nic, evq, next_i, true); + EFHW_TRACE("%s: evq %d next_i %d", __FUNCTION__, evq, next_i); +} + +static void falcon_nic_sw_event(struct efhw_nic *nic, int data, int evq) +{ + uint64_t ev_data = data; + + ev_data &= ~FALCON_EVENT_CODE_MASK; + ev_data |= FALCON_EVENT_CODE_SW; + + falcon_drv_ev(nic, ev_data, evq); + EFHW_NOTICE("%s: evq[%d]->%x", __FUNCTION__, evq, data); +} + +/*-------------------------------------------------------------------- + * + * Filter support - TODO vary the depth of the search + * + *--------------------------------------------------------------------*/ + +void +falcon_nic_ipfilter_ctor(struct efhw_nic *nic) +{ + if (nic->devtype.variant == 'B' && nic->fpga_version) + nic->filter_tbl_size = 8 * 1024; + else + nic->filter_tbl_size = 16 * 1024; + + host_ipfilter_cache_init(nic); +} + + +static int +falcon_nic_ipfilter_set(struct efhw_nic *nic, int type, int *_filter_idx, + int dmaq, + unsigned saddr_be32, unsigned sport_be16, + unsigned daddr_be32, unsigned dport_be16) +{ + FALCON_LOCK_DECL; + int tcp; + int full; + int rss_b0; + int scat_b0; + int key, hash1, hash2, idx = -1; + int k; + int rc = 0; + unsigned max_srch = -1; + + /* oh joy of joys .. maybe one day we'll optimise */ + unsigned int saddr = ntohl(saddr_be32); + unsigned int daddr = ntohl(daddr_be32); + unsigned int sport = ntohs(sport_be16); + unsigned int dport = ntohs(dport_be16); + + __RANGECHCK(sport, SRC_TCP_DEST_UDP_1_WIDTH); + __RANGECHCK(dport, SRC_TCP_DEST_UDP_1_WIDTH); + + tcp = ((type & EFHW_IP_FILTER_TYPE_TCP_MASK) != 0) ? 1 : 0; + full = ((type & EFHW_IP_FILTER_TYPE_FULL_MASK) != 0) ? 1 : 0; + rss_b0 = ((type & EFHW_IP_FILTER_TYPE_RSS_B0_MASK) != 0) ? 1 : 0; + scat_b0 = ((type & EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK) != 0) ? 
0 : 1; + if (tcp && full) + max_srch = tcp_full_srch_limit; + else if (tcp && !full) + max_srch = tcp_wild_srch_limit; + else if (!tcp && full) + max_srch = udp_full_srch_limit; + else if (!tcp && !full) + max_srch = udp_wild_srch_limit; + + EFHW_TRACE("%s: %x tcp %d full %d max_srch=%d", + __FUNCTION__, type, tcp, full, max_srch); + + /* The second hash function is simply + * h2(key) = 13 LSB of (key * 2 - 1) + * And the index(k), or the filter table address for kth search is + * index(k) = 13 LSB of (h1(key) + k * h2(key)) + */ + key = falcon_hash_get_key(saddr, sport, daddr, dport, tcp, full); + hash1 = falcon_hash_function1(key, nic->filter_tbl_size); + hash2 = falcon_hash_function2(key, nic->filter_tbl_size); + + /* Avoid race to claim a filter entry */ + FALCON_LOCK_LOCK(nic); + + for (k = 0; (unsigned)k < max_srch; k++) { + idx = falcon_hash_iterator(hash1, hash2, k, + nic->filter_tbl_size); + + EFHW_TRACE("ipfilter_set[%d:%d:%d]: src=%x:%d dest=%x:%d %s", + *_filter_idx, idx, k, + saddr, sport, daddr, dport, + host_ipfilter_cache_active(nic, idx) ? + "Active" : "Clear"); + + if (!host_ipfilter_cache_active(nic, idx)) + break; + +#if FALCON_FULL_FILTER_CACHE + /* Check that we are not duplicating the filter */ + if (!host_ipfilter_cache_check_not(nic->index, idx, tcp, full, + saddr, sport, daddr, + dport)) { + EFHW_WARN("%s: ERROR: duplicate filter (disabling " + "interrupts)", __FUNCTION__); + FALCON_LOCK_UNLOCK(nic); + falcon_nic_interrupt_hw_disable(nic); + return -EINVAL; + } +#endif + + } + if (k == max_srch) { + rc = -EADDRINUSE; + idx = -1; + goto fail1; + } + + EFHW_ASSERT(idx < (int)nic->filter_tbl_size); + + host_ipfilter_cache_set_addr(nic, idx, dmaq, tcp, full, rss_b0, + scat_b0, saddr, sport, daddr, dport); + + _falcon_nic_ipfilter_set(nic, tcp, full, rss_b0, + scat_b0, idx, dmaq, + saddr, sport, daddr, dport); + + *_filter_idx = idx; + + EFHW_TRACE("%s: filter %x rxq %d src " NIPQUAD_FMT + ":%d dest " NIPQUAD_FMT ":%d set in %d", + __FUNCTION__, idx, dmaq, + NIPQUAD(&saddr), sport, NIPQUAD(&daddr), dport, k); + +fail1: + FALCON_LOCK_UNLOCK(nic); + return rc; +} + +static void +falcon_nic_ipfilter_attach(struct efhw_nic *nic, int filter_idx, int dmaq_idx) +{ + /* Intentionally empty - Falcon attaches and sets the filter + * in filter_set */ + EFHW_TRACE("%s: attach filter %x with rxq %d - ignored", + __FUNCTION__, filter_idx, dmaq_idx); +} + +static void falcon_nic_ipfilter_detach(struct efhw_nic *nic, int filter_idx) +{ + /* Intentionally empty - Falcon attaches and sets the filter + * in filter_clear */ + EFHW_TRACE("%s: detach filter %x from rxq - ignored", + __FUNCTION__, filter_idx); +} + +static void falcon_nic_ipfilter_clear(struct efhw_nic *nic, int filter_idx) +{ + FALCON_LOCK_DECL; + + EFHW_TRACE("%s: filter %x", __FUNCTION__, filter_idx); + + /* In case the filter has already been freed */ + if (filter_idx == -1) + return; + + FALCON_LOCK_LOCK(nic); + + /* if we flush a chained hash then all we need to do is zero it out */ + host_ipfilter_cache_flush(nic, filter_idx); + _falcon_nic_ipfilter_clear(nic, filter_idx); + + FALCON_LOCK_UNLOCK(nic); + return; +} + +/*-------------------------------------------------------------------- + * + * Buffer table - helpers + * + *--------------------------------------------------------------------*/ + +#define FALCON_LAZY_COMMIT_HWM (FALCON_BUFFER_UPD_MAX - 16) + +/* Note re.: + * falcon_nic_buffer_table_lazy_commit(struct efhw_nic *nic) + * falcon_nic_buffer_table_update_poll(struct efhw_nic *nic) + * 
falcon_nic_buffer_table_confirm(struct efhw_nic *nic) + * -- these are no-ops in the user-level driver because it would need to + * coordinate with the real driver on the number of outstanding commits. + * + * An exception is made for eftest apps, which manage the hardware without + * using the char driver. + */ + +static inline void falcon_nic_buffer_table_lazy_commit(struct efhw_nic *nic) +{ +#if defined(__ci_ul_driver__) + if (!(nic->options & NIC_OPT_EFTEST)) + return; +#endif + + /* Do nothing if operating in synchronous mode. */ + if (!nic->irq_handler) + return; +} + +static inline void falcon_nic_buffer_table_update_poll(struct efhw_nic *nic) +{ + FALCON_LOCK_DECL; + int count = 0, rc = 0; + +#if defined(__ci_ul_driver__) + if (!(nic->options & NIC_OPT_EFTEST)) + return; +#endif + + /* We can be called here early days */ + if (!nic->irq_handler) + return; + + /* If we need to gather buffer update events then poll the + non-interrupting event queue */ + + /* For each _buffer_table_commit there will be an update done + event. We don't keep track of how many buffers each commit has + committed, just make sure that all the expected events have been + gathered */ + FALCON_LOCK_LOCK(nic); + + EFHW_TRACE("%s: %d", __FUNCTION__, nic->buf_commit_outstanding); + + while (nic->buf_commit_outstanding > 0) { + /* we're not expecting to handle any events that require + * upcalls into the core driver */ + struct efhw_ev_handler handler; + memset(&handler, 0, sizeof(handler)); + nic->evq[FALCON_EVQ_NONIRQ].ev_handlers = &handler; + rc = efhw_keventq_poll(nic, &nic->evq[FALCON_EVQ_NONIRQ]); + nic->evq[FALCON_EVQ_NONIRQ].ev_handlers = NULL; + + if (rc < 0) { + EFHW_ERR("%s: poll ERROR (%d:%d) ***** ", + __FUNCTION__, rc, + nic->buf_commit_outstanding); + goto out; + } + + FALCON_LOCK_UNLOCK(nic); + + if (count++) + udelay(1); + + if (count > 1000) { + EFHW_WARN("%s: poll Timeout ***** (%d)", __FUNCTION__, + nic->buf_commit_outstanding); + nic->buf_commit_outstanding = 0; + return; + } + FALCON_LOCK_LOCK(nic); + } + +out: + FALCON_LOCK_UNLOCK(nic); + return; +} + +void falcon_nic_buffer_table_confirm(struct efhw_nic *nic) +{ + /* confirm buffer table updates - should be used for items where + loss of data would be unacceptable. E.g for the buffers that back + an event or DMA queue */ + FALCON_LOCK_DECL; + +#if defined(__ci_ul_driver__) + if (!(nic->options & NIC_OPT_EFTEST)) + return; +#endif + + /* Do nothing if operating in synchronous mode. 
*/ + if (!nic->irq_handler) + return; + + FALCON_LOCK_LOCK(nic); + + _falcon_nic_buffer_table_commit(nic); + + FALCON_LOCK_UNLOCK(nic); + + falcon_nic_buffer_table_update_poll(nic); +} + +/*-------------------------------------------------------------------- + * + * Buffer table - API + * + *--------------------------------------------------------------------*/ + +static void +falcon_nic_buffer_table_clear(struct efhw_nic *nic, int buffer_id, int num) +{ + FALCON_LOCK_DECL; + FALCON_LOCK_LOCK(nic); + _falcon_nic_buffer_table_clear(nic, buffer_id, num); + FALCON_LOCK_UNLOCK(nic); +} + +static void +falcon_nic_buffer_table_set(struct efhw_nic *nic, dma_addr_t dma_addr, + uint bufsz, uint region, + int own_id, int buffer_id) +{ + FALCON_LOCK_DECL; + + EFHW_ASSERT(region < FALCON_REGION_NUM); + + EFHW_ASSERT((bufsz == EFHW_4K) || + (bufsz == EFHW_8K && FALCON_BUFFER_TABLE_FULL_MODE)); + + falcon_nic_buffer_table_update_poll(nic); + + FALCON_LOCK_LOCK(nic); + + _falcon_nic_buffer_table_set(nic, dma_addr, bufsz, region, own_id, + buffer_id); + + falcon_nic_buffer_table_lazy_commit(nic); + + FALCON_LOCK_UNLOCK(nic); +} + +void +falcon_nic_buffer_table_set_n(struct efhw_nic *nic, int buffer_id, + dma_addr_t dma_addr, uint bufsz, uint region, + int n_pages, int own_id) +{ + /* used to set up a contiguous range of buffers */ + FALCON_LOCK_DECL; + + EFHW_ASSERT(region < FALCON_REGION_NUM); + + EFHW_ASSERT((bufsz == EFHW_4K) || + (bufsz == EFHW_8K && FALCON_BUFFER_TABLE_FULL_MODE)); + + while (n_pages--) { + + falcon_nic_buffer_table_update_poll(nic); + + FALCON_LOCK_LOCK(nic); + + _falcon_nic_buffer_table_set(nic, dma_addr, bufsz, region, + own_id, buffer_id++); + + falcon_nic_buffer_table_lazy_commit(nic); + + FALCON_LOCK_UNLOCK(nic); + + dma_addr += bufsz; + } +} + +/*-------------------------------------------------------------------- + * + * DMA Queues - mid level API + * + *--------------------------------------------------------------------*/ + +#if BUG5302_WORKAROUND + +/* Tx queues can get stuck if the software write pointer is set to an index + * beyond the configured size of the queue, such that they will not flush. + * This code can be run before attempting a flush; it will detect the bogus + * value and reset it. This fixes most instances of this problem, although + * sometimes it does not work, or we may not detect it in the first place, + * if the out-of-range value was replaced by an in-range value earlier. + * (In those cases we have to apply a bigger hammer later, if we see that + * the queue is still not flushing.) 
+ */ +static void +falcon_check_for_bogus_tx_dma_wptr(struct efhw_nic *nic, uint dmaq) +{ + FALCON_LOCK_DECL; + uint64_t val_low64, val_high64; + uint64_t size, hwptr, swptr, val; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + ulong offset = falcon_dma_tx_q_offset(nic, dmaq); + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_read_qq(efhw_kva + offset, &val_low64, &val_high64); + FALCON_LOCK_UNLOCK(nic); + + size = (val_low64 >> TX_DESCQ_SIZE_LBN) + & __FALCON_MASK64(TX_DESCQ_SIZE_WIDTH); + size = (1 << size) * 512; + hwptr = (val_high64 >> __DW3(TX_DESCQ_HW_RPTR_LBN)) + & __FALCON_MASK64(TX_DESCQ_HW_RPTR_WIDTH); + swptr = (val_low64 >> TX_DESCQ_SW_WPTR_LBN) + & __FALCON_MASK64(__LW2(TX_DESCQ_SW_WPTR_LBN)); + val = (val_high64) + & + __FALCON_MASK64(__DW3 + (TX_DESCQ_SW_WPTR_LBN + TX_DESCQ_SW_WPTR_WIDTH)); + val = val << __LW2(TX_DESCQ_SW_WPTR_LBN); + swptr = swptr | val; + + if (swptr >= size) { + EFHW_WARN("Resetting bad write pointer for TXQ[%d]", dmaq); + writel((uint32_t) ((hwptr + 0) & (size - 1)), + efhw_kva + falcon_tx_dma_page_addr(dmaq) + 12); + mmiowb(); + } +} + +/* Here's that "bigger hammer": we reset all the pointers (hardware read, + * hardware descriptor cache read, software write) to zero. + */ +void falcon_clobber_tx_dma_ptrs(struct efhw_nic *nic, uint dmaq) +{ + FALCON_LOCK_DECL; + uint64_t val_low64, val_high64; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + ulong offset = falcon_dma_tx_q_offset(nic, dmaq); + + EFHW_WARN("Recovering stuck TXQ[%d]", dmaq); + FALCON_LOCK_LOCK(nic); + falcon_read_qq(efhw_kva + offset, &val_low64, &val_high64); + val_high64 &= ~(__FALCON_MASK64(TX_DESCQ_HW_RPTR_WIDTH) + << __DW3(TX_DESCQ_HW_RPTR_LBN)); + val_high64 &= ~(__FALCON_MASK64(TX_DC_HW_RPTR_WIDTH) + << __DW3(TX_DC_HW_RPTR_LBN)); + falcon_write_qq(efhw_kva + offset, val_low64, val_high64); + mmiowb(); + writel(0, efhw_kva + falcon_tx_dma_page_addr(dmaq) + 12); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); +} + +#endif + +static inline int +__falcon_really_flush_tx_dma_channel(struct efhw_nic *nic, uint dmaq) +{ + FALCON_LOCK_DECL; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + uint val; + + EFHW_BUILD_ASSERT(TX_FLUSH_DESCQ_REG_KER_OFST == + TX_FLUSH_DESCQ_REG_OFST); + + __DWCHCK(TX_FLUSH_DESCQ_CMD_LBN, TX_FLUSH_DESCQ_CMD_WIDTH); + __DWCHCK(TX_FLUSH_DESCQ_LBN, TX_FLUSH_DESCQ_WIDTH); + __RANGECHCK(dmaq, TX_FLUSH_DESCQ_WIDTH); + + val = ((1 << TX_FLUSH_DESCQ_CMD_LBN) | (dmaq << TX_FLUSH_DESCQ_LBN)); + + EFHW_TRACE("TX DMA flush[%d]", dmaq); + +#if BUG5302_WORKAROUND + falcon_check_for_bogus_tx_dma_wptr(nic, dmaq); +#endif + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_write_qq(efhw_kva + TX_FLUSH_DESCQ_REG_OFST, + val, FALCON_ATOMIC_TX_FLUSH_DESCQ); + + mmiowb(); + FALCON_LOCK_UNLOCK(nic); + return 0; +} + +static inline int +__falcon_is_tx_dma_channel_flushed(struct efhw_nic *nic, uint dmaq) +{ + FALCON_LOCK_DECL; + uint64_t val_low64, val_high64; + uint64_t enable, flush_pending; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + ulong offset = falcon_dma_tx_q_offset(nic, dmaq); + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_read_qq(efhw_kva + offset, &val_low64, &val_high64); + FALCON_LOCK_UNLOCK(nic); + + /* should see one of three values for these 2 bits + * 1, queue enabled no flush pending + * - i.e. first flush request + * 2, queue enabled, flush pending + * - i.e. 
request to reflush before flush finished + * 3, queue disabled (no flush pending) + * - flush complete + */ + __DWCHCK(TX_DESCQ_FLUSH_LBN, TX_DESCQ_FLUSH_WIDTH); + __DW3CHCK(TX_DESCQ_EN_LBN, TX_DESCQ_EN_WIDTH); + enable = val_high64 & (1 << __DW3(TX_DESCQ_EN_LBN)); + flush_pending = val_low64 & (1 << TX_DESCQ_FLUSH_LBN); + + if (enable && !flush_pending) + return 0; + + EFHW_TRACE("%d, %s: %s, %sflush pending", dmaq, __FUNCTION__, + enable ? "enabled" : "disabled", + flush_pending ? "" : "NO "); + /* still in progress */ + if (enable && flush_pending) + return -EALREADY; + + return -EAGAIN; +} + +static int falcon_flush_tx_dma_channel(struct efhw_nic *nic, uint dmaq) +{ + int rc; + rc = __falcon_is_tx_dma_channel_flushed(nic, dmaq); + if (rc < 0) { + EFHW_WARN("%s: failed %d", __FUNCTION__, rc); + return rc; + } + return __falcon_really_flush_tx_dma_channel(nic, dmaq); +} + +static int +__falcon_really_flush_rx_dma_channel(struct efhw_nic *nic, uint dmaq) +{ + FALCON_LOCK_DECL; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + uint val; + + EFHW_BUILD_ASSERT(RX_FLUSH_DESCQ_REG_KER_OFST == + RX_FLUSH_DESCQ_REG_OFST); + + __DWCHCK(RX_FLUSH_DESCQ_CMD_LBN, RX_FLUSH_DESCQ_CMD_WIDTH); + __DWCHCK(RX_FLUSH_DESCQ_LBN, RX_FLUSH_DESCQ_WIDTH); + __RANGECHCK(dmaq, RX_FLUSH_DESCQ_WIDTH); + + val = ((1 << RX_FLUSH_DESCQ_CMD_LBN) | (dmaq << RX_FLUSH_DESCQ_LBN)); + + EFHW_TRACE("RX DMA flush[%d]", dmaq); + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_write_qq(efhw_kva + RX_FLUSH_DESCQ_REG_OFST, val, + FALCON_ATOMIC_RX_FLUSH_DESCQ); + mmiowb(); + FALCON_LOCK_UNLOCK(nic); + return 0; +} + +static inline int +__falcon_is_rx_dma_channel_flushed(struct efhw_nic *nic, uint dmaq) +{ + FALCON_LOCK_DECL; + uint64_t val; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + ulong offset = falcon_dma_rx_q_offset(nic, dmaq); + + /* Falcon requires 128 bit atomic access for this register */ + FALCON_LOCK_LOCK(nic); + falcon_read_q(efhw_kva + offset, &val); + FALCON_LOCK_UNLOCK(nic); + + __DWCHCK(RX_DESCQ_EN_LBN, RX_DESCQ_EN_WIDTH); + + /* is it enabled? */ + return (val & (1 << RX_DESCQ_EN_LBN)) + ? 
0 : -EAGAIN; +} + +static int falcon_flush_rx_dma_channel(struct efhw_nic *nic, uint dmaq) +{ + int rc; + rc = __falcon_is_rx_dma_channel_flushed(nic, dmaq); + if (rc < 0) { + EFHW_ERR("%s: failed %d", __FUNCTION__, rc); + return rc; + } + return __falcon_really_flush_rx_dma_channel(nic, dmaq); +} + +/*-------------------------------------------------------------------- + * + * Falcon specific event callbacks + * + *--------------------------------------------------------------------*/ + +int +falcon_handle_char_event(struct efhw_nic *nic, struct efhw_ev_handler *h, + efhw_event_t *ev) +{ + EFHW_TRACE("DRIVER EVENT: "FALCON_EVENT_FMT, + FALCON_EVENT_PRI_ARG(*ev)); + + switch (FALCON_EVENT_DRIVER_SUBCODE(ev)) { + + case TX_DESCQ_FLS_DONE_EV_DECODE: + EFHW_TRACE("TX[%d] flushed", + (int)FALCON_EVENT_TX_FLUSH_Q_ID(ev)); +#if !defined(__ci_ul_driver__) + efhw_handle_txdmaq_flushed(nic, h, ev); +#endif + break; + + case RX_DESCQ_FLS_DONE_EV_DECODE: + EFHW_TRACE("RX[%d] flushed", + (int)FALCON_EVENT_TX_FLUSH_Q_ID(ev)); +#if !defined(__ci_ul_driver__) + efhw_handle_rxdmaq_flushed(nic, h, ev); +#endif + break; + + case SRM_UPD_DONE_EV_DECODE: + nic->buf_commit_outstanding = + max(0, nic->buf_commit_outstanding - 1); + EFHW_TRACE("COMMIT DONE %d", nic->buf_commit_outstanding); + break; + + case EVQ_INIT_DONE_EV_DECODE: + EFHW_TRACE("EVQ INIT"); + break; + + case WAKE_UP_EV_DECODE: + EFHW_TRACE("WAKE UP"); + efhw_handle_wakeup_event(nic, h, ev); + break; + + case TIMER_EV_DECODE: + EFHW_TRACE("TIMER"); + efhw_handle_timeout_event(nic, h, ev); + break; + + case RX_DESCQ_FLSFF_OVFL_EV_DECODE: + /* This shouldn't happen. */ + EFHW_ERR("%s: RX flush fifo overflowed", __FUNCTION__); + return -EINVAL; + + default: + EFHW_TRACE("UNKOWN DRIVER EVENT: " FALCON_EVENT_FMT, + FALCON_EVENT_PRI_ARG(*ev)); + break; + } + return 0; +} + +/*-------------------------------------------------------------------- + * + * Abstraction Layer Hooks + * + *--------------------------------------------------------------------*/ + +struct efhw_func_ops falcon_char_functional_units = { + falcon_nic_close_hardware, + falcon_nic_init_hardware, + falcon_nic_interrupt, + falcon_nic_interrupt_enable, + falcon_nic_interrupt_disable, + falcon_nic_set_interrupt_moderation, + falcon_nic_event_queue_enable, + falcon_nic_event_queue_disable, + falcon_nic_wakeup_request, + falcon_nic_sw_event, + falcon_nic_ipfilter_set, + falcon_nic_ipfilter_attach, + falcon_nic_ipfilter_detach, + falcon_nic_ipfilter_clear, + falcon_dmaq_tx_q_init, + falcon_dmaq_rx_q_init, + falcon_dmaq_tx_q_disable, + falcon_dmaq_rx_q_disable, + falcon_flush_tx_dma_channel, + falcon_flush_rx_dma_channel, + falcon_nic_buffer_table_set, + falcon_nic_buffer_table_set_n, + falcon_nic_buffer_table_clear, + falcon_nic_buffer_table_commit, +}; Index: head-2008-03-17/drivers/net/sfc/sfc_resource/falcon_hash.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/falcon_hash.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,178 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains EtherFabric NIC hash algorithms implementation. 
+ * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include + + +/* this mask is per filter bank hence /2 */ +#define FILTER_MASK(n) ((n) / 2u - 1u) + +/* + * Main Functions related to the Hash Table Generation + * Author: Srinivasaih, Nataraj + * Created: Thu May 13:32:41 PDT 2004 + * $Id: 1005-2.6.25-xen-sfc-resource-driver.patch,v 1.1 2008-05-23 17:35:35 niro Exp $ + */ +/*************************************************************************** +Class Maximum number of Valid address ranges + hosts per network +A 16777214 1.0.0.1 through 9.255.255.254 + 11.0.0.1 through 126.255.255.254 +B 65534 128.0.0.1 through 172.15.255.254 + 172.32.0.1 through 191.255.255.254 +C 254 192.0.0.1 through 192.167.255.254 + 192.169.0.1 through 223.255.255.254 +P 16777214 10.0.0.1 through 10.255.255.254 (10/8) + 1048574 172.16.0.1 through 172.31.255.254 (172.16/12) + 65534 192.168.0.1 through 192.168.255.254 (192.168/16) + +R - 0.0.0.0 through 0.255.255.255 + (used if host will be assigned a + valid address dynamically) + 127.0.0.0 through 127.255.255.255 + (loopback addresses) + +P : Private internets only +R : Reserved +****************************************************************************/ + +/* All LE parameters */ +unsigned int +falcon_hash_get_key(unsigned int src_ip, unsigned int src_port, + unsigned int dest_ip, unsigned int dest_port, + int tcp, int full) +{ + + unsigned int result = 0; + int net_type; + + EFHW_ASSERT(tcp == 0 || tcp == 1); + EFHW_ASSERT(full == 0 || full == 1); + + net_type = tcp << 4 | full; + + /* Note that src_ip and src_port ignored if a wildcard filter */ + switch (net_type) { + case 0x01: /* UDP Full */ + result = ((dest_ip & 0xfffffffe) | (!(dest_ip & 1))) ^ + (((dest_port << 16) & 0xFFFF0000) | + ((src_ip >> 16) & 0x0000FFFF)) ^ + (((src_ip << 16) & 0xFFFF0000) | + ((src_port & 0x0000FFFF))); + EFHW_TRACE("falcon_hash_get_key: UDP Full %x", result); + break; + case 0x00: /* UDP Wild Card */ + result = ((dest_ip & 0xfffffffe) | (!(dest_ip & 1))) ^ + (((dest_port << 16) & 0x00000000) | + ((src_ip >> 16) & 0x00000000)) ^ + (((src_ip << 16) & 0x00000000) | + ((dest_port & 0x0000FFFF))); + EFHW_TRACE("falcon_hash_get_key: UDP Wildcard %x", result); + break; + case 0x10: /* TCP Wild Card */ + result = (dest_ip) ^ + (((dest_port << 16) & 0xFFFF0000) | + ((src_ip >> 16) & 0x00000000)) ^ + (((src_ip << 16) & 0x00000000) | + ((src_port & 0x00000000))); + EFHW_TRACE("falcon_hash_get_key: TCP Wildcard %x", result); + break; + case 0x11: /* TCP Full */ + result = (dest_ip) ^ + (((dest_port << 16) & 0xFFFF0000) | + ((src_ip >> 16) & 0x0000FFFF)) ^ + (((src_ip 
<< 16) & 0xFFFF0000) | + ((src_port & 0x0000FFFF))); + EFHW_TRACE("falcon_hash_get_key: TCP Full %x", result); + break; + default: + EFHW_ASSERT(0); + + } + return (result); +} + +/* This function generates the First Hash key */ +unsigned int falcon_hash_function1(unsigned int key, unsigned int nfilters) +{ + + unsigned short int lfsr_reg; + unsigned int tmp_key; + int index; + + unsigned short int lfsr_input; + unsigned short int single_bit_key; + unsigned short int bit16_lfsr; + unsigned short int bit3_lfsr; + + lfsr_reg = 0xFFFF; + tmp_key = key; + + /* For Polynomial equation X^16+X^3+1 */ + for (index = 0; index < 32; index++) { + /* Get the bit from key and shift the key */ + single_bit_key = (tmp_key & 0x80000000) >> 31; + tmp_key = tmp_key << 1; + + /* get the Tap bits to XOR operation */ + bit16_lfsr = (lfsr_reg & 0x8000) >> 15; + bit3_lfsr = (lfsr_reg & 0x0004) >> 2; + + /* Get the Input value to the LFSR */ + lfsr_input = ((bit16_lfsr ^ bit3_lfsr) ^ single_bit_key); + + /* Shift and store out of the two TAPs */ + lfsr_reg = lfsr_reg << 1; + lfsr_reg = lfsr_reg | (lfsr_input & 0x0001); + + } + + lfsr_reg = lfsr_reg & FILTER_MASK(nfilters); + + return lfsr_reg; +} + +/* This function generates the Second Hash */ +unsigned int +falcon_hash_function2(unsigned int key, unsigned int nfilters) +{ + return (unsigned int)(((unsigned long long)key * 2 - 1) & + FILTER_MASK(nfilters)); +} + +/* This function iterates through the hash table */ +unsigned int +falcon_hash_iterator(unsigned int hash1, unsigned int hash2, + unsigned int n_search, unsigned int nfilters) +{ + return ((hash1 + (n_search * hash2)) & FILTER_MASK(nfilters)); +} Index: head-2008-03-17/drivers/net/sfc/sfc_resource/falcon_mac.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/falcon_mac.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,171 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains MACs (Mentor MAC & GDACT1 ) support for Falcon. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include + +/******************************************************************** + * Mentor MAC + */ + +#define _PRE(x) GM##x + +/*-------------------------------------------------------------------- + * + * Debug Support + * + *--------------------------------------------------------------------*/ + +#define MENTOR_MAC_ASSERT_VALID() \ + EFHW_ASSERT(nic); \ + EFHW_ASSERT(EFHW_KVA(nic)); \ + EFHW_ASSERT(_PRE(_CFG1_REG_OFST) == _PRE(_CFG1_REG_KER_OFST)); \ + EFHW_ASSERT(_PRE(_CFG2_REG_OFST) == _PRE(_CFG2_REG_KER_OFST)); \ + EFHW_ASSERT(_PRE(_IPG_REG_OFST) == _PRE(_IPG_REG_KER_OFST)); \ + EFHW_ASSERT(_PRE(_HD_REG_OFST) == _PRE(_HD_REG_KER_OFST)); \ + EFHW_ASSERT(_PRE(_MAX_FLEN_REG_OFST) == _PRE(_MAX_FLEN_REG_KER_OFST)); \ + EFHW_ASSERT(_PRE(_TEST_REG_OFST) == _PRE(_TEST_REG_KER_OFST)); \ + EFHW_ASSERT(_PRE(_ADR1_REG_OFST) == _PRE(_ADR1_REG_KER_OFST)); \ + EFHW_ASSERT(_PRE(_ADR2_REG_OFST) == _PRE(_ADR2_REG_KER_OFST)); \ + EFHW_ASSERT(_PRE(F_CFG0_REG_OFST) == _PRE(F_CFG0_REG_KER_OFST)); \ + EFHW_ASSERT(_PRE(F_CFG1_REG_OFST) == _PRE(F_CFG1_REG_KER_OFST)); \ + EFHW_ASSERT(_PRE(F_CFG2_REG_OFST) == _PRE(F_CFG2_REG_KER_OFST)); \ + EFHW_ASSERT(_PRE(F_CFG3_REG_OFST) == _PRE(F_CFG3_REG_KER_OFST)); \ + EFHW_ASSERT(_PRE(F_CFG4_REG_OFST) == _PRE(F_CFG4_REG_KER_OFST)); \ + EFHW_ASSERT(_PRE(F_CFG5_REG_OFST) == _PRE(F_CFG5_REG_KER_OFST)); + +/*! Get MAC current address - i.e not necessarily the one in the EEPROM */ +static inline void mentormac_get_mac_addr(struct efhw_nic *nic) +{ + efhw_ioaddr_t mac_kva; + uint val1, val2; + + MENTOR_MAC_ASSERT_VALID(); + + mac_kva = GM_P0_BASE + EFHW_KVA(nic); + + val1 = readl(mac_kva + _PRE(_ADR1_REG_OFST)); + val2 = readl(mac_kva + _PRE(_ADR2_REG_OFST)); + +#if 0 + nic->mac_addr[0] = (val1 & 0xff000000) >> 24; + nic->mac_addr[1] = (val1 & 0x00ff0000) >> 16; + nic->mac_addr[2] = (val1 & 0x0000ff00) >> 8; + nic->mac_addr[3] = (val1 & 0x000000ff) >> 0; + nic->mac_addr[4] = (val2 & 0xff000000) >> 24; + nic->mac_addr[5] = (val2 & 0x00ff0000) >> 16; +#else + nic->mac_addr[5] = (val1 & 0xff000000) >> 24; + nic->mac_addr[4] = (val1 & 0x00ff0000) >> 16; + nic->mac_addr[3] = (val1 & 0x0000ff00) >> 8; + nic->mac_addr[2] = (val1 & 0x000000ff) >> 0; + nic->mac_addr[1] = (val2 & 0xff000000) >> 24; + nic->mac_addr[0] = (val2 & 0x00ff0000) >> 16; +#endif +} + + +/******************************************************************** + * GDACT10 MAC + */ + +/*-------------------------------------------------------------------- + * + * Debug Support + * + *--------------------------------------------------------------------*/ + +#define GDACT10_MAC_ASSERT_VALID() \ + EFHW_ASSERT(nic); \ + EFHW_ASSERT(EFHW_KVA(nic)); \ + EFHW_ASSERT(XM_GLB_CFG_REG_P0_OFST == XM_GLB_CFG_REG_KER_P0_OFST); \ + EFHW_ASSERT(XM_TX_CFG_REG_P0_OFST == XM_TX_CFG_REG_KER_P0_OFST); \ + EFHW_ASSERT(XM_RX_CFG_REG_P0_OFST == XM_RX_CFG_REG_KER_P0_OFST); \ + EFHW_ASSERT(MAC0_SPEED_LBN == MAC1_SPEED_LBN); \ + EFHW_ASSERT(MAC0_SPEED_WIDTH == MAC1_SPEED_WIDTH); \ + EFHW_ASSERT(MAC0_LINK_STATUS_LBN == MAC1_LINK_STATUS_LBN); \ + EFHW_ASSERT(MAC0_LINK_STATUS_WIDTH == MAC1_LINK_STATUS_WIDTH); \ + EFHW_ASSERT(MAC1_BCAD_ACPT_LBN == MAC0_BCAD_ACPT_LBN); \ + EFHW_ASSERT(MAC1_UC_PROM_LBN == MAC0_UC_PROM_LBN); \ + 
EFHW_ASSERT(MAC0_CTRL_REG_KER_OFST == MAC0_CTRL_REG_OFST); \ + EFHW_ASSERT(MAC1_CTRL_REG_KER_OFST == MAC1_CTRL_REG_OFST); \ + EFHW_ASSERT(XM_ADR_LO_REG_KER_P0_OFST == XM_ADR_LO_REG_P0_OFST); \ + EFHW_ASSERT(XM_ADR_HI_REG_KER_P0_OFST == XM_ADR_HI_REG_P0_OFST); \ + EFHW_ASSERT(XM_RX_PARAM_REG_KER_P0_OFST == XM_RX_PARAM_REG_P0_OFST); + +/*-------------------------------------------------------------------- + * + * Information gathering + * + *--------------------------------------------------------------------*/ + +/*! Get MAC current address - i.e not necessarily the one in the EEPROM */ +static inline void GDACT10mac_get_mac_addr(struct efhw_nic *nic) +{ + uint val1, val2; + efhw_ioaddr_t efhw_kva = EFHW_KVA(nic); + FALCON_LOCK_DECL; + + GDACT10_MAC_ASSERT_VALID(); + + EFHW_ASSERT(XM_ADR_LO_LBN == 0); + EFHW_ASSERT(XM_ADR_LO_WIDTH == 32); + EFHW_ASSERT(XM_ADR_HI_LBN == 0); + EFHW_ASSERT(XM_ADR_HI_WIDTH == 16); + + FALCON_LOCK_LOCK(nic); + + val1 = readl(efhw_kva + XM_ADR_LO_REG_P0_OFST); + val2 = readl(efhw_kva + XM_ADR_HI_REG_P0_OFST); + + FALCON_LOCK_UNLOCK(nic); + + /* The HW scores no points for consistency */ + nic->mac_addr[5] = (val2 & 0x0000ff00) >> 8; + nic->mac_addr[4] = (val2 & 0x000000ff) >> 0; + nic->mac_addr[3] = (val1 & 0xff000000) >> 24; + nic->mac_addr[2] = (val1 & 0x00ff0000) >> 16; + nic->mac_addr[1] = (val1 & 0x0000ff00) >> 8; + nic->mac_addr[0] = (val1 & 0x000000ff) >> 0; +} + + +/******************************************************************** + * Call one or another function + */ + +void falcon_get_mac_addr(struct efhw_nic *nic) +{ + if (nic->flags & NIC_FLAG_10G) + GDACT10mac_get_mac_addr(nic); + else + mentormac_get_mac_addr(nic); +} Index: head-2008-03-17/drivers/net/sfc/sfc_resource/filter_resource.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/filter_resource.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,317 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains filters support. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include +#include +#include +#include +#include +#include + +struct filter_resource_manager { + struct efrm_resource_manager rm; + struct kfifo *free_ids; +}; + +static struct filter_resource_manager *efrm_filter_manager; + +void efrm_filter_resource_free(struct filter_resource *frs) +{ + struct efhw_nic *nic; + int nic_i; + int id; + + EFRM_RESOURCE_ASSERT_VALID(&frs->rs, 1); + + EFRM_TRACE("%s: " EFRM_RESOURCE_FMT, __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle)); + + /* if we have a PT endpoint */ + if (NULL != frs->pt) { + /* Detach the filter */ + EFRM_FOR_EACH_NIC_IN_SET(&frs->nic_set, nic_i, nic) + efhw_nic_ipfilter_detach(nic, frs->filter_idx); + + /* Release our ref to the PT resource. */ + EFRM_TRACE("%s: releasing PT resource reference", + __FUNCTION__); + efrm_vi_resource_release(frs->pt); + } + + /* Disable the filter. */ + EFRM_FOR_EACH_NIC_IN_SET(&frs->nic_set, nic_i, nic) + efhw_nic_ipfilter_clear(nic, frs->filter_idx); + + /* Free this filter. */ + id = EFRM_RESOURCE_INSTANCE(frs->rs.rs_handle); + EFRM_VERIFY_EQ(kfifo_put(efrm_filter_manager->free_ids, + (unsigned char *)&id, sizeof(id)), + sizeof(id)); + + EFRM_DO_DEBUG(memset(frs, 0, sizeof(*frs))); + kfree(frs); +} +EXPORT_SYMBOL(efrm_filter_resource_free); + +static void filter_rm_dtor(struct efrm_resource_manager *rm) +{ + EFRM_TRACE("filter_rm_dtor"); + + EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_filter_manager->rm); + EFRM_ASSERT(&efrm_filter_manager->rm == rm); + + kfifo_vfree(efrm_filter_manager->free_ids); + EFRM_TRACE("filter_rm_dtor: done"); +} + +/**********************************************************************/ +/**********************************************************************/ +/**********************************************************************/ + +int efrm_create_filter_resource_manager(struct efrm_resource_manager **rm_out) +{ + int rc; + + EFRM_ASSERT(rm_out); + + efrm_filter_manager = + kmalloc(sizeof(struct filter_resource_manager), GFP_KERNEL); + if (efrm_filter_manager == 0) + return -ENOMEM; + memset(efrm_filter_manager, 0, sizeof(*efrm_filter_manager)); + + rc = efrm_resource_manager_ctor(&efrm_filter_manager->rm, + filter_rm_dtor, "FILTER", + EFRM_RESOURCE_FILTER, 0); + if (rc < 0) + goto fail1; + + /* Create a pool of free instances */ + rc = efrm_kfifo_id_ctor(&efrm_filter_manager->free_ids, + 0, EFHW_IP_FILTER_NUM, + &efrm_filter_manager->rm.rm_lock); + if (rc != 0) + goto fail2; + + *rm_out = &efrm_filter_manager->rm; + EFRM_TRACE("%s: filter resources created - %d IDs", + __FUNCTION__, kfifo_len(efrm_filter_manager->free_ids)); + return 0; + +fail2: + efrm_resource_manager_dtor(&efrm_filter_manager->rm); +fail1: + memset(efrm_filter_manager, 0, sizeof(*efrm_filter_manager)); + kfree(efrm_filter_manager); + return rc; + +} + +/*-------------------------------------------------------------------- + *! 
+ * Called to set/change the PT endpoint of a filter + * + * Example of use is TCP helper when it finds a wildcard IP filter + * needs to change which application it delivers traffic to + * + * \param frs filter resource + * \param pt_handle handle of new PT endpoint + * + * \return standard error codes + * + *--------------------------------------------------------------------*/ +int +efrm_filter_resource_set_ptresource(struct filter_resource *frs, + struct vi_resource *ptrs) +{ + int rc, pti, nic_i; + struct efhw_nic *nic; + + EFRM_ASSERT(frs); + + /* if filter is attached to a valid PT endpoint */ + if (NULL != frs->pt) { + + EFRM_TRACE("%s: detaching PT resource " EFRM_RESOURCE_FMT + " from filter ", + __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle)); + /* Detach the filter */ + EFRM_FOR_EACH_NIC_IN_SET(&frs->nic_set, nic_i, nic) + efhw_nic_ipfilter_detach(nic, frs->filter_idx); + + /* release reference */ + efrm_vi_resource_release(frs->pt); + frs->pt = NULL; + } + + if (ptrs != NULL) { + /* get PT endpoint index */ + EFRM_RESOURCE_ASSERT_VALID(&ptrs->rs, 0); + EFRM_ASSERT(EFRM_RESOURCE_TYPE(ptrs->rs.rs_handle) == + EFRM_RESOURCE_VI); + pti = EFRM_RESOURCE_INSTANCE(ptrs->rs.rs_handle); + if (pti == 0) { + EFRM_ERR("%s: cannot filter for channel 0", + __FUNCTION__); + rc = -EINVAL; + goto fail2; + } + frs->pt = ptrs; + EFRM_TRACE("%s: attaching PT resource " EFRM_RESOURCE_FMT + " to filter", + __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(frs->pt->rs.rs_handle)); + EFRM_FOR_EACH_NIC_IN_SET(&frs->nic_set, nic_i, nic) + efhw_nic_ipfilter_attach(nic, frs->filter_idx, pti); + efrm_vi_resource_ref(frs->pt); + } + return 0; + +fail2: + efrm_vi_resource_release(frs->pt); + return rc; +} +EXPORT_SYMBOL(efrm_filter_resource_set_ptresource); + +int efrm_filter_resource_clear(struct filter_resource *frs) +{ + struct efhw_nic *nic; + int nic_i; + + EFRM_ASSERT(frs); + EFRM_FOR_EACH_NIC_IN_SET(&frs->nic_set, nic_i, nic) + efhw_nic_ipfilter_clear(nic, frs->filter_idx); + + return 0; +} +EXPORT_SYMBOL(efrm_filter_resource_clear); + +int +__efrm_filter_resource_set(struct filter_resource *frs, int type, + unsigned saddr, uint16_t sport, + unsigned daddr, uint16_t dport) +{ + struct efhw_nic *nic; + int nic_i, rc = 0; + unsigned instance = EFRM_RESOURCE_INSTANCE(frs->pt->rs.rs_handle); + + EFRM_ASSERT(frs); + EFRM_ASSERT(frs->pt); + + if (efrm_nic_table.a_nic->devtype.variant >= 'B') { + /* Scatter setting must match the setting for + * the corresponding RX queue */ + if (!(frs->pt->flags & EFHW_VI_JUMBO_EN)) + type |= EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK; + } + + EFRM_FOR_EACH_NIC_IN_SET(&frs->nic_set, nic_i, nic) + if (rc >= 0) + rc = efhw_nic_ipfilter_set(nic, type, &frs->filter_idx, + instance, + saddr, sport, daddr, dport); + + return rc; +} +EXPORT_SYMBOL(__efrm_filter_resource_set);; + +int +efrm_filter_resource_alloc(struct vi_resource *vi_parent, + struct filter_resource **frs_out) +{ + struct efhw_nic *nic; + int nic_i, rc, instance; + struct filter_resource *frs; + + EFRM_ASSERT(frs_out); + EFRM_ASSERT(efrm_filter_manager); + EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_filter_manager->rm); + EFRM_ASSERT(vi_parent == NULL || + EFRM_RESOURCE_TYPE(vi_parent->rs.rs_handle) == + EFRM_RESOURCE_VI); + + /* Allocate resource data structure. */ + frs = kmalloc(sizeof(struct filter_resource), GFP_KERNEL); + if (!frs) + return -ENOMEM; + efrm_nic_set_clear(&frs->nic_set); + + /* Allocate an instance. 
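+ * kfifo_get() copies sizeof(instance) bytes from the pool of free ids
+ * set up in efrm_create_filter_resource_manager() and returns the number
+ * of bytes copied, so a short read below simply means the pool is empty
+ * and the allocation fails with -EBUSY.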
*/ + rc = kfifo_get(efrm_filter_manager->free_ids, + (unsigned char *)&instance, sizeof(instance)); + if (rc != sizeof(instance)) { + EFRM_TRACE("%s: out of instances", __FUNCTION__); + EFRM_ASSERT(rc == 0); + rc = -EBUSY; + goto fail1; + } + + /* Initialise the resource DS. */ + efrm_resource_init(&frs->rs, EFRM_RESOURCE_FILTER, instance); + frs->pt = vi_parent; + if (frs->pt) + efrm_vi_resource_ref(frs->pt); + frs->filter_idx = -1; + EFRM_FOR_EACH_NIC(nic_i, nic) + efrm_nic_set_write(&frs->nic_set, nic_i, true); + + EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " Q %d idx %x", + __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(frs->rs.rs_handle), + vi_parent == NULL ? -1 : + EFRM_RESOURCE_INSTANCE(vi_parent->rs.rs_handle), + frs->filter_idx); + + /* Put it into the resource manager's table. */ + rc = efrm_resource_manager_insert(&frs->rs); + if (rc != 0) { + if (atomic_dec_and_test(&frs->rs.rs_ref_count)) + efrm_filter_resource_free(frs); + return rc; + } + + *frs_out = frs; + return 0; + +fail1: + memset(frs, 0, sizeof(*frs)); + kfree(frs); + return rc; +} +EXPORT_SYMBOL(efrm_filter_resource_alloc); Index: head-2008-03-17/drivers/net/sfc/sfc_resource/iobufset_resource.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/iobufset_resource.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,373 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains non-contiguous I/O buffers support. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include +#include +#include +#include +#include +#include + +#define EFRM_IOBUFSET_MAX_NUM_INSTANCES 0x00010000 + +struct iobufset_resource_manager { + struct efrm_resource_manager rm; + struct kfifo *free_ids; +}; + +struct iobufset_resource_manager *efrm_iobufset_manager; + +#define iobsrs(rs1) iobufset_resource(rs1) + +/* Returns size of iobufset resource data structure. 
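+ * The page descriptors are stored inline after the fixed header, so the
+ * size is offsetof(struct iobufset_resource, bufs) plus one efhw_iopage_t
+ * per page; a 32-page set, for example, needs the fixed header plus
+ * 32 * sizeof(efhw_iopage_t) bytes.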
*/ +static inline size_t iobsrs_size(int no_pages) +{ + return offsetof(struct iobufset_resource, bufs) + + no_pages * sizeof(efhw_iopage_t); +} + +void efrm_iobufset_resource_free(struct iobufset_resource *rs) +{ + unsigned int no_pages; + unsigned int i; + int id; + + EFRM_RESOURCE_ASSERT_VALID(&rs->rs, 1); + no_pages = rs->n_bufs; + + if (rs->buf_tbl_alloc.base != (unsigned)-1) + efrm_buffer_table_free(&rs->buf_tbl_alloc); + + /* see comment on call to efhw_iopage_alloc in the alloc routine above + for discussion on use of efrm_nic_table.a_nic here */ + EFRM_ASSERT(efrm_nic_table.a_nic); + if (rs->order == 0) { + for (i = 0; i < rs->n_bufs; ++i) + efhw_iopage_free(efrm_nic_table.a_nic, &rs->bufs[i]); + } else { + /* it is important that this is executed in increasing page + * order because some implementations of + * efhw_iopages_init_from_iopage() assume this */ + for (i = 0; i < rs->n_bufs; + i += rs->pages_per_contiguous_chunk) { + efhw_iopages_t iopages; + efhw_iopages_init_from_iopage(&iopages, &rs->bufs[i], + rs->order); + efhw_iopages_free(efrm_nic_table.a_nic, &iopages); + } + } + + /* free the instance number */ + id = EFRM_RESOURCE_INSTANCE(rs->rs.rs_handle); + EFRM_VERIFY_EQ(kfifo_put(efrm_iobufset_manager->free_ids, + (unsigned char *)&id, sizeof(id)), sizeof(id)); + + efrm_vi_resource_release(rs->evq); + + EFRM_DO_DEBUG(memset(rs, 0, sizeof(*rs))); + if (iobsrs_size(no_pages) < PAGE_SIZE) { + kfree(rs); + } else { + vfree(rs); + } +} +EXPORT_SYMBOL(efrm_iobufset_resource_free); + +int +efrm_iobufset_resource_alloc(int32_t n_pages, + int32_t pages_per_contiguous_chunk, + struct vi_resource *vi_evq, + bool phys_addr_mode, + uint32_t faultonaccess, + struct iobufset_resource **iobrs_out) +{ + struct iobufset_resource *iobrs; + int rc, instance, object_size; + unsigned int i; + + EFRM_ASSERT(iobrs_out); + EFRM_ASSERT(efrm_iobufset_manager); + EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_iobufset_manager->rm); + EFRM_RESOURCE_ASSERT_VALID(&vi_evq->rs, 0); + EFRM_ASSERT(EFRM_RESOURCE_TYPE(vi_evq->rs.rs_handle) == + EFRM_RESOURCE_VI); + EFRM_ASSERT(efrm_nic_table.a_nic); + + /* allocate the resource data structure. */ + object_size = iobsrs_size(n_pages); + if (object_size < PAGE_SIZE) { + /* this should be OK from a tasklet */ + /* Necessary to do atomic alloc() as this + can be called from a weird-ass iSCSI context that is + !in_interrupt but is in_atomic - See BUG3163 */ + iobrs = kmalloc(object_size, GFP_ATOMIC); + } else { /* can't do this within a tasklet */ +#ifndef NDEBUG + if (in_interrupt() || in_atomic()) { + EFRM_ERR("%s(): alloc->u.iobufset.in_n_pages=%d", + __FUNCTION__, n_pages); + EFRM_ASSERT(!in_interrupt()); + EFRM_ASSERT(!in_atomic()); + } +#endif + iobrs = (struct iobufset_resource *) vmalloc(object_size); + } + if (iobrs == 0) { + rc = -ENOMEM; + goto fail1; + } + + /* Allocate an instance number. 
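+ * As with the filter resources, the id comes from a kfifo of free ids;
+ * it is passed to efrm_resource_init() below, recovered at free time with
+ * EFRM_RESOURCE_INSTANCE(rs_handle) and then pushed back onto the fifo.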
*/ + rc = kfifo_get(efrm_iobufset_manager->free_ids, + (unsigned char *)&instance, sizeof(instance)); + if (rc != sizeof(instance)) { + EFRM_TRACE("%s: out of instances", __FUNCTION__); + EFRM_ASSERT(rc == 0); + rc = -EBUSY; + goto fail3; + } + + efrm_resource_init(&iobrs->rs, EFRM_RESOURCE_IOBUFSET, instance); + + iobrs->evq = vi_evq; + efrm_vi_resource_ref(iobrs->evq); + + iobrs->n_bufs = n_pages; + iobrs->pages_per_contiguous_chunk = pages_per_contiguous_chunk; + iobrs->order = fls(iobrs->pages_per_contiguous_chunk - 1); + iobrs->faultonaccess = faultonaccess; + + EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " %u pages", __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(iobrs->rs.rs_handle), iobrs->n_bufs); + + /* Allocate the iobuffers. */ + if (iobrs->order == 0) { + /* make sure iobufs are in a known state in case we don't + * finish our allocation */ + for (i = 0; i < iobrs->n_bufs; ++i) + memset(&iobrs->bufs[i], 0, sizeof(iobrs->bufs[i])); + + for (i = 0; i < iobrs->n_bufs; ++i) { + /* due to bug2426 we have to specifiy a NIC when + * allocating a DMAable page, which is a bit messy. + * For now we assume that if the page is suitable + * (e.g. DMAable) by one nic (efrm_nic_table.a_nic), + * it is suitable for all NICs. + * XXX I bet that breaks in Solaris. + */ + rc = efhw_iopage_alloc(efrm_nic_table.a_nic, + &iobrs->bufs[i]); + if (rc < 0) { + EFRM_ERR("%s: failed (rc %d) to allocate " + "page (i=%u)", __FUNCTION__, rc, i); + goto fail4; + } + } + } else { + efhw_iopages_t iopages; + unsigned j; + + /* make sure iobufs are in a known state in case we don't + * finish our allocation */ + for (i = 0; i < iobrs->n_bufs; ++i) + memset(&iobrs->bufs[i], 0, sizeof(iobrs->bufs[i])); + + for (i = 0; i < iobrs->n_bufs; + i += iobrs->pages_per_contiguous_chunk) { + rc = efhw_iopages_alloc(efrm_nic_table.a_nic, + &iopages, iobrs->order); + if (rc < 0) { + EFRM_ERR("%s: failed (rc %d) to allocate " + "pages (i=%u order %d)", + __FUNCTION__, rc, i, iobrs->order); + goto fail4; + } + for (j = 0; j < iobrs->pages_per_contiguous_chunk; + j++) { + /* some implementation of + * efhw_iopage_init_from_iopages() rely on + * this function being called for + * _all_ pages in the chunk */ + efhw_iopage_init_from_iopages( + &iobrs->bufs[i + j], + &iopages, j); + } + } + } + + iobrs->buf_tbl_alloc.base = (unsigned)-1; + + if (!phys_addr_mode) { + unsigned instance = EFAB_VI_RESOURCE_INSTANCE(iobrs->evq); + /* Allocate space in the NIC's buffer table. */ + rc = efrm_buffer_table_alloc(fls(iobrs->n_bufs - 1), + &iobrs->buf_tbl_alloc); + if (rc < 0) { + EFRM_ERR("%s: failed (%d) to alloc %d buffer table " + "entries", __FUNCTION__, rc, iobrs->n_bufs); + goto fail5; + } + EFRM_ASSERT(((unsigned)1 << iobrs->buf_tbl_alloc.order) >= + (unsigned)iobrs->n_bufs); + + /* Initialise the buffer table entries. */ + for (i = 0; i < iobrs->n_bufs; ++i) { + /*\ ?? \TODO burst them! */ + efrm_buffer_table_set(&iobrs->buf_tbl_alloc, i, + efhw_iopage_dma_addr(&iobrs-> + bufs[i]), + instance); + } + efrm_buffer_table_commit(); + } + + EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " %d pages @ " + EFHW_BUFFER_ADDR_FMT, __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(iobrs->rs.rs_handle), + iobrs->n_bufs, EFHW_BUFFER_ADDR(iobrs->buf_tbl_alloc.base, + 0)); + + /* Put it into the resource manager's table. 
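+ * If the insert fails, the reference held on the new resource is dropped
+ * with atomic_dec_and_test(); when that was the last reference,
+ * efrm_iobufset_resource_free() runs and returns the pages, buffer-table
+ * entries and event-queue reference allocated above.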
*/ + rc = efrm_resource_manager_insert(&iobrs->rs); + if (rc != 0) { + if (atomic_dec_and_test(&iobrs->rs.rs_ref_count)) + efrm_iobufset_resource_free(iobrs); + return rc; + } + + *iobrs_out = iobrs; + return 0; + +fail5: + i = iobrs->n_bufs; +fail4: + /* see comment on call to efhw_iopage_alloc above for a discussion + * on use of efrm_nic_table.a_nic here */ + if (iobrs->order == 0) { + while (i--) { + efhw_iopage_t *page = &iobrs->bufs[i]; + efhw_iopage_free(efrm_nic_table.a_nic, page); + } + } else { + unsigned int j; + for (j = 0; j < i; j += iobrs->pages_per_contiguous_chunk) { + efhw_iopages_t iopages; + + EFRM_ASSERT(j % iobrs->pages_per_contiguous_chunk + == 0); + /* it is important that this is executed in increasing + * page order because some implementations of + * efhw_iopages_init_from_iopage() assume this */ + efhw_iopages_init_from_iopage(&iopages, + &iobrs->bufs[j], + iobrs->order); + efhw_iopages_free(efrm_nic_table.a_nic, &iopages); + } + } + efrm_vi_resource_release(iobrs->evq); +fail3: + if (object_size < PAGE_SIZE) { + kfree(iobrs); + } else { + vfree(iobrs); + } +fail1: + return rc; +} +EXPORT_SYMBOL(efrm_iobufset_resource_alloc); + +static void iobufset_rm_dtor(struct efrm_resource_manager *rm) +{ + EFRM_ASSERT(&efrm_iobufset_manager->rm == rm); + kfifo_vfree(efrm_iobufset_manager->free_ids); +} + +int +efrm_create_iobufset_resource_manager(struct efrm_resource_manager **rm_out) +{ + int rc, max; + + EFRM_ASSERT(rm_out); + + efrm_iobufset_manager = + kmalloc(sizeof(*efrm_iobufset_manager), GFP_KERNEL); + if (efrm_iobufset_manager == 0) + return -ENOMEM; + memset(efrm_iobufset_manager, 0, sizeof(*efrm_iobufset_manager)); + + /* + * Bug 1145, 1370: We need to set initial size of both the resource + * table and instance id table so they never need to grow as we + * want to be allocate new iobufset at tasklet time. Lets make + * a pessimistic guess at maximum number of iobufsets possible. + * Could be less because + * - jumbo frames have same no of packets per iobufset BUT more + * pages per buffer + * - buffer table entries used independently of iobufsets by + * sendfile + * + * Based on TCP/IP stack setting of PKTS_PER_SET_S=5 ... + * - can't use this define here as it breaks the layering. 
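+ *
+ * The resulting bound is simply
+ *   max = min(efrm_buffer_table_size() / MIN_PAGES_PER_IOBUFSET,
+ *             EFRM_IOBUFSET_MAX_NUM_INSTANCES)
+ * so, to take purely illustrative numbers, a 65536-entry buffer table
+ * with MIN_PAGES_PER_IOBUFSET at 16 caps the id pool at 4096 iobufsets.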
+ */ +#define MIN_PAGES_PER_IOBUFSET (1 << 4) + + max = efrm_buffer_table_size() / MIN_PAGES_PER_IOBUFSET; + max = min_t(int, max, EFRM_IOBUFSET_MAX_NUM_INSTANCES); + + rc = efrm_kfifo_id_ctor(&efrm_iobufset_manager->free_ids, + 0, max, &efrm_iobufset_manager->rm.rm_lock); + if (rc != 0) + goto fail1; + + rc = efrm_resource_manager_ctor(&efrm_iobufset_manager->rm, + iobufset_rm_dtor, "IOBUFSET", + EFRM_RESOURCE_IOBUFSET, max); + if (rc < 0) + goto fail2; + + *rm_out = &efrm_iobufset_manager->rm; + return 0; + +fail2: + kfifo_vfree(efrm_iobufset_manager->free_ids); +fail1: + EFRM_DO_DEBUG(memset(efrm_iobufset_manager, 0, + sizeof(*efrm_iobufset_manager))); + kfree(efrm_iobufset_manager); + return rc; +} Index: head-2008-03-17/drivers/net/sfc/sfc_resource/iopage.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/iopage.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,101 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides Linux-specific implementation for iopage API used + * from efhw library. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include "kernel_compat.h" +#include /* for dma_addr_t */ + +int efhw_iopage_alloc(struct efhw_nic *nic, efhw_iopage_t *p) +{ + struct linux_efhw_nic *lnic = linux_efhw_nic(nic); + dma_addr_t handle; + void *kva; + + kva = efrm_pci_alloc_consistent(lnic->pci_dev, PAGE_SIZE, + &handle); + if (kva == 0) + return -ENOMEM; + + EFHW_ASSERT((handle & ~PAGE_MASK) == 0); + + memset((void *)kva, 0, PAGE_SIZE); + efhw_page_init_from_va(&p->p, kva); + + p->dma_addr = handle; + + return 0; +} + +void efhw_iopage_free(struct efhw_nic *nic, efhw_iopage_t *p) +{ + struct linux_efhw_nic *lnic = linux_efhw_nic(nic); + EFHW_ASSERT(efhw_page_is_valid(&p->p)); + + efrm_pci_free_consistent(lnic->pci_dev, PAGE_SIZE, + efhw_iopage_ptr(p), p->dma_addr); +} + +int efhw_iopages_alloc(struct efhw_nic *nic, efhw_iopages_t *p, unsigned order) +{ + unsigned bytes = 1u << (order + PAGE_SHIFT); + struct linux_efhw_nic *lnic = linux_efhw_nic(nic); + dma_addr_t handle; + caddr_t addr; + int gfp_flag; + + /* Set __GFP_COMP if available to make reference counting work. + * This is recommended here: + * http://www.forbiddenweb.org/viewtopic.php?id=83167&page=4#348331 + */ + gfp_flag = ((in_atomic() ? 
GFP_ATOMIC : GFP_KERNEL) | __GFP_COMP); + addr = efrm_dma_alloc_coherent(&lnic->pci_dev->dev, bytes, &handle, + gfp_flag); + if (addr == NULL) + return -ENOMEM; + + EFHW_ASSERT((handle & ~PAGE_MASK) == 0); + + p->order = order; + p->dma_addr = handle; + p->kva = addr; + + return 0; +} + +void efhw_iopages_free(struct efhw_nic *nic, efhw_iopages_t *p) +{ + unsigned bytes = 1u << (p->order + PAGE_SHIFT); + struct linux_efhw_nic *lnic = linux_efhw_nic(nic); + + efrm_dma_free_coherent(&lnic->pci_dev->dev, bytes, + (void *)p->kva, p->dma_addr); +} Index: head-2008-03-17/drivers/net/sfc/sfc_resource/kernel_compat.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/kernel_compat.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,584 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file provides compatibility layer for various Linux kernel versions + * (starting from 2.6.9 RHEL kernel). + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#define IN_KERNEL_COMPAT_C +#include +#include +#include "kernel_compat.h" + +/* Set this to 1 to enable very basic counting of iopage(s) allocations, then + * call dump_iopage_counts() to show the number of current allocations of + * orders 0-7. + */ +#define EFRM_IOPAGE_COUNTS_ENABLED 0 + + + +/* I admit that it's a bit ugly going straight to the field, but it + * seems easiest given that get_page followed by put_page on a page + * with PG_reserved set will increment the ref count on 2.6.14 and + * below, but not 2.6.15. Also, RedHat have hidden put_page_testzero + * in a header file which produces warnings when compiled. This + * doesn't agree with our use of -Werror. + */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) +# define page_count_field(pg) ((pg)->count) +#else +# define page_count_field(pg) ((pg)->_count) +#endif + +#define inc_page_count(page) atomic_inc(&page_count_field(page)) +#define dec_page_count(page) atomic_dec(&page_count_field(page)) + +/* Bug 5531: set_page_count doesn't work if the new page count is an + * expression. */ +#define ci_set_page_count(page, n) set_page_count(page, (n)) + + /* Bug 3965: Gak! 
Reference counts just don't work on memory + * allocated through pci_alloc_consistent. Different versions and + * architectures do different things. There are several interacting + * bugs/features which have been described below and then summarised + * in a table for each kernel version. For each feature, there is a + * question, a short description, a hint at how to examine the + * kernel code for this feature and a description of the keys in the + * table. + * + * A. Is PG_compound set on multi-page allocations? + * + * When a multi-page allocation succeeds, the kernel sets the + * reference count of the first page to one and the count of the + * remaining pages to zero. This is an immediate problem because + * if these pages are mapped into user space, the VM will do + * get_page followed by put_page, at which point the reference + * count will return to zero and the page will be freed. + * PG_compound was introduced in 2.6.0 and back-ported to rhel3 + * kernels. When it is set, all the pages have a pointer to the + * first page so that they can share the reference count. If + * PG_compound is set, calling get_page(pg+1) can change + * page_count(pg). It was originally set on all multi-page + * allocations, but later only set if the __GFP_COMP flag was + * provided to the allocator. + * + * See mm/page_alloc.c + * Does prep_compound_page get called when __GFP_COMP not set? + * + * Keys: + * NotDef - prep_compound_page and PG_compound are not defined. + * Comp - prep_compound_page is called for any multi-page allocation. + * Opt - prep_compound_page is only called if __GFP_COMP is set. + * OptInv - prep_compound_page is only called if __GFP_NO_COMP is not set. + * + * B. Are bounce buffers ever used to satisfy pci_alloc_consistent? + * (x86_64 only) + * + * 2.6 kernels introduced bounce buffers on x86_64 machines to access + * memory above 4G when using the DMA mapping API. At some point, + * code was added to allow pci_alloc_consistent/dma_alloc_coherent to + * allocate memory from the bounce buffers if the general purpose + * allocator produced memory which wasn't suitable. Such memory can + * be recognised by the PG_reserved bit being set. At a later point, + * the __GFP_DMA32 flag was added and used to restrict the allocator + * to below 4G. The effect of this later change was that 4G capable + * cards would no longer get memory from the bounce buffers, although + * a card which can address less than 4G might get memory from the + * bounce buffers. + * + * See dma_alloc_coherent or pci_alloc_consistent in + * arch/x86_64/kernel/pci-gart.c or arch/x86/kernel/pci-dma_64.c + * Is (gfp |= GFP_DMA32) before dma_alloc_pages? + * Is swiotlb_alloc_coherent called? + * + * Keys: + * NU - bounce buffers are Never Used + * Used - bounce buffers are sometimes used + * + * C. Does munmap decrement the reference count of a PG_reserved page? + * + * Originally, the munmap code would not decrement the reference count + * of a page which had PG_reserved set. At some point in the 2.6 + * series, VM_PFNMAP was introduced and could be set on a vma to + * indicate that no pages in that vma should have the reference count + * decremented (unless they are copy-on-write copies). At that point, + * the check for PG_reserved pages in the munmap code path was + * removed. Some hackery in vm_normal_page means that a VM_PFNMAP vma + * must map contiguous physical pages. As a result, such pages should + * be mapped during mmap using remap_pfn_range (for an example, see + * drivers/char/mem.c). 
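+ *
+ * A minimal sketch of that mapping pattern, assuming the vma and pfn
+ * come from the caller's mmap handler:
+ *
+ *   vma->vm_flags |= VM_IO | VM_PFNMAP;
+ *   if (remap_pfn_range(vma, vma->vm_start, pfn,
+ *                       vma->vm_end - vma->vm_start,
+ *                       vma->vm_page_prot))
+ *           return -EAGAIN;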
+ * + * In 2.6 kernels: See release_pages in mm/swap.c + * Does PageReserved get tested? + * In 2.6 kernels: See mm/memory.c + * Is VM_PFNMAP used? + * In 2.4 kernels: See __free_pte in mm/memory.c + * Does PageReserved get tested? + * In 2.4 kernels: See __free_pages in mm/page_alloc.c + * Does PageReserved get tested? + * + * Keys: + * resv - The reference count is not touched for PG_reserved pages. + * pfnmap - The VM_PFNMAP flag is checked instead of PG_reserved. + * + * D. Does munmap honour the PG_compound bit? + * + * When PG_compound was originally introduced, the munmap code path + * didn't check it before decrementing the reference count on the + * page. As a result, the wrong reference count would be updated if a + * PG_compound page was ever mapped into user space. + * + * In 2.6 kernels: See release_pages in mm/swap.c + * Does PageCompound get tested? + * In 2.4 kernels: See __free_pages in mm/page_alloc.c + * Does PageCompound get tested? + * + * Keys: + * NotHon - The PG_compound bit isn't honoured by munmap. + * Hon - The PG_compound bit is honoured by munmap. + * + * OS A B C D + * 2.4.18 NotDef NU resv NotHon + * 2.4.29 NotDef NU resv NotHon + * 2.4.20-31.9 rhl9 NotDef NU resv NotHon + * + * 2.4.21-4.EL rhel3 Comp NU resv Hon + * 2.4.21-15.EL rhel3 Comp NU resv Hon + * 2.4.21-32.EL rhel3 Comp NU resv Hon + * 2.4.21-40.EL rhel3 Comp NU resv Hon + * + * 2.6.0 Comp NU resv NotHon + * + * 2.6.5-7.97 sles9 OptInv NU resv NotHon + * 2.6.9 rhel4 Opt NU resv NotHon + * + * 2.6.11 fc4 ? ? ? ? + * 2.6.12 fc4 Opt Used resv NotHon + * 2.6.13 Opt Used resv NotHon + * + * 2.6.15 Opt NU pfnmap NotHon + * + * 2.6.16 Opt NU pfnmap Hon + * 2.6.16.9 Opt NU pfnmap Hon + * 2.6.17.2 Opt NU pfnmap Hon + * 2.6.24-rc7 k.org Opt NU pfnmap Hon + * + * This LKML thread gives some low down on mapping pages into user + * space and using DMA. + * http://www.forbiddenweb.org/viewtopic.php?id=83167&page=1 + * + * There is no problem with single page allocations (until some + * kernel hands us a PG_reserved page and expects us to use + * VM_PFNMAP on the vma). + * + * Bug 5450: Most kernels set the reference count to one on the + * first sub-page of a high-order page allocation and zero on + * subsequent sub-pages. Some kernels, however, set the page count + * to one on all the sub-pages. The SLES 9 range are affected, as + * are kernels built without CONFIG_MMU defined. + * + * Possible strategies for multi-page allocations: + * + * EFRM_MMAP_USE_COMPOUND + * 1. Allocate a compound page. Reference counting should then work + * on the whole allocation. This is a good theory, but is broken + * by bug/feature D (above). + * + * EFRM_MMAP_USE_SPLIT + * 2. Convert the multi-page allocation to many single page + * allocations. This involves incrementing the reference counts + * and clearing PG_compound on all the pages (including the + * first). The references should be released _after_ calling + * pci_free_consistent so that that call doesn't release the + * memory. + * + * EFRM_MMAP_USE_INCREMENT + * 3. Increment the reference count on all the pages after + * allocating and decrement them again before freeing. This gets + * round the zero reference count problem. It doesn't handle the + * case where someone else is holding a reference to one of our + * pages when we free the pages, but we think VM_IO stops this + * from happening. + */ + +/* Should we use strategy 1? This can be forced on us by the OS. 
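+ * In other words, when PG_compound is available we rely on the compound
+ * page sharing a single reference count across the whole allocation;
+ * otherwise one of the other two strategies has to cover it.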
*/ +#if defined(PG_compound) +#define EFRM_MMAP_USE_COMPOUND 1 +#else +#define EFRM_MMAP_USE_COMPOUND 0 +#endif + +/* Should we use strategy 2? This can be used even if strategy 1 is + * used. */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) +#define EFRM_MMAP_USE_SPLIT 1 +#else +#define EFRM_MMAP_USE_SPLIT 0 +#endif + +/* Should we use strategy 3? There's no point doing this if either + * strategy 1 or strategy 2 is used. */ +#if !EFRM_MMAP_USE_COMPOUND && !EFRM_MMAP_USE_SPLIT +#error "We shouldn't have to use this strategy." +#define EFRM_MMAP_USE_INCREMENT 1 +#else +#define EFRM_MMAP_USE_INCREMENT 0 +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) +#define EFRM_MMAP_RESET_REFCNT 1 +#else +#define EFRM_MMAP_RESET_REFCNT 0 +#endif + +/* NB. 2.6.17 has renamed SetPageCompound to __SetPageCompound and + * ClearPageCompound to __ClearPageCompound. */ +#if ((defined(PageCompound) != defined(PG_compound)) || \ + (defined(SetPageCompound) != defined(PG_compound) && \ + defined(__SetPageCompound) != defined(PG_compound)) || \ + (defined(ClearPageCompound) != defined(PG_compound) && \ + defined(__ClearPageCompound) != defined(PG_compound)) || \ + (defined(__GFP_COMP) && !defined(PG_compound))) +#error Mismatch of defined page-flags. +#endif + +extern int use_pci_alloc; /* Use pci_alloc_consistent to alloc iopages */ + +/**************************************************************************** + * + * allocate a buffer suitable for DMA to/from the NIC + * + ****************************************************************************/ + +static inline void pci_mmap_pages_hack_after_alloc(caddr_t kva, unsigned order) +{ + unsigned pfn = __pa(kva) >> PAGE_SHIFT; + struct page *start_pg = pfn_to_page(pfn); +#if !defined(NDEBUG) || EFRM_MMAP_USE_SPLIT + struct page *end_pg = start_pg + (1 << order); + struct page *pg; +#endif + + /* Compound pages don't get created for order 0 pages and there's no + * fixing up needs to be done. */ + if (order == 0) + return; + + /* If we've been given a reserved page then it must have come from + * the bounce buffer pool. */ + if (PageReserved(start_pg)) { +#if defined(VM_PFNMAP) || !defined(__x86_64__) + /* Kernel allocated reserved pages when not expected */ + BUG(); +#endif + return; + } + + /* Check the page count and PG_compound bit. */ +#ifndef NDEBUG +# if defined(PG_compound) + EFRM_ASSERT(PageCompound(start_pg) == EFRM_MMAP_USE_COMPOUND); +# endif + EFRM_ASSERT(page_count(start_pg) == 1); + + { + /* Some kernels have the page count field hold (ref_count-1) + * rather than (ref_count). This is so that decrementing the + * reference count to "zero" causes the internal value to change + * from 0 to -1 which sets the carry flag. Other kernels store + * the real reference count value in the obvious way. We handle + * this here by reading the reference count field of the first + * page, which is always 1. */ + int pg_count_zero; + pg_count_zero = atomic_read(&page_count_field(start_pg)) - 1; + for (pg = start_pg + 1; pg < end_pg; pg++) { + int pg_count; +# if defined(PG_compound) + EFRM_ASSERT(PageCompound(pg) == EFRM_MMAP_USE_COMPOUND); +# endif + + /* Bug 5450: Some kernels initialise the page count + * to one for pages other than the first and some + * leave it at zero. We allow either behaviour + * here, but disallow anything strange. Newer + * kernels only define set_page_count in an + * internal header file, so we have to make do with + * incrementing and decrementing the reference + * count. 
Fortunately, those kernels don't set the + * reference count to one on all the pages. */ + pg_count = atomic_read(&page_count_field(pg)); +# if EFRM_MMAP_RESET_REFCNT + if (pg_count != pg_count_zero) + EFRM_ASSERT(pg_count == pg_count_zero + 1); +# else + EFRM_ASSERT(pg_count == pg_count_zero); +# endif + } + } +#endif + + /* Split the multi-page allocation if necessary. */ +#if EFRM_MMAP_USE_SPLIT + for (pg = start_pg; pg < end_pg; pg++) { + + /* This is no longer a compound page. */ +# if EFRM_MMAP_USE_COMPOUND + ClearPageCompound(pg); + EFRM_ASSERT(PageCompound(pg) == 0); +# endif + +# ifndef NDEBUG + { + int pg_count = page_count(pg); + /* Bug 5450: The page count can be zero or one here. */ + if (pg == start_pg) { + EFRM_ASSERT(pg_count == 1); + } else { +# if EFRM_MMAP_RESET_REFCNT + if (pg_count != 0) + EFRM_ASSERT(pg_count == 1); +# else + EFRM_ASSERT(pg_count == 0); +# endif + } + } +# endif + + /* Get a reference which will be released after the pages have + * been passed back to pci_free_consistent. */ +# if EFRM_MMAP_RESET_REFCNT + /* Bug 5450: Reset the reference count since the count might + * already be 1. */ + ci_set_page_count(pg, (pg == start_pg) ? 2 : 1); +# else + get_page(pg); +# endif + } +#endif + + /* Fudge the reference count if necessary. */ +#if EFRM_MMAP_USE_INCREMENT + for (pg = start_pg; pg < end_pg; pg++) + inc_page_count(pg); +#endif +} + +static inline void pci_mmap_pages_hack_before_free(caddr_t kva, unsigned order) +{ +#if EFRM_MMAP_USE_INCREMENT || !defined(NDEBUG) + /* Drop the references taken in pci_mmap_pages_hack_after_alloc */ + unsigned pfn = __pa(kva) >> PAGE_SHIFT; + struct page *start_pg = pfn_to_page(pfn); + struct page *end_pg = start_pg + (1 << order); + struct page *pg; + + /* Compound pages don't get created for order 0 pages and there's no + * fixing up needs to be done. */ + if (order == 0) + return; + + if (PageReserved(start_pg)) + return; + +# if EFRM_MMAP_USE_INCREMENT + for (pg = start_pg; pg < end_pg; pg++) + dec_page_count(pg); +# endif + +#if !defined(NDEBUG) + EFRM_ASSERT(page_count(start_pg) == 1+EFRM_MMAP_USE_SPLIT); + +# if EFRM_MMAP_USE_COMPOUND && !EFRM_MMAP_USE_SPLIT + for (pg = start_pg; pg < end_pg; pg++) + EFRM_ASSERT(PageCompound(pg)); +# else + for (pg = start_pg+1; pg < end_pg; pg++) { + unsigned exp_pg_count = EFRM_MMAP_USE_SPLIT; + /* NB. If this assertion fires, either we've messed up the + * page counting or someone is holding on to a reference. + */ + EFRM_ASSERT(page_count(pg) == exp_pg_count); + } +# endif +#endif + +#endif +} + +static inline void pci_mmap_pages_hack_after_free(caddr_t kva, unsigned order) +{ +#if EFRM_MMAP_USE_SPLIT + /* Drop the references taken in pci_mmap_pages_hack_after_alloc */ + unsigned pfn = __pa(kva) >> PAGE_SHIFT; + struct page *start_pg = pfn_to_page(pfn); + struct page *end_pg = start_pg + (1 << order); + struct page *pg; + + /* Compound pages don't get created for order 0 pages and there's no + * fixing up needs to be done. 
*/ + if (order == 0) + return; + + if (PageReserved(start_pg)) + return; + + for (pg = start_pg; pg < end_pg; pg++) { + EFRM_ASSERT(page_count(pg) == 1); + put_page(pg); + } +#endif +} + + +#if EFRM_IOPAGE_COUNTS_ENABLED + +static int iopage_counts[8]; + +void dump_iopage_counts(void) +{ + EFRM_NOTICE("iopage counts: %d %d %d %d %d %d %d %d", iopage_counts[0], + iopage_counts[1], iopage_counts[2], iopage_counts[3], + iopage_counts[4], iopage_counts[5], iopage_counts[6], + iopage_counts[7]); +} + +#endif + + + +/*********** pci_alloc_consistent / pci_free_consistent ***********/ + +void *efrm_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_addr, int flag) +{ + struct pci_dev *pci_dev; + void *ptr; + unsigned order; + EFRM_IOMMU_DECL; + + order = __ffs(size/PAGE_SIZE); + EFRM_ASSERT(size == (PAGE_SIZE< + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef DRIVER_LINUX_RESOURCE_KERNEL_COMPAT_H +#define DRIVER_LINUX_RESOURCE_KERNEL_COMPAT_H + +#include + +/********* wait_for_completion_timeout() ********************/ +#include + +/* RHEL_RELEASE_CODE from linux/version.h is only defined for 2.6.9-55EL + * UTS_RELEASE is unfortunately unusable + * Really only need this fix for <2.6.9-34EL + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)) && \ + !defined(RHEL_RELEASE_CODE) + +static inline unsigned long fastcall __sched +efrm_wait_for_completion_timeout(struct completion *x, unsigned long timeout) +{ + might_sleep(); + + spin_lock_irq(&x->wait.lock); + if (!x->done) { + DECLARE_WAITQUEUE(wait, current); + + wait.flags |= WQ_FLAG_EXCLUSIVE; + __add_wait_queue_tail(&x->wait, &wait); + do { + __set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock_irq(&x->wait.lock); + timeout = schedule_timeout(timeout); + spin_lock_irq(&x->wait.lock); + if (!timeout) { + __remove_wait_queue(&x->wait, &wait); + goto out; + } + } while (!x->done); + __remove_wait_queue(&x->wait, &wait); + } + x->done--; +out: + spin_unlock_irq(&x->wait.lock); + return timeout; +} + +# ifdef wait_for_completion_timeout +# undef wait_for_completion_timeout +# endif +# define wait_for_completion_timeout efrm_wait_for_completion_timeout + +#endif + +/********* pci_map_*() ********************/ + +#include + +/* Bug 4560: Some kernels leak IOMMU entries under heavy load. Use a + * spinlock to serialise access where possible to alleviate the + * problem. + * + * NB. This is duplicated in the net driver. Please keep in sync. 
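+ *
+ * Every wrapper below follows the same pattern, sketched here once:
+ *
+ *   EFRM_IOMMU_DECL;
+ *   EFRM_IOMMU_LOCK();
+ *   dma_addr = pci_map_single(hwdev, ptr, size, direction);
+ *   EFRM_IOMMU_UNLOCK();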
*/ +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)) && \ + defined(__x86_64__) && defined(CONFIG_SMP)) + +#define EFRM_HAVE_IOMMU_LOCK 1 + +#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,5)) && \ + defined(CONFIG_SUSE_KERNEL)) +#define EFRM_NEED_ALTERNATE_MAX_PFN 1 +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) +#if defined(CONFIG_GART_IOMMU) +#define EFRM_NO_IOMMU no_iommu +#else +#define EFRM_NO_IOMMU 1 +#endif +#else +#define EFRM_NO_IOMMU 0 +#endif + +/* Set to 0 if we should never use the lock. Set to 1 if we should + * automatically determine if we should use the lock. Set to 2 if we + * should always use the lock. */ +extern unsigned int efx_use_iommu_lock; +/* Defined in the net driver. */ +extern spinlock_t efx_iommu_lock; +/* Non-zero if there is a card which needs the lock. */ +extern int efrm_need_iommu_lock; + +/* The IRQ state is needed if the lock is being used. The flag is + * cached to ensure that every lock is followed by an unlock, even + * if the global flag changes in the middle of the operation. */ + +#define EFRM_IOMMU_DECL \ + unsigned long efx_iommu_irq_state = 0; \ + int efx_iommu_using_lock; +#define EFRM_IOMMU_LOCK() \ + do { \ + efx_iommu_using_lock = (efx_use_iommu_lock && \ + (efrm_need_iommu_lock || \ + efx_use_iommu_lock >= 2)); \ + if (efx_iommu_using_lock) \ + spin_lock_irqsave(&efx_iommu_lock, efx_iommu_irq_state);\ + } while (0) +#define EFRM_IOMMU_UNLOCK() \ + do { \ + if (efx_iommu_using_lock) \ + spin_unlock_irqrestore(&efx_iommu_lock, \ + efx_iommu_irq_state); \ + } while (0) + +#else /* defined(__x86_64__) && defined(CONFIG_SMP) */ + +#define EFRM_HAVE_IOMMU_LOCK 0 +#define EFRM_IOMMU_DECL +#define EFRM_IOMMU_LOCK() do {} while (0) +#define EFRM_IOMMU_UNLOCK() do {} while (0) + +#endif + +static inline dma_addr_t efrm_pci_map_single(struct pci_dev *hwdev, void *ptr, + size_t size, int direction) +{ + dma_addr_t dma_addr; + EFRM_IOMMU_DECL; + + EFRM_IOMMU_LOCK(); + dma_addr = pci_map_single(hwdev, ptr, size, direction); + EFRM_IOMMU_UNLOCK(); + + return dma_addr; +} + +static inline void efrm_pci_unmap_single(struct pci_dev *hwdev, + dma_addr_t dma_addr, size_t size, + int direction) +{ + EFRM_IOMMU_DECL; + + EFRM_IOMMU_LOCK(); + pci_unmap_single(hwdev, dma_addr, size, direction); + EFRM_IOMMU_UNLOCK(); +} + +static inline dma_addr_t efrm_pci_map_page(struct pci_dev *hwdev, + struct page *page, + unsigned long offset, size_t size, + int direction) +{ + dma_addr_t dma_addr; + EFRM_IOMMU_DECL; + + EFRM_IOMMU_LOCK(); + dma_addr = pci_map_page(hwdev, page, offset, size, direction); + EFRM_IOMMU_UNLOCK(); + + return dma_addr; +} + +static inline void efrm_pci_unmap_page(struct pci_dev *hwdev, + dma_addr_t dma_addr, size_t size, + int direction) +{ + EFRM_IOMMU_DECL; + + EFRM_IOMMU_LOCK(); + pci_unmap_page(hwdev, dma_addr, size, direction); + EFRM_IOMMU_UNLOCK(); +} + +#ifndef IN_KERNEL_COMPAT_C +# ifndef __GFP_COMP +# define __GFP_COMP 0 +# endif +# ifndef __GFP_ZERO +# define __GFP_ZERO 0 +# endif +#endif + +extern void *efrm_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_addr, int flag); + +extern void efrm_dma_free_coherent(struct device *dev, size_t size, + void *ptr, dma_addr_t dma_addr); + +static inline void *efrm_pci_alloc_consistent(struct pci_dev *hwdev, + size_t size, + dma_addr_t *dma_addr) +{ + return efrm_dma_alloc_coherent(&hwdev->dev, size, dma_addr, + GFP_ATOMIC); +} + +static inline void efrm_pci_free_consistent(struct pci_dev *hwdev, size_t size, 
+ void *ptr, dma_addr_t dma_addr) +{ + efrm_dma_free_coherent(&hwdev->dev, size, ptr, dma_addr); +} + +#endif /* DRIVER_LINUX_RESOURCE_KERNEL_COMPAT_H */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/kernel_proc.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/kernel_proc.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,111 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains /proc/driver/sfc_resource/ implementation. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include +#include +#include + +/** Top level directory for sfc specific stats **/ +static struct proc_dir_entry *efrm_proc_root; /* = NULL */ + +static int +efrm_resource_read_proc(char *buf, char **start, off_t offset, int count, + int *eof, void *data); + +int efrm_install_proc_entries(void) +{ + /* create the top-level directory for etherfabric specific stuff */ + efrm_proc_root = proc_mkdir("sfc_resource", proc_root_driver); + if (!efrm_proc_root) + return -ENOMEM; + EFRM_ASSERT(efrm_proc_root); + + if (create_proc_read_entry("resources", 0, efrm_proc_root, + efrm_resource_read_proc, 0) == NULL) { + EFRM_WARN("%s: Unable to create /proc/drivers/sfc_resource/" + "resources", __FUNCTION__); + } + return 0; +} + +void efrm_uninstall_proc_entries(void) +{ + EFRM_ASSERT(efrm_proc_root); + remove_proc_entry("resources", efrm_proc_root); + remove_proc_entry("sfc_resource", proc_root_driver); + efrm_proc_root = NULL; +} + +/**************************************************************************** + * + * /proc/drivers/sfc/resources + * + ****************************************************************************/ + +#define EFRM_PROC_PRINTF(buf, len, fmt, ...) 
\ + do { \ + if (count - len > 0) \ + len += snprintf(buf+len, count-len, (fmt), \ + __VA_ARGS__); \ + } while (0) + +static int +efrm_resource_read_proc(char *buf, char **start, off_t offset, int count, + int *eof, void *data) +{ + irq_flags_t lock_flags; + int len = 0; + int type; + struct efrm_resource_manager *rm; + + for (type = 0; type < EFRM_RESOURCE_NUM; type++) { + rm = efrm_rm_table[type]; + if (rm == NULL) + continue; + + EFRM_PROC_PRINTF(buf, len, "*** %s ***\n", rm->rm_name); + + spin_lock_irqsave(&rm->rm_lock, lock_flags); + EFRM_PROC_PRINTF(buf, len, "current = %u\n", rm->rm_resources); + EFRM_PROC_PRINTF(buf, len, " max = %u\n\n", + rm->rm_resources_hiwat); + spin_unlock_irqrestore(&rm->rm_lock, lock_flags); + } + + return count ? strlen(buf) : 0; +} Index: head-2008-03-17/drivers/net/sfc/sfc_resource/kfifo.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/kfifo.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,212 @@ +/* + * A simple kernel FIFO implementation. + * + * Copyright (C) 2004 Stelian Pop + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +/* + * This file is stolen from the Linux kernel sources + * (linux-2.6.22/kernel/kfifo.c) into sfc_resource driver. + * It should be used for old kernels without kfifo implementation. + * Most part of linux/kfifo.h is incorporated into + * ci/efrm/sysdep_linux.h. + */ +#include +#ifdef HAS_NO_KFIFO + +#include +#include +#include +#include +/*#include */ + +/** + * kfifo_init - allocates a new FIFO using a preallocated buffer + * @buffer: the preallocated buffer to be used. + * @size: the size of the internal buffer, this have to be a power of 2. + * @gfp_mask: get_free_pages mask, passed to kmalloc() + * @lock: the lock to be used to protect the fifo buffer + * + * Do NOT pass the kfifo to kfifo_free() after use! Simply free the + * &struct kfifo with kfree(). + */ +struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, + gfp_t gfp_mask, spinlock_t * lock) +{ + struct kfifo *fifo; + + /* size must be a power of 2 */ + BUG_ON(size & (size - 1)); + + fifo = kmalloc(sizeof(struct kfifo), gfp_mask); + if (!fifo) + return ERR_PTR(-ENOMEM); + + fifo->buffer = buffer; + fifo->size = size; + fifo->in = fifo->out = 0; + fifo->lock = lock; + + return fifo; +} + +EXPORT_SYMBOL(kfifo_init); + +/** + * kfifo_alloc - allocates a new FIFO and its internal buffer + * @size: the size of the internal buffer to be allocated. + * @gfp_mask: get_free_pages mask, passed to kmalloc() + * @lock: the lock to be used to protect the fifo buffer + * + * The size will be rounded-up to a power of 2. 
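+ * For example, a request for 100 bytes yields a 128-byte buffer; keeping
+ * the size a power of 2 is what lets the free-running fifo->in/fifo->out
+ * counters be turned into buffer offsets with a simple mask such as
+ * (fifo->in & (fifo->size - 1)) instead of a modulo.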
+ */ +struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t * lock) +{ + unsigned char *buffer; + struct kfifo *ret; + + /* + * round up to the next power of 2, since our 'let the indices + * wrap' tachnique works only in this case. + */ + if (size & (size - 1)) { + BUG_ON(size > 0x80000000); + size = roundup_pow_of_two(size); + } + + buffer = kmalloc(size, gfp_mask); + if (!buffer) + return ERR_PTR(-ENOMEM); + + ret = kfifo_init(buffer, size, gfp_mask, lock); + + if (IS_ERR(ret)) + kfree(buffer); + + return ret; +} + +EXPORT_SYMBOL(kfifo_alloc); + +/** + * kfifo_free - frees the FIFO + * @fifo: the fifo to be freed. + */ +void kfifo_free(struct kfifo *fifo) +{ + kfree(fifo->buffer); + kfree(fifo); +} + +EXPORT_SYMBOL(kfifo_free); + +/** + * __kfifo_put - puts some data into the FIFO, no locking version + * @fifo: the fifo to be used. + * @buffer: the data to be added. + * @len: the length of the data to be added. + * + * This function copies at most @len bytes from the @buffer into + * the FIFO depending on the free space, and returns the number of + * bytes copied. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these functions. + */ +unsigned int +__kfifo_put(struct kfifo *fifo, unsigned char *buffer, unsigned int len) +{ + unsigned int l; + + len = min(len, fifo->size - fifo->in + fifo->out); + + /* + * Ensure that we sample the fifo->out index -before- we + * start putting bytes into the kfifo. + */ + + smp_mb(); + + /* first put the data starting from fifo->in to buffer end */ + l = min(len, fifo->size - (fifo->in & (fifo->size - 1))); + memcpy(fifo->buffer + (fifo->in & (fifo->size - 1)), buffer, l); + + /* then put the rest (if any) at the beginning of the buffer */ + memcpy(fifo->buffer, buffer + l, len - l); + + /* + * Ensure that we add the bytes to the kfifo -before- + * we update the fifo->in index. + */ + + smp_wmb(); + + fifo->in += len; + + return len; +} + +EXPORT_SYMBOL(__kfifo_put); + +/** + * __kfifo_get - gets some data from the FIFO, no locking version + * @fifo: the fifo to be used. + * @buffer: where the data must be copied. + * @len: the size of the destination buffer. + * + * This function copies at most @len bytes from the FIFO into the + * @buffer and returns the number of copied bytes. + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these functions. + */ +unsigned int +__kfifo_get(struct kfifo *fifo, unsigned char *buffer, unsigned int len) +{ + unsigned int l; + + len = min(len, fifo->in - fifo->out); + + /* + * Ensure that we sample the fifo->in index -before- we + * start removing bytes from the kfifo. + */ + + smp_rmb(); + + /* first get the data from fifo->out until the end of the buffer */ + l = min(len, fifo->size - (fifo->out & (fifo->size - 1))); + memcpy(buffer, fifo->buffer + (fifo->out & (fifo->size - 1)), l); + + /* then get the rest (if any) from the beginning of the buffer */ + memcpy(buffer + l, fifo->buffer, len - l); + + /* + * Ensure that we remove the bytes from the kfifo -before- + * we update the fifo->out index. 
+ */ + + smp_mb(); + + fifo->out += len; + + return len; +} + +EXPORT_SYMBOL(__kfifo_get); +#endif Index: head-2008-03-17/drivers/net/sfc/sfc_resource/linux_resource_internal.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/linux_resource_internal.h 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,75 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains Linux-specific API internal for the resource driver. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#ifndef __LINUX_RESOURCE_INTERNAL__ +#define __LINUX_RESOURCE_INTERNAL__ + +#include +#include +#include +#include + + +/*! Linux specific EtherFabric initialisation */ +extern int +linux_efrm_nic_ctor(struct linux_efhw_nic *, struct pci_dev *, + spinlock_t *reg_lock, + unsigned nic_flags, unsigned nic_options); + +/*! Linux specific EtherFabric initialisation */ +extern void linux_efrm_nic_dtor(struct linux_efhw_nic *); + +/*! Linux specific EtherFabric initialisation -- interrupt registration */ +extern int linux_efrm_irq_ctor(struct linux_efhw_nic *); + +/*! 
Linux specific EtherFabric initialisation -- interrupt deregistration */ +extern void linux_efrm_irq_dtor(struct linux_efhw_nic *); + +extern int efrm_driverlink_register(void); +extern void efrm_driverlink_unregister(void); + +extern int +efrm_nic_add(struct pci_dev *dev, unsigned int opts, const uint8_t *mac_addr, + struct linux_efhw_nic **lnic_out, spinlock_t *reg_lock, + int bt_min, int bt_max, const struct vi_resource_dimensions *); +extern void efrm_nic_del(struct linux_efhw_nic *); + + +extern int efrm_install_proc_entries(void); +extern void efrm_uninstall_proc_entries(void); + +#endif /* __LINUX_RESOURCE_INTERNAL__ */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/nic.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/nic.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,190 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains EtherFabric Generic NIC instance (init, interrupts, + * etc) + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include +#include +#include +#include + + +int efhw_device_type_init(struct efhw_device_type *dt, + int vendor_id, int device_id, + int class_revision) +{ + if (vendor_id != 0x1924) + return 0; + + switch (device_id) { + case 0x0703: + case 0x6703: + dt->arch = EFHW_ARCH_FALCON; + dt->variant = 'A'; + switch (class_revision) { + case 0: + dt->revision = 0; + break; + case 1: + dt->revision = 1; + break; + default: + return 0; + } + break; + case 0x0710: + dt->arch = EFHW_ARCH_FALCON; + dt->variant = 'B'; + switch (class_revision) { + case 2: + dt->revision = 0; + break; + default: + return 0; + } + break; + default: + return 0; + } + + return 1; +} + + +/*-------------------------------------------------------------------- + * + * NIC Initialisation + * + *--------------------------------------------------------------------*/ + +/* make this separate from initialising data structure +** to allow this to be called at a later time once we can access PCI +** config space to find out what hardware we have +*/ +void efhw_nic_init(struct efhw_nic *nic, unsigned flags, unsigned options, + struct efhw_device_type dev_type) +{ + int i; + + nic->devtype = dev_type; + nic->flags = flags; + nic->options = options; + nic->bar_ioaddr = 0; + spin_lock_init(&nic->the_reg_lock); + nic->reg_lock = &nic->the_reg_lock; + nic->mtu = 1500 + ETH_HLEN; + + for (i = 0; i < EFHW_KEVENTQ_MAX; i++) + nic->irq_unit[i] = EFHW_IRQ_UNIT_UNUSED; + + switch (nic->devtype.arch) { + case EFHW_ARCH_FALCON: + nic->evq_sizes = 512 | 1024 | 2048 | 4096 | 8192 | + 16384 | 32768; + nic->txq_sizes = 512 | 1024 | 2048 | 4096; + nic->rxq_sizes = 512 | 1024 | 2048 | 4096; + nic->efhw_func = &falcon_char_functional_units; + nic->ctr_ap_bytes = EFHW_64M; + switch (nic->devtype.variant) { + case 'A': + nic->ctr_ap_bar = FALCON_S_CTR_AP_BAR; + break; + case 'B': + nic->flags |= NIC_FLAG_NO_INTERRUPT; + nic->ctr_ap_bar = FALCON_P_CTR_AP_BAR; + break; + default: + EFHW_ASSERT(0); + break; + } + break; + default: + EFHW_ASSERT(0); + break; + } +} + + +void efhw_nic_close_interrupts(struct efhw_nic *nic) +{ + int i; + + EFHW_ASSERT(nic); + if (!efhw_nic_have_hw(nic)) + return; + + EFHW_ASSERT(efhw_nic_have_hw(nic)); + + for (i = 0; i < EFHW_KEVENTQ_MAX; i++) { + if (nic->irq_unit[i] != EFHW_IRQ_UNIT_UNUSED) + efhw_nic_interrupt_disable(nic, i); + } +} + +void efhw_nic_dtor(struct efhw_nic *nic) +{ + EFHW_ASSERT(nic); + + /* Check that we have functional units because the software only + * driver doesn't initialise anything hardware related any more */ + +#ifndef __ci_ul_driver__ + /* close interrupts is called first because the act of deregistering + the driver could cause this driver to change from master to slave + and hence the implicit interrupt mappings would be wrong */ + + EFHW_TRACE("%s: functional units ... ", __FUNCTION__); + + if (efhw_nic_have_functional_units(nic)) { + efhw_nic_close_interrupts(nic); + efhw_nic_close_hardware(nic); + } + EFHW_TRACE("%s: functional units ... done", __FUNCTION__); +#endif + + /* destroy event queues */ + EFHW_TRACE("%s: event queues ... 
", __FUNCTION__); + +#ifndef __ci_ul_driver__ + { + int i; + for (i = 0; i < EFHW_KEVENTQ_MAX; ++i) + if (nic->evq[i].evq_mask) + efhw_keventq_dtor(nic, &nic->evq[i]); + } +#endif + + EFHW_TRACE("%s: event queues ... done", __FUNCTION__); + + spin_lock_destroy(&nic->the_reg_lock); + + EFHW_TRACE("%s: DONE", __FUNCTION__); +} Index: head-2008-03-17/drivers/net/sfc/sfc_resource/resource_driver.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/resource_driver.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,640 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains main driver entry points. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include "linux_resource_internal.h" +#include "kernel_compat.h" +#include +#include +#include +#include +#include +#include +#include + +#if EFRM_HAVE_IOMMU_LOCK +#ifdef EFRM_NEED_ALTERNATE_MAX_PFN +extern unsigned long blk_max_pfn; +#define max_pfn blk_max_pfn +#else +#include +#endif +#endif + +MODULE_AUTHOR("Solarflare Communications"); +MODULE_LICENSE("GPL"); + +static struct efhw_ev_handler ev_handler = { + .wakeup_fn = efrm_handle_wakeup_event, + .timeout_fn = efrm_handle_timeout_event, + .dmaq_flushed_fn = efrm_handle_dmaq_flushed, +}; + +#if EFRM_HAVE_IOMMU_LOCK +int efrm_need_iommu_lock; +EXPORT_SYMBOL(efrm_need_iommu_lock); +#endif + +const int max_hardware_init_repeats = 10; + +/*-------------------------------------------------------------------- + * + * Module load time variables + * + *--------------------------------------------------------------------*/ +/* See docs/notes/pci_alloc_consistent */ +int use_pci_alloc = 1; /* Use pci_alloc_consistent to alloc iopages */ +static int do_irq = 1; /* enable interrupts */ + +#if defined(CONFIG_X86_XEN) +static int irq_moderation = 60; /* interrupt moderation (60 usec) */ +#else +static int irq_moderation = 20; /* interrupt moderation (20 usec) */ +#endif +static int nic_options = NIC_OPT_DEFAULT; +int efx_vi_eventq_size = EFX_VI_EVENTQ_SIZE_DEFAULT; + +module_param(do_irq, int, S_IRUGO); +MODULE_PARM_DESC(do_irq, "Enable interrupts. 
" + "Do not turn it off unless you know what are you doing."); +module_param(irq_moderation, int, S_IRUGO); +MODULE_PARM_DESC(irq_moderation, "IRQ moderation in usec"); +module_param(nic_options, int, S_IRUGO); +MODULE_PARM_DESC(nic_options, "Nic options -- see efhw_types.h"); +module_param(use_pci_alloc, int, S_IRUGO); +MODULE_PARM_DESC(use_pci_alloc, "Use pci_alloc_consistent to alloc iopages " + "(autodetected by kernel version)"); +module_param(efx_vi_eventq_size, int, S_IRUGO); +MODULE_PARM_DESC(efx_vi_eventq_size, + "Size of event queue allocated by efx_vi library"); + +/*-------------------------------------------------------------------- + * + * Linux specific NIC initialisation + * + *--------------------------------------------------------------------*/ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) +# define IRQ_PT_REGS_ARG , struct pt_regs *regs __attribute__ ((unused)) +#else +# define IRQ_PT_REGS_ARG +#endif + +#ifndef IRQF_SHARED +# define IRQF_SHARED SA_SHIRQ +#endif + +static inline irqreturn_t +linux_efrm_interrupt(int irr, void *dev_id IRQ_PT_REGS_ARG) +{ + return efhw_nic_interrupt((struct efhw_nic *)dev_id); +} + +int linux_efrm_irq_ctor(struct linux_efhw_nic *lnic) +{ + struct efhw_nic *nic = &lnic->nic; + + nic->flags &= ~NIC_FLAG_MSI; + if (nic->flags & NIC_FLAG_TRY_MSI) { + int rc = pci_enable_msi(lnic->pci_dev); + if (rc < 0) { + EFRM_WARN("%s: Could not enable MSI (%d)", + __FUNCTION__, rc); + EFRM_WARN("%s: Continuing with legacy interrupt mode", + __FUNCTION__); + } else { + EFRM_NOTICE("%s: MSI enabled", __FUNCTION__); + nic->flags |= NIC_FLAG_MSI; + } + } + + if (request_irq(lnic->pci_dev->irq, linux_efrm_interrupt, + IRQF_SHARED, "sfc_resource", nic)) { + EFRM_ERR("Request for interrupt #%d failed", + lnic->pci_dev->irq); + nic->flags &= ~NIC_FLAG_OS_IRQ_EN; + return -EBUSY; + } + nic->flags |= NIC_FLAG_OS_IRQ_EN; + + return 0; +} + +void linux_efrm_irq_dtor(struct linux_efhw_nic *lnic) +{ + EFRM_TRACE("linux_efrm_irq_dtor: start"); + + if (lnic->nic.flags & NIC_FLAG_OS_IRQ_EN) { + free_irq(lnic->pci_dev->irq, &lnic->nic); + lnic->nic.flags &= ~NIC_FLAG_OS_IRQ_EN; + } + + if (lnic->nic.flags & NIC_FLAG_MSI) { + pci_disable_msi(lnic->pci_dev); + lnic->nic.flags &= ~NIC_FLAG_MSI; + } + + EFRM_TRACE("linux_efrm_irq_dtor: done"); +} + +/* Allocate buffer table entries for a particular NIC. + */ +static int efrm_nic_buffer_table_alloc(struct efhw_nic *nic) +{ + int capacity; + int page_order; + int i; + int rc; + + /* Choose queue size. */ + for (capacity = 8192; capacity <= nic->evq_sizes; capacity <<= 1) { + if (capacity > nic->evq_sizes) { + EFRM_ERR + ("%s: Unable to choose EVQ size (supported=%x)", + __FUNCTION__, nic->evq_sizes); + return -E2BIG; + } else if (capacity & nic->evq_sizes) + break; + } + for (i = 0; i < EFHW_KEVENTQ_MAX; ++i) { + nic->evq[i].hw.capacity = capacity; + nic->evq[i].hw.buf_tbl_alloc.base = (unsigned)-1; + } + + /* allocate buffer table entries to map onto the iobuffer */ + page_order = get_order(capacity * sizeof(efhw_event_t)); + if (!(nic->flags & NIC_FLAG_NO_INTERRUPT)) { + rc = efrm_buffer_table_alloc(page_order, + &nic->evq[0].hw.buf_tbl_alloc); + if (rc < 0) { + EFRM_WARN + ("%s: failed (%d) to alloc %d buffer table entries", + __FUNCTION__, rc, page_order); + return rc; + } + } + rc = efrm_buffer_table_alloc(page_order, + &nic->evq[FALCON_EVQ_NONIRQ].hw. 
+ buf_tbl_alloc); + if (rc < 0) { + EFRM_WARN + ("%s: failed (%d) to alloc %d buffer table entries", + __FUNCTION__, rc, page_order); + return rc; + } + + return 0; +} + +/* Free buffer table entries allocated for a particular NIC. + */ +static void efrm_nic_buffer_table_free(struct efhw_nic *nic) +{ + int i; + for (i = 0; i <= FALCON_EVQ_NONIRQ; i++) + if (nic->evq[i].hw.buf_tbl_alloc.base != (unsigned)-1) + efrm_buffer_table_free(&nic->evq[i].hw.buf_tbl_alloc); + +} + +static int iomap_bar(struct linux_efhw_nic *lnic, size_t len) +{ + efhw_ioaddr_t ioaddr; + + ioaddr = ioremap_nocache(lnic->ctr_ap_pci_addr, len); + if (ioaddr == 0) + return -ENOMEM; + + lnic->nic.bar_ioaddr = ioaddr; + return 0; +} + +static int linux_efhw_nic_map_ctr_ap(struct linux_efhw_nic *lnic) +{ + struct efhw_nic *nic = &lnic->nic; + int rc; + + rc = iomap_bar(lnic, nic->ctr_ap_bytes); + +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + /* Bug 5195: workaround for now. */ + if (rc != 0 && nic->ctr_ap_bytes > 16 * 1024 * 1024) { + /* Try half the size for now. */ + nic->ctr_ap_bytes /= 2; + EFRM_WARN("Bug 5195 WORKAROUND: retrying iomap of %d bytes", + nic->ctr_ap_bytes); + rc = iomap_bar(lnic, nic->ctr_ap_bytes); + } +#endif + + if (rc < 0) { + EFRM_ERR("Failed (%d) to map bar (%d bytes)", + rc, nic->ctr_ap_bytes); + return rc; + } + + return rc; +} + +int +linux_efrm_nic_ctor(struct linux_efhw_nic *lnic, struct pci_dev *dev, + spinlock_t *reg_lock, + unsigned nic_flags, unsigned nic_options) +{ + struct efhw_device_type dev_type; + struct efhw_nic *nic = &lnic->nic; + u8 class_revision; + int rc; + + rc = pci_read_config_byte(dev, PCI_CLASS_REVISION, &class_revision); + if (rc != 0) { + EFRM_ERR("%s: pci_read_config_byte failed (%d)", + __FUNCTION__, rc); + return rc; + } + + if (!efhw_device_type_init(&dev_type, dev->vendor, dev->device, + class_revision)) { + EFRM_ERR("%s: efhw_device_type_init failed %04x:%04x(%d)", + __FUNCTION__, (unsigned) dev->vendor, + (unsigned) dev->device, (int) class_revision); + return -ENODEV; + } + + EFRM_NOTICE("attaching device type %04x:%04x %d:%c%d", + (unsigned) dev->vendor, (unsigned) dev->device, + dev_type.arch, dev_type.variant, dev_type.revision); + + /* Initialise the adapter-structure. */ + efhw_nic_init(nic, nic_flags, nic_options, dev_type); + lnic->pci_dev = dev; + + rc = pci_enable_device(dev); + if (rc < 0) { + EFRM_ERR("%s: pci_enable_device failed (%d)", + __FUNCTION__, rc); + return rc; + } + + lnic->ctr_ap_pci_addr = pci_resource_start(dev, nic->ctr_ap_bar); + + if (!pci_dma_supported(dev, (dma_addr_t)EFHW_DMA_ADDRMASK)) { + EFRM_ERR("%s: pci_dma_supported(%lx) failed", __FUNCTION__, + (unsigned long)EFHW_DMA_ADDRMASK); + return -ENODEV; + } + + if (pci_set_dma_mask(dev, (dma_addr_t)EFHW_DMA_ADDRMASK)) { + EFRM_ERR("%s: pci_set_dma_mask(%lx) failed", __FUNCTION__, + (unsigned long)EFHW_DMA_ADDRMASK); + return -ENODEV; + } + + if (pci_set_consistent_dma_mask(dev, (dma_addr_t)EFHW_DMA_ADDRMASK)) { + EFRM_ERR("%s: pci_set_consistent_dma_mask(%lx) failed", + __FUNCTION__, (unsigned long)EFHW_DMA_ADDRMASK); + return -ENODEV; + } + + rc = linux_efhw_nic_map_ctr_ap(lnic); + if (rc < 0) + return rc; + + /* By default struct efhw_nic contains its own lock for protecting + * access to nic registers. We override it with a pointer to the + * lock in the net driver. This is needed when resource and net + * drivers share a single PCI function (falcon B series). 
+ */ + nic->reg_lock = reg_lock; + return 0; +} + +void linux_efrm_nic_dtor(struct linux_efhw_nic *lnic) +{ + struct efhw_nic *nic = &lnic->nic; + efhw_ioaddr_t bar_ioaddr = nic->bar_ioaddr; + + efhw_nic_dtor(nic); + + efrm_nic_buffer_table_free(nic); + + /* Unmap the bar. */ + EFRM_ASSERT(bar_ioaddr); + iounmap(bar_ioaddr); + nic->bar_ioaddr = 0; +} + +/**************************************************************************** + * + * efrm_tasklet - used to poll the eventq which may result in further callbacks + * + ****************************************************************************/ + +static void efrm_tasklet(unsigned long pdev) +{ + struct efhw_nic *nic = (struct efhw_nic *)pdev; + + EFRM_ASSERT(!(nic->flags & NIC_FLAG_NO_INTERRUPT)); + + efhw_keventq_poll(nic, &nic->evq[0]); + EFRM_TRACE("tasklet complete"); +} + +/**************************************************************************** + * + * char driver specific interrupt callbacks -- run at hard IRQL + * + ****************************************************************************/ +static void efrm_handle_eventq_irq(struct efhw_nic *nic, int evq) +{ + /* NB. The interrupt must have already been acked (for legacy mode). */ + + EFRM_TRACE("%s: starting tasklet", __FUNCTION__); + EFRM_ASSERT(!(nic->flags & NIC_FLAG_NO_INTERRUPT)); + + tasklet_schedule(&linux_efhw_nic(nic)->tasklet); +} + +/* A count of how many NICs this driver knows about. */ +static int n_nics_probed; + +/**************************************************************************** + * + * efrm_nic_add: add the NIC to the resource driver + * + * NOTE: the flow of control through this routine is quite subtle + * because of the number of operations that can fail. We therefore + * take the apporaching of keeping the return code (rc) variable + * accurate, and only do operations while it is non-negative. Tear down + * is done at the end if rc is negative, depending on what has been set up + * by that point. + * + * So basically just make sure that any code you add checks rc>=0 before + * doing any work and you'll be fine. + * + ****************************************************************************/ +int +efrm_nic_add(struct pci_dev *dev, unsigned flags, const uint8_t *mac_addr, + struct linux_efhw_nic **lnic_out, spinlock_t *reg_lock, + int bt_min, int bt_max, + const struct vi_resource_dimensions *res_dim) +{ + struct linux_efhw_nic *lnic = NULL; + struct efhw_nic *nic = NULL; + int count = 0, rc = 0, resources_init = 0; + int constructed = 0; + int registered_nic = 0; + int buffers_allocated = 0; + static unsigned nic_index; /* = 0; */ + + EFRM_TRACE("%s: device detected (Slot '%s', IRQ %d)", __FUNCTION__, + pci_name(dev) ? pci_name(dev) : "?", dev->irq); + + /* Ensure that we have room for the new adapter-structure. */ + if (efrm_nic_table.nic_count == EFHW_MAX_NR_DEVS) { + EFRM_WARN("%s: WARNING: too many devices", __FUNCTION__); + rc = -ENOMEM; + goto failed; + } + + if (n_nics_probed == 0) { + rc = efrm_resources_init(res_dim, bt_min, bt_max); + if (rc != 0) + goto failed; + resources_init = 1; + } + + /* Allocate memory for the new adapter-structure. 
*/ + lnic = kmalloc(sizeof(*lnic), GFP_KERNEL); + if (lnic == NULL) { + EFRM_ERR("%s: ERROR: failed to allocate memory", __FUNCTION__); + rc = -ENOMEM; + goto failed; + } + memset(lnic, 0, sizeof(*lnic)); + nic = &lnic->nic; + + lnic->ev_handlers = &ev_handler; + + /* OS specific hardware mappings */ + rc = linux_efrm_nic_ctor(lnic, dev, reg_lock, flags, nic_options); + if (rc < 0) { + EFRM_ERR("%s: ERROR: initialisation failed", __FUNCTION__); + goto failed; + } + + constructed = 1; + + /* Tell the driver about the NIC - this needs to be done before the + resources managers get created below. Note we haven't initialised + the hardware yet, and I don't like doing this before the perhaps + unreliable hardware initialisation. However, there's quite a lot + of code to review if we wanted to hardware init before bringing + up the resource managers. */ + rc = efrm_driver_register_nic(nic, nic_index++); + if (rc < 0) { + EFRM_ERR("%s: cannot register nic %d with nic error code %d", + __FUNCTION__, efrm_nic_table.nic_count, rc); + goto failed; + } + registered_nic = 1; + + rc = efrm_nic_buffer_table_alloc(nic); + if (rc < 0) + goto failed; + buffers_allocated = 1; + + /****************************************************/ + /* hardware bringup */ + /****************************************************/ + /* Detecting hardware can be a slightly unreliable process; + we want to make sure that we maximise our chances, so we + loop a few times until all is good. */ + for (count = 0; count < max_hardware_init_repeats; count++) { + rc = efhw_nic_init_hardware(nic, &ev_handler, mac_addr); + if (rc >= 0) + break; + + /* pain */ + EFRM_ERR + ("error - hardware initialisation failed code %d, " + "attempt %d of %d", rc, count + 1, + max_hardware_init_repeats); + } + if (rc < 0) + goto failed; + + tasklet_init(&lnic->tasklet, efrm_tasklet, (ulong)nic); + + /* set up interrupt handlers (hard-irq) */ + nic->irq_handler = &efrm_handle_eventq_irq; + + /* this device can now take management interrupts */ + if (do_irq && !(nic->flags & NIC_FLAG_NO_INTERRUPT)) { + rc = linux_efrm_irq_ctor(lnic); + if (rc < 0) { + EFRM_ERR("Interrupt initialisation failed (%d)", rc); + goto failed; + } + efhw_nic_set_interrupt_moderation(nic, 0, irq_moderation); + efhw_nic_interrupt_enable(nic, 0); + } + EFRM_TRACE("interrupts are %sregistered", do_irq ? "" : "not "); + +#if EFRM_HAVE_IOMMU_LOCK + /* Bug 4560: We need the lock if there is memory which cannot be + * accessed by the card and there is an IOMMU to access it. In that + * case, the kernel will use the IOMMU to access the high memory. */ + if ((dev->dma_mask >> PAGE_SHIFT) < max_pfn && !EFRM_NO_IOMMU) + efrm_need_iommu_lock = 1; +#endif + + *lnic_out = lnic; + EFRM_ASSERT(rc == 0); + ++n_nics_probed; + return 0; + +failed: + if (buffers_allocated) + efrm_nic_buffer_table_free(nic); + if (registered_nic) + efrm_driver_unregister_nic(nic); + if (constructed) + linux_efrm_nic_dtor(lnic); + kfree(lnic); /* safe in any case */ + if (resources_init) + efrm_resources_fini(); + return rc; +} + +/**************************************************************************** + * + * efrm_nic_del: Remove the nic from the resource driver structures + * + ****************************************************************************/ +void efrm_nic_del(struct linux_efhw_nic *lnic) +{ + struct efhw_nic *nic = &lnic->nic; + + EFRM_TRACE("%s:", __FUNCTION__); + EFRM_ASSERT(nic); + + efrm_driver_unregister_nic(nic); + + /* + * Synchronise here with any running ISR. + * Remove the OS handler. 
There should be no IRQs being generated + * by our NIC at this point. + */ + if (efhw_nic_have_functional_units(nic)) { + efhw_nic_close_interrupts(nic); + linux_efrm_irq_dtor(lnic); + tasklet_kill(&lnic->tasklet); + } + + /* Close down hardware and free resources. */ + linux_efrm_nic_dtor(lnic); + kfree(lnic); + + if (--n_nics_probed == 0) + efrm_resources_fini(); + + EFRM_TRACE("NIC teardown: Done"); +} + +/**************************************************************************** + * + * init_module: register as a PCI driver. + * + ****************************************************************************/ +static int init_sfc_resource(void) +{ + int rc = 0; + + EFRM_TRACE("%s: RESOURCE driver starting", __FUNCTION__); + + rc = efrm_driver_ctor(); + if (rc < 0) { + EFRM_ERR("%s: efrm_driver_ctor: error %d", __FUNCTION__, rc); + goto fail_driver_ctor; + } + + /* Register the driver so that our 'probe' function is called for + * each EtherFabric device in the system. + */ + rc = efrm_driverlink_register(); + if (rc == -ENODEV) + EFRM_ERR("%s: no devices found", __FUNCTION__); + if (rc < 0) + goto failed_driverlink; + + if (efrm_install_proc_entries() != 0) { + /* Do not fail, but print a warning */ + EFRM_WARN("%s: WARNING: failed to install /proc entries", + __FUNCTION__); + } + + return 0; + +failed_driverlink: + /* No need to release resource managers here since they register + * destructors with the driver. */ + efrm_driver_dtor(); +fail_driver_ctor: + EFRM_ASSERT(rc != 0); + return rc; +} + +/**************************************************************************** + * + * cleanup_module: module-removal entry-point + * + ****************************************************************************/ +static void cleanup_sfc_resource(void) +{ + efrm_uninstall_proc_entries(); + + efrm_driverlink_unregister(); + + /* Clean up char-driver specific initialisation. + - driver dtor can use both work queue and buffer table entries */ + efrm_driver_dtor(); + + EFRM_TRACE("unloaded"); +} + +module_init(init_sfc_resource); +module_exit(cleanup_sfc_resource); Index: head-2008-03-17/drivers/net/sfc/sfc_resource/resource_manager.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/resource_manager.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,263 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains generic code for resources and resource managers. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include +#include +#include + +/********************************************************************** + * Internal stuff. + */ + +#define EFRM_RM_TABLE_SIZE_INIT 256 + +static int grow_table(struct efrm_resource_manager *rm, unsigned min_size) +{ + irq_flags_t lock_flags; + struct efrm_resource **table, **old_table; + unsigned new_size; + + EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm); + + spin_lock_irqsave(&rm->rm_lock, lock_flags); + + /* Check whether the size of the table increased whilst the lock was + * dropped. */ + if (min_size <= rm->rm_table_size) { + spin_unlock_irqrestore(&rm->rm_lock, lock_flags); + return 0; + } + + new_size = rm->rm_table_size << 1; + if (new_size < min_size) + new_size = min_size; + + spin_unlock_irqrestore(&rm->rm_lock, lock_flags); + if (in_atomic()) { + EFRM_WARN("%s: in_atomic in grow_table()", __FUNCTION__); + EFRM_WARN("%s: allocating %u bytes", __FUNCTION__, + (unsigned)(new_size * + sizeof(struct efrm_resource *))); + return -ENOMEM; + } + + table = + (struct efrm_resource **)vmalloc(new_size * + sizeof(struct efrm_resource *)); + spin_lock_irqsave(&rm->rm_lock, lock_flags); + + if (table == 0) { + EFRM_ERR("%s: out of memory in grow_table()", __FUNCTION__); + EFRM_ERR("%s: allocating %u bytes", __FUNCTION__, + (unsigned)(new_size * + sizeof(struct efrm_resource *))); + spin_unlock_irqrestore(&rm->rm_lock, lock_flags); + return -ENOMEM; + } + + /* Could have got bigger while we dropped the lock... */ + if (new_size <= rm->rm_table_size) { + spin_unlock_irqrestore(&rm->rm_lock, lock_flags); + vfree(table); + return 0; + } + + memcpy(table, rm->rm_table, rm->rm_table_size * sizeof(*table)); + memset(table + rm->rm_table_size, 0, + sizeof(*table) * (new_size - rm->rm_table_size)); + /* remember old table so we can free the + memory after we drop the lock (bug 1040) */ + old_table = rm->rm_table; + rm->rm_table = table; + rm->rm_table_size = new_size; + spin_unlock_irqrestore(&rm->rm_lock, lock_flags); + vfree(old_table); + + return 0; +} + +/********************************************************************** + * struct efrm_resource_manager + */ + +void efrm_resource_manager_dtor(struct efrm_resource_manager *rm) +{ + EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm); + + /* call destructor */ + EFRM_DO_DEBUG(if (rm->rm_resources) + EFRM_ERR("%s: %s leaked %d resources", + __FUNCTION__, rm->rm_name, rm->rm_resources)); + EFRM_ASSERT(rm->rm_resources == 0); + + rm->rm_dtor(rm); + + /* clear out things built by efrm_resource_manager_ctor */ + spin_lock_destroy(&rm->rm_lock); + vfree(rm->rm_table); + + /* and the free the memory */ + EFRM_DO_DEBUG(memset(rm, 0, sizeof(*rm))); + kfree(rm); +} + +/* Construct a resource manager. Resource managers are singletons. 
*/ +int +efrm_resource_manager_ctor(struct efrm_resource_manager *rm, + void (*dtor)(struct efrm_resource_manager *), + const char *name, unsigned type, + int initial_table_size) +{ + EFRM_ASSERT(rm); + EFRM_ASSERT(dtor); + + rm->rm_name = name; + EFRM_DO_DEBUG(rm->rm_type = type); + rm->rm_dtor = dtor; + spin_lock_init(&rm->rm_lock); + rm->rm_resources = 0; + rm->rm_resources_hiwat = 0; + + /* if not set then pick a number */ + rm->rm_table_size = (initial_table_size) ? + initial_table_size : EFRM_RM_TABLE_SIZE_INIT; + + rm->rm_table = vmalloc(rm->rm_table_size * + sizeof(struct efrm_resource *)); + + if (rm->rm_table == 0) { + spin_lock_destroy(&rm->rm_lock); + return -ENOMEM; + } + memset(rm->rm_table, 0, sizeof(*rm->rm_table) * rm->rm_table_size); + + EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm); + return 0; +} + +int efrm_resource_manager_insert(struct efrm_resource *rs) +{ + irq_flags_t lock_flags; + struct efrm_resource_manager *rm; + int instance = EFRM_RESOURCE_INSTANCE(rs->rs_handle); + + EFRM_ASSERT(EFRM_RESOURCE_TYPE(rs->rs_handle) < EFRM_RESOURCE_NUM); + rm = efrm_rm_table[EFRM_RESOURCE_TYPE(rs->rs_handle)]; + EFRM_ASSERT(EFRM_RESOURCE_TYPE(rs->rs_handle) == rm->rm_type); + EFRM_RESOURCE_ASSERT_VALID(rs, 0); + + /* Put an entry in the resource table. */ + spin_lock_irqsave(&rm->rm_lock, lock_flags); + if ((unsigned)instance >= rm->rm_table_size) { + spin_unlock_irqrestore(&rm->rm_lock, lock_flags); + if (grow_table(rm, instance + 1) < 0) + return -ENOMEM; + spin_lock_irqsave(&rm->rm_lock, lock_flags); + } + EFRM_ASSERT(rm->rm_table_size > (unsigned)instance); + EFRM_ASSERT(rm->rm_table[instance] == NULL); + rm->rm_table[instance] = rs; + rm->rm_resources++; + if (rm->rm_resources > rm->rm_resources_hiwat) + rm->rm_resources_hiwat = rm->rm_resources; + + /* Put the resource in the linked list. */ + /* ?? broken list_add(&rm->rm_resources, &rs->rs_link); */ + /* DJR wrote that it causes problem on driver unload, and DR tried + * it and saw (probably) this cause an assertion failure due to a + * bad link structure in + * /runbench/results/2005/09/22/0_DupTester_15-16-46 */ + + spin_unlock_irqrestore(&rm->rm_lock, lock_flags); + + return 0; +} + +bool __efrm_resource_ref_count_zero(unsigned type, unsigned instance) +{ + /* This is rather nasty because when a resource's ref count goes to + * zero there is still a pointer to it in the [rm_table]. Thus + * arriving here does not guarantee that we have exclusive access + * to the resource and can free it. In fact the resource may + * already have been freed by another thread (after we dropped our + * ref, but before arriving here). + * + * At this point the only pointers to this resource should be [rs] + * and the one in [rm_table]. EXCEPT: Someone could have got in + * and looked-up the resource in the table before we got the lock. + * In this case the ref will have been hiked again. + * + * Therefore, if ref count is non-zero here, we shouldn't do + * anything, as someone else holds a ref to the resource, and will + * eventually release it. + * + * Otherwise, we zero-out the table entry. Therefore we have the + * only pointer to the resource, and can kill it safely. 
+ */ + struct efrm_resource_manager *rm = efrm_rm_table[type]; + irq_flags_t lock_flags; + struct efrm_resource *rs; + bool do_free = false; + + EFRM_TRACE("efrm_resource_ref_count_zero: type=%d instance=%d", + rm->rm_type, instance); + + EFRM_RESOURCE_MANAGER_ASSERT_VALID(rm); + EFRM_ASSERT(rm->rm_table_size > instance); + + spin_lock_irqsave(&rm->rm_lock, lock_flags); + + rs = rm->rm_table[instance]; + if (rs != NULL) { + do_free = atomic_read(&rs->rs_ref_count) == 0; + if (do_free) { + EFRM_ASSERT(rm->rm_resources > 0); + --rm->rm_resources; + rm->rm_table[instance] = 0; + } + } + + spin_unlock_irqrestore(&rm->rm_lock, lock_flags); + + return do_free; +} +EXPORT_SYMBOL(__efrm_resource_ref_count_zero); + +/* + * vi: sw=8:ai:aw + */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/resources.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/resources.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,94 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains resource managers initialisation functions. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include + +int +efrm_resources_init(const struct vi_resource_dimensions *vi_res_dim, + int buffer_table_min, int buffer_table_max) +{ + int i, rc; + + rc = efrm_buffer_table_ctor(buffer_table_min, buffer_table_max); + if (rc != 0) + return rc; + + /* Create resources in the correct order */ + for (i = 0; i < EFRM_RESOURCE_NUM; ++i) { + struct efrm_resource_manager **rmp = &efrm_rm_table[i]; + + EFRM_ASSERT(*rmp == NULL); + switch (i) { + case EFRM_RESOURCE_VI: + rc = efrm_create_vi_resource_manager(rmp, + vi_res_dim); + break; + case EFRM_RESOURCE_FILTER: + rc = efrm_create_filter_resource_manager(rmp); + break; + case EFRM_RESOURCE_IOBUFSET: + rc = efrm_create_iobufset_resource_manager(rmp); + break; + default: + rc = 0; + break; + } + + if (rc < 0) { + EFRM_ERR("%s: failed type=%d (%d)", + __FUNCTION__, i, rc); + efrm_buffer_table_dtor(); + return rc; + } + } + + return 0; +} + +void efrm_resources_fini(void) +{ + int i; + + for (i = EFRM_RESOURCE_NUM - 1; i >= 0; --i) + if (efrm_rm_table[i]) { + efrm_resource_manager_dtor(efrm_rm_table[i]); + efrm_rm_table[i] = NULL; + } + + efrm_buffer_table_dtor(); +} Index: head-2008-03-17/drivers/net/sfc/sfc_resource/vi_resource_alloc.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/vi_resource_alloc.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,876 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains allocation of VI resources. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/*** Data definitions ****************************************************/ + +static const char *dmaq_names[] = { "TX", "RX" }; + +struct vi_resource_manager *efrm_vi_manager; + +/*** Forward references **************************************************/ + +static int +efrm_vi_resource_alloc_or_free(int alloc, struct vi_resource *evq_virs, + uint16_t vi_flags, int32_t evq_capacity, + int32_t txq_capacity, int32_t rxq_capacity, + uint8_t tx_q_tag, uint8_t rx_q_tag, + struct vi_resource **virs_in_out); + +/*** Reference count handling ********************************************/ + +static inline void efrm_vi_rm_get_ref(struct vi_resource *virs) +{ + atomic_inc(&virs->evq_refs); +} + +static inline void efrm_vi_rm_drop_ref(struct vi_resource *virs) +{ + EFRM_ASSERT(atomic_read(&virs->evq_refs) != 0); + if (atomic_dec_and_test(&virs->evq_refs)) + efrm_vi_resource_alloc_or_free(false, NULL, 0, 0, 0, 0, 0, 0, + &virs); +} + +/*** Instance numbers ****************************************************/ + +static inline int efrm_vi_rm_alloc_id(uint16_t vi_flags, int32_t evq_capacity) +{ + irq_flags_t lock_flags; + int instance; + int rc; + + if (efrm_nic_table.a_nic == NULL) /* ?? FIXME: surely not right */ + return -ENODEV; + + spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); + + /* Falcon A1 RX phys addr wierdness. */ + if (efrm_nic_table.a_nic->devtype.variant == 'A' && + (vi_flags & EFHW_VI_RX_PHYS_ADDR_EN)) { + if (vi_flags & EFHW_VI_JUMBO_EN) { + /* Falcon-A cannot do phys + scatter. */ + EFRM_WARN + ("%s: falcon-A does not support phys+scatter mode", + __FUNCTION__); + instance = -1; + } else if (efrm_vi_manager->iscsi_dmaq_instance_is_free + && evq_capacity == 0) { + /* Falcon-A has a single RXQ that gives the correct + * semantics for physical addressing. However, it + * happens to have the same instance number as the + * 'char' event queue, so we cannot also hand out + * the event queue. */ + efrm_vi_manager->iscsi_dmaq_instance_is_free = false; + instance = FALCON_A1_ISCSI_DMAQ; + } else { + EFRM_WARN("%s: iSCSI receive queue not free", + __FUNCTION__); + instance = -1; + } + goto unlock_out; + } + + if (vi_flags & EFHW_VI_RM_WITH_INTERRUPT) { + rc = __kfifo_get(efrm_vi_manager->instances_with_interrupt, + (unsigned char *)&instance, sizeof(instance)); + if (rc != sizeof(instance)) { + EFRM_ASSERT(rc == 0); + instance = -1; + } + goto unlock_out; + } + + /* Otherwise a normal run-of-the-mill VI. */ + rc = __kfifo_get(efrm_vi_manager->instances_with_timer, + (unsigned char *)&instance, sizeof(instance)); + if (rc != sizeof(instance)) { + EFRM_ASSERT(rc == 0); + instance = -1; + } + +unlock_out: + spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags); + return instance; +} + +static void efrm_vi_rm_free_id(int instance) +{ + irq_flags_t lock_flags; + struct kfifo *instances; + + if (efrm_nic_table.a_nic == NULL) /* ?? 
FIXME: surely not right */ + return; + + if (efrm_nic_table.a_nic->devtype.variant == 'A' && + instance == FALCON_A1_ISCSI_DMAQ) { + EFRM_ASSERT(efrm_vi_manager->iscsi_dmaq_instance_is_free == + false); + spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); + efrm_vi_manager->iscsi_dmaq_instance_is_free = true; + spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, + lock_flags); + } else { + if (instance >= efrm_vi_manager->with_timer_base && + instance < efrm_vi_manager->with_timer_limit) { + instances = efrm_vi_manager->instances_with_timer; + } else { + EFRM_ASSERT(instance >= + efrm_vi_manager->with_interrupt_base); + EFRM_ASSERT(instance < + efrm_vi_manager->with_interrupt_limit); + instances = efrm_vi_manager->instances_with_interrupt; + } + + EFRM_VERIFY_EQ(kfifo_put(instances, (unsigned char *)&instance, + sizeof(instance)), sizeof(instance)); + } +} + +/*** Queue sizes *********************************************************/ + +/* NB. This should really take a nic as an argument, but that makes + * the buffer table allocation difficult. */ +uint32_t efrm_vi_rm_evq_bytes(struct vi_resource *virs + /*,struct efhw_nic *nic */ ) +{ + return virs->evq_capacity * sizeof(efhw_event_t); +} +EXPORT_SYMBOL(efrm_vi_rm_evq_bytes); + +/* NB. This should really take a nic as an argument, but that makes + * the buffer table allocation difficult. */ +uint32_t efrm_vi_rm_txq_bytes(struct vi_resource *virs + /*,struct efhw_nic *nic */ ) +{ + return virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] * + FALCON_DMA_TX_DESC_BYTES; +} +EXPORT_SYMBOL(efrm_vi_rm_txq_bytes); + +/* NB. This should really take a nic as an argument, but that makes + * the buffer table allocation difficult. */ +uint32_t efrm_vi_rm_rxq_bytes(struct vi_resource *virs + /*,struct efhw_nic *nic */ ) +{ + uint32_t bytes_per_desc = ((virs->flags & EFHW_VI_RX_PHYS_ADDR_EN) + ? FALCON_DMA_RX_PHYS_DESC_BYTES + : FALCON_DMA_RX_BUF_DESC_BYTES); + return virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] * bytes_per_desc; +} +EXPORT_SYMBOL(efrm_vi_rm_rxq_bytes); + +static int choose_size(int size_rq, unsigned sizes) +{ + int size; + + /* size_rq < 0 means default, but we interpret this as 'minimum'. 
*/ + + for (size = 256;; size <<= 1) + if ((sizes & ~((size - 1) | size)) == 0) + return -1; + else if ((size & sizes) && size >= size_rq) + return size; +} + +static int +efrm_vi_rm_adjust_alloc_request(struct vi_resource *virs, struct efhw_nic *nic) +{ + int capacity; + + EFRM_ASSERT(nic->efhw_func); + + if (virs->evq_capacity) { + capacity = choose_size(virs->evq_capacity, nic->evq_sizes); + if (capacity < 0) { + EFRM_ERR("vi_resource: bad evq size %d (supported=%x)", + virs->evq_capacity, nic->evq_sizes); + return -E2BIG; + } + virs->evq_capacity = capacity; + } + if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]) { + capacity = + choose_size(virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX], + nic->txq_sizes); + if (capacity < 0) { + EFRM_ERR("vi_resource: bad txq size %d (supported=%x)", + virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX], + nic->txq_sizes); + return -E2BIG; + } + virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] = capacity; + } + if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) { + capacity = + choose_size(virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX], + nic->rxq_sizes); + if (capacity < 0) { + EFRM_ERR("vi_resource: bad rxq size %d (supported=%x)", + virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX], + nic->rxq_sizes); + return -E2BIG; + } + virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] = capacity; + } + + return 0; +} + +/* remove the reference to the event queue in this VI resource and decrement + the event queue's use count */ +static inline void efrm_vi_rm_detach_evq(struct vi_resource *virs) +{ + struct vi_resource *evq_virs; + + EFRM_ASSERT(virs != NULL); + + evq_virs = virs->evq_virs; + + if (evq_virs != NULL) { + virs->evq_virs = NULL; + if (evq_virs == virs) { + EFRM_TRACE("%s: " EFRM_RESOURCE_FMT + " had internal event queue ", __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle)); + } else { + efrm_vi_rm_drop_ref(evq_virs); + EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " had event queue " + EFRM_RESOURCE_FMT, __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle), + EFRM_RESOURCE_PRI_ARG(evq_virs->rs. 
+ rs_handle)); + } + } else { + EFRM_TRACE("%s: " EFRM_RESOURCE_FMT + " had no event queue (nothing to do)", + __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle)); + } +} + +/*** Buffer Table allocations ********************************************/ + +#if defined(__CI_HARDWARE_CONFIG_FALCON__) +static int +efrm_vi_rm_alloc_or_free_buffer_table(struct vi_resource *virs, bool is_alloc) +{ + uint32_t bytes; + int page_order; + int rc; + + if (!is_alloc) + goto destroy; + + if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]) { + bytes = efrm_vi_rm_txq_bytes(virs); + page_order = get_order(bytes); + rc = efrm_buffer_table_alloc(page_order, + (virs->dmaq_buf_tbl_alloc + + EFRM_VI_RM_DMA_QUEUE_TX)); + if (rc != 0) { + EFRM_TRACE + ("%s: Error %d allocating TX buffer table entry", + __FUNCTION__, rc); + goto fail_txq_alloc; + } + } + + if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) { + bytes = efrm_vi_rm_rxq_bytes(virs); + page_order = get_order(bytes); + rc = efrm_buffer_table_alloc(page_order, + (virs->dmaq_buf_tbl_alloc + + EFRM_VI_RM_DMA_QUEUE_RX)); + if (rc != 0) { + EFRM_TRACE + ("%s: Error %d allocating RX buffer table entry", + __FUNCTION__, rc); + goto fail_rxq_alloc; + } + } + return 0; + +destroy: + rc = 0; + + if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) { + efrm_buffer_table_free(&virs-> + dmaq_buf_tbl_alloc + [EFRM_VI_RM_DMA_QUEUE_RX]); + } +fail_rxq_alloc: + + if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]) { + efrm_buffer_table_free(&virs-> + dmaq_buf_tbl_alloc + [EFRM_VI_RM_DMA_QUEUE_TX]); + } +fail_txq_alloc: + + return rc; +} + +#endif /* defined(__CI_HARDWARE_CONFIG_FALCON__) */ + +/*** Per-NIC allocations *************************************************/ + +static inline int +efrm_vi_rm_init_evq(struct vi_resource *virs, int nic_index) +{ + int rc; + struct efhw_nic *nic = efrm_nic_table.nic[nic_index]; + int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); + struct eventq_resource_hardware *evq_hw = + &virs->nic_info[nic_index].evq_pages; + uint32_t buf_bytes = efrm_vi_rm_evq_bytes(virs); + + if (virs->evq_capacity == 0) + return 0; + evq_hw->capacity = virs->evq_capacity; + + /* Allocate buffer table entries to map onto the iobuffer. This + * currently allocates its own buffer table entries on Falcon which is + * a bit wasteful on a multi-NIC system. */ + evq_hw->buf_tbl_alloc.base = (unsigned)-1; + rc = efrm_buffer_table_alloc(get_order(buf_bytes), + &evq_hw->buf_tbl_alloc); + if (rc < 0) { + EFHW_WARN("%s: failed (%d) to alloc %d buffer table entries", + __FUNCTION__, rc, get_order(buf_bytes)); + return rc; + } + + /* Allocate the event queue memory. 
*/ + rc = efhw_nic_event_queue_alloc_iobuffer(nic, evq_hw, instance, + buf_bytes); + if (rc != 0) { + EFRM_ERR("%s: Error allocating iobuffer: %d", __FUNCTION__, rc); + efrm_buffer_table_free(&evq_hw->buf_tbl_alloc); + return rc; + } + + /* Initialise the event queue hardware */ + efhw_nic_event_queue_enable(nic, instance, virs->evq_capacity, + efhw_iopages_dma_addr(&evq_hw->iobuff) + + evq_hw->iobuff_off, + evq_hw->buf_tbl_alloc.base); + + EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " capacity=%u", __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle), + virs->evq_capacity); + +#if defined(__ia64__) + /* Page size may be large, so for now just increase the + * size of the requested evq up to a round number of + * pages + */ + buf_bytes = CI_ROUND_UP(buf_bytes, PAGE_SIZE); +#endif + EFRM_ASSERT(buf_bytes % PAGE_SIZE == 0); + + virs->mem_mmap_bytes += buf_bytes; + + return 0; +} + +static inline void +efrm_vi_rm_fini_evq(struct vi_resource *virs, int nic_index) +{ + struct efhw_nic *nic = efrm_nic_table.nic[nic_index]; + int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); + struct vi_resource_nic_info *nic_info = &virs->nic_info[nic_index]; + + if (virs->evq_capacity == 0) + return; + + /* Zero the timer-value for this queue. + And Tell NIC to stop using this event queue. */ + efhw_nic_event_queue_disable(nic, instance, 0); + + if (nic_info->evq_pages.buf_tbl_alloc.base != (unsigned)-1) + efrm_buffer_table_free(&nic_info->evq_pages.buf_tbl_alloc); + + efhw_iopages_free(nic, &nic_info->evq_pages.iobuff); +} + +/*! FIXME: we should make sure this number is never zero (=> unprotected) */ +/*! FIXME: put this definition in a relevant header (e.g. as (evqid)+1) */ +#define EFAB_EVQ_OWNER_ID(evqid) ((evqid)) + +void +efrm_vi_rm_init_dmaq(struct vi_resource *virs, int queue_type, + struct efhw_nic *nic) +{ + int instance; + struct vi_resource *evq_virs; + int evq_instance; + efhw_buffer_addr_t buf_addr; + + instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); + evq_virs = virs->evq_virs; + evq_instance = EFRM_RESOURCE_INSTANCE(evq_virs->rs.rs_handle); + + buf_addr = virs->dmaq_buf_tbl_alloc[queue_type].base; + + if (queue_type == EFRM_VI_RM_DMA_QUEUE_TX) { + efhw_nic_dmaq_tx_q_init(nic, + instance, /* dmaq */ + evq_instance, /* evq */ + EFAB_EVQ_OWNER_ID(evq_instance), /* owner */ + virs->dmaq_tag[queue_type], /* tag */ + virs->dmaq_capacity[queue_type], /* size of queue */ + buf_addr, /* buffer index */ + virs->flags); /* user specified Q attrs */ + } else { + efhw_nic_dmaq_rx_q_init(nic, + instance, /* dmaq */ + evq_instance, /* evq */ + EFAB_EVQ_OWNER_ID(evq_instance), /* owner */ + virs->dmaq_tag[queue_type], /* tag */ + virs->dmaq_capacity[queue_type], /* size of queue */ + buf_addr, /* buffer index */ + virs->flags); /* user specified Q attrs */ + } +} + +static int +efrm_vi_rm_init_or_fini_dmaq(struct vi_resource *virs, + int queue_type, int init, int nic_index) +{ + int rc; + struct efhw_nic *nic = efrm_nic_table.nic[nic_index]; + int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); + uint32_t buf_bytes; + struct vi_resource *evq_virs; + +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + struct vi_resource_nic_info *nic_info = &virs->nic_info[nic_index]; + int page_order; + uint32_t num_pages; + efhw_iopages_t *iobuff; +#endif + + if (!init) + goto destroy; + + /* Ignore disabled queues. 
*/ + if (virs->dmaq_capacity[queue_type] == 0) { + if (queue_type == EFRM_VI_RM_DMA_QUEUE_TX) + efhw_nic_dmaq_tx_q_disable(nic, instance); + else + efhw_nic_dmaq_rx_q_disable(nic, instance); + return 0; + } + + buf_bytes = (queue_type == EFRM_VI_RM_DMA_QUEUE_TX + ? efrm_vi_rm_txq_bytes(virs) + : efrm_vi_rm_rxq_bytes(virs)); + +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + page_order = get_order(buf_bytes); + + rc = efhw_iopages_alloc(nic, &nic_info->dmaq_pages[queue_type], + page_order); + if (rc != 0) { + EFRM_ERR("%s: Failed to allocate %s DMA buffer.", __FUNCTION__, + dmaq_names[queue_type]); + goto fail_iopages; + } + + num_pages = 1 << page_order; + iobuff = &nic_info->dmaq_pages[queue_type]; + efhw_nic_buffer_table_set_n(nic, + virs->dmaq_buf_tbl_alloc[queue_type].base, + efhw_iopages_dma_addr(iobuff), + EFHW_NIC_PAGE_SIZE, 0, num_pages, 0); + + falcon_nic_buffer_table_confirm(nic); + + virs->mem_mmap_bytes += round_up(buf_bytes, PAGE_SIZE); +#endif /* __CI_HARDWARE_CONFIG_FALCON__ */ + + evq_virs = virs->evq_virs; + EFRM_ASSERT(evq_virs); + + /* Make sure there is an event queue. */ + if (evq_virs->evq_capacity <= 0) { + EFRM_ERR("%s: Cannot use empty event queue for %s DMA", + __FUNCTION__, dmaq_names[queue_type]); + rc = -EINVAL; + goto fail_evq; + } + + efrm_vi_rm_init_dmaq(virs, queue_type, nic); + + return 0; + +destroy: + rc = 0; + + /* Ignore disabled queues. */ + if (virs->dmaq_capacity[queue_type] == 0) + return 0; + + /* No need to disable the queue here. Nobody is using it anyway. */ + +fail_evq: +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + efhw_iopages_free(nic, &nic_info->dmaq_pages[queue_type]); +fail_iopages: +#endif + + return rc; +} + +static int +efrm_vi_rm_init_or_fini_nic(struct vi_resource *virs, int init, int nic_index) +{ + struct vi_resource *evq_virs; + int rc; +#if defined(__CI_HARDWARE_CONFIG_FALCON__) +#ifndef NDEBUG + int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); +#endif +#endif + + if (!init) + goto destroy; + + evq_virs = virs->evq_virs; + if (evq_virs != virs) { + if (!efrm_nic_set_read(&evq_virs->nic_set, nic_index)) { + /* Ignore this NIC. It's not supported by the event + * queue. */ + return 0; + } + } + + rc = efrm_vi_rm_init_evq(virs, nic_index); + if (rc != 0) + goto fail_evq; + + rc = efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_TX, + init, nic_index); + if (rc != 0) + goto fail_txq; + + rc = efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_RX, + init, nic_index); + if (rc != 0) + goto fail_rxq; + +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + /* Allocate space for the control page. */ + EFRM_ASSERT(falcon_tx_dma_page_offset(instance) < PAGE_SIZE); + EFRM_ASSERT(falcon_rx_dma_page_offset(instance) < PAGE_SIZE); + EFRM_ASSERT(falcon_timer_page_offset(instance) < PAGE_SIZE); + virs->bar_mmap_bytes += PAGE_SIZE; +#endif + + /* Mark the NIC as having been initialised. */ + efrm_nic_set_write(&virs->nic_set, nic_index, true); + + return 0; + +destroy: + rc = 0; + + efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_RX, + false, nic_index); +fail_rxq: + + efrm_vi_rm_init_or_fini_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_TX, + false, nic_index); +fail_txq: + + efrm_vi_rm_fini_evq(virs, nic_index); +fail_evq: + + /* Mark the NIC as having been finalised. 
*/ + efrm_nic_set_write(&virs->nic_set, nic_index, false); + EFRM_ASSERT(rc != 0 || !init); + + return rc; +} + +static int +efrm_vi_resource_alloc_or_free(int alloc, struct vi_resource *evq_virs, + uint16_t vi_flags, int32_t evq_capacity, + int32_t txq_capacity, int32_t rxq_capacity, + uint8_t tx_q_tag, uint8_t rx_q_tag, + struct vi_resource **virs_in_out) +{ + struct vi_resource *virs; + int rc; + int instance; + struct efhw_nic *nic; + int nic_i; + + EFRM_ASSERT(virs_in_out); + EFRM_ASSERT(efrm_vi_manager); + EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_vi_manager->rm); + + if (!alloc) + goto destroy; + +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + rx_q_tag &= (1 << TX_DESCQ_LABEL_WIDTH) - 1; + tx_q_tag &= (1 << RX_DESCQ_LABEL_WIDTH) - 1; +#endif + + virs = kmalloc(sizeof(*virs), GFP_KERNEL); + if (virs == NULL) { + EFRM_ERR("%s: Error allocating VI resource object", + __FUNCTION__); + rc = -ENOMEM; + goto fail_alloc; + } + memset(virs, 0, sizeof(*virs)); + + /* Some macros make the assumption that the struct efrm_resource is + * the first member of a struct vi_resource. */ + EFRM_ASSERT(&virs->rs == (struct efrm_resource *) (virs)); + + instance = efrm_vi_rm_alloc_id(vi_flags, evq_capacity); + if (instance < 0) { + /* Clear out the close list... */ + efrm_vi_rm_salvage_flushed_vis(); + instance = efrm_vi_rm_alloc_id(vi_flags, evq_capacity); + if (instance >= 0) + EFRM_TRACE("%s: Salvaged a closed VI.", __FUNCTION__); + } + + if (instance < 0) { + /* Could flush resources and try again here. */ + EFRM_ERR("%s: Out of appropriate VI resources", __FUNCTION__); + rc = -EBUSY; + goto fail_alloc_id; + } + + EFRM_TRACE("%s: new VI ID %d", __FUNCTION__, instance); + efrm_resource_init(&virs->rs, EFRM_RESOURCE_VI, instance); + + /* Start with one reference. Any external VIs using the EVQ of this + * resource will increment this reference rather than the resource + * reference to avoid DMAQ flushes from waiting for other DMAQ + * flushes to complete. When the resource reference goes to zero, + * the DMAQ flush happens. When the flush completes, this reference + * is decremented. When this reference reaches zero, the instance + * is freed. */ + atomic_set(&virs->evq_refs, 1); + + efrm_nic_set_clear(&virs->nic_set); + + virs->bar_mmap_bytes = 0; + virs->mem_mmap_bytes = 0; + virs->evq_capacity = evq_capacity; + virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] = txq_capacity; + virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] = rxq_capacity; + virs->dmaq_tag[EFRM_VI_RM_DMA_QUEUE_TX] = tx_q_tag; + virs->dmaq_tag[EFRM_VI_RM_DMA_QUEUE_RX] = rx_q_tag; + virs->flags = vi_flags; + + INIT_LIST_HEAD(&virs->tx_flush_link); + INIT_LIST_HEAD(&virs->rx_flush_link); + efrm_nic_set_clear(&virs->tx_flush_nic_set); + efrm_nic_set_clear(&virs->rx_flush_nic_set); + + memset(&efrm_vi_manager->evq_infos[instance], 0, + sizeof(struct vi_resource_evq_info)); + efrm_vi_manager->evq_infos[instance].evq_virs = virs; + + /* Adjust the queue sizes. */ + rc = 0; + EFRM_FOR_EACH_NIC(nic_i, nic) + if (rc == 0) + rc = efrm_vi_rm_adjust_alloc_request(virs, nic); + if (rc != 0) + goto fail_adjust_request; + + /* Attach the EVQ early so that we can ensure that the NIC sets + * match. */ + if (evq_virs == NULL) { + evq_virs = virs; + EFRM_TRACE("%s: " EFRM_RESOURCE_FMT + " has no external event queue", __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle)); + } else { + /* Make sure the resource managers are the same. 
*/ + if (EFRM_RESOURCE_TYPE(evq_virs->rs.rs_handle) != + EFRM_RESOURCE_VI) { + EFRM_ERR("%s: Mismatched owner for event queue VI " + EFRM_RESOURCE_FMT, __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(evq_virs->rs.rs_handle)); + return -EINVAL; + } + EFRM_ASSERT(atomic_read(&evq_virs->evq_refs) != 0); + efrm_vi_rm_get_ref(evq_virs); + EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " uses event queue " + EFRM_RESOURCE_FMT, + __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle), + EFRM_RESOURCE_PRI_ARG(evq_virs->rs.rs_handle)); + } + virs->evq_virs = evq_virs; + +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + rc = efrm_vi_rm_alloc_or_free_buffer_table(virs, true); + if (rc != 0) + goto fail_buffer_table; +#endif + + rc = 0; + EFRM_FOR_EACH_NIC(nic_i, nic) + if (rc == 0) + /* This updates virs->nic_set for the NICs which need + * finalising. */ + rc = efrm_vi_rm_init_or_fini_nic(virs, true, nic_i); + if (rc != 0) + goto fail_init_nic; + + /* Put it into the resource manager's table. */ + rc = efrm_resource_manager_insert(&virs->rs); + if (rc != 0) { + if (atomic_dec_and_test(&virs->rs.rs_ref_count)) + efrm_vi_resource_free(virs); + return rc; + } + + *virs_in_out = virs; + EFRM_TRACE("%s: Allocated " EFRM_RESOURCE_FMT, __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle)); + return 0; + +destroy: + virs = *virs_in_out; + EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 1); + instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); + + EFRM_TRACE("%s: Freeing %d", __FUNCTION__, + EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle)); + + /* Destroying the VI. The reference count must be zero. */ + EFRM_ASSERT(atomic_read(&virs->evq_refs) == 0); + + /* The EVQ should have gone (and DMA disabled) so that this + * function can't be re-entered to destroy the EVQ VI. */ + EFRM_ASSERT(virs->evq_virs == NULL); + rc = 0; + +fail_init_nic: + EFRM_FOR_EACH_NIC_IN_SET(&virs->nic_set, nic_i, nic) + efrm_vi_rm_init_or_fini_nic(virs, false, nic_i); + +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + efrm_vi_rm_alloc_or_free_buffer_table(virs, false); +fail_buffer_table: +#endif + + efrm_vi_rm_detach_evq(virs); + +fail_adjust_request: + + EFRM_ASSERT(virs->evq_callback_fn == NULL); + memset(&efrm_vi_manager->evq_infos[instance], 0, + sizeof(struct vi_resource_evq_info)); + EFRM_TRACE("%s: delete VI ID %d", __FUNCTION__, instance); + efrm_vi_rm_free_id(instance); +fail_alloc_id: + + EFRM_DO_DEBUG(memset(virs, 0, sizeof(*virs))); + kfree(virs); +fail_alloc: + *virs_in_out = NULL; + + return rc; +} + +/*** Resource object ****************************************************/ + +int +efrm_vi_resource_alloc(struct vi_resource *evq_virs, + uint16_t vi_flags, int32_t evq_capacity, + int32_t txq_capacity, int32_t rxq_capacity, + uint8_t tx_q_tag, uint8_t rx_q_tag, + struct vi_resource **virs_out, + uint32_t *out_io_mmap_bytes, + uint32_t *out_mem_mmap_bytes, + uint32_t *out_txq_capacity, uint32_t *out_rxq_capacity) +{ + int rc; + rc = efrm_vi_resource_alloc_or_free(true, evq_virs, vi_flags, + evq_capacity, txq_capacity, + rxq_capacity, tx_q_tag, rx_q_tag, + virs_out); + if (rc == 0) { + if (out_io_mmap_bytes != NULL) + *out_io_mmap_bytes = (*virs_out)->bar_mmap_bytes; + if (out_mem_mmap_bytes != NULL) + *out_mem_mmap_bytes = (*virs_out)->mem_mmap_bytes; + if (out_txq_capacity != NULL) + *out_txq_capacity = + (*virs_out)->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX]; + if (out_rxq_capacity != NULL) + *out_rxq_capacity = + (*virs_out)->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]; + } + + return rc; +} +EXPORT_SYMBOL(efrm_vi_resource_alloc); + +void 
efrm_vi_rm_free_flushed_resource(struct vi_resource *virs) +{ + EFRM_ASSERT(virs != NULL); + EFRM_ASSERT(atomic_read(&virs->rs.rs_ref_count) == 0); + + EFRM_TRACE("%s: " EFRM_RESOURCE_FMT, __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle)); + /* release the associated event queue then drop our own reference + * count */ + efrm_vi_rm_detach_evq(virs); + efrm_vi_rm_drop_ref(virs); +} Index: head-2008-03-17/drivers/net/sfc/sfc_resource/vi_resource_event.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/vi_resource_event.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,232 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains event handling for VI resource. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include +#include +#include +#include + +void +efrm_eventq_request_wakeup(struct vi_resource *virs, unsigned current_ptr, + unsigned nic_index) +{ + struct efhw_nic *nic; + int next_i; + EFRM_ASSERT(efrm_nic_set_read(&virs->nic_set, nic_index)); + nic = efrm_nic_table.nic[nic_index]; + EFRM_ASSERT(nic); + next_i = ((current_ptr / sizeof(efhw_event_t)) & + (virs->evq_capacity - 1)); + + efhw_nic_wakeup_request(nic, efrm_eventq_dma_addr(virs, nic_index), + next_i, + EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle)); +} +EXPORT_SYMBOL(efrm_eventq_request_wakeup); + +void efrm_eventq_reset(struct vi_resource *virs, int nic_index) +{ + struct efhw_nic *nic = efrm_nic_table.nic[nic_index]; + int instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); + + EFRM_ASSERT(virs->evq_capacity != 0); + EFRM_ASSERT(efrm_nic_set_read(&virs->nic_set, nic_index)); + + /* FIXME: Protect against concurrent resets. */ + + efhw_nic_event_queue_disable(nic, instance, 0); + + memset(efrm_eventq_base(virs, nic_index), EFHW_CLEAR_EVENT_VALUE, + efrm_eventq_bytes(virs, nic_index)); + efhw_nic_event_queue_enable(nic, instance, virs->evq_capacity, + efrm_eventq_dma_addr(virs, nic_index), + virs->nic_info[nic_index].evq_pages. 
+ buf_tbl_alloc.base); + EFRM_TRACE("%s: " EFRM_RESOURCE_FMT, __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle)); +} +EXPORT_SYMBOL(efrm_eventq_reset); + +int +efrm_eventq_register_callback(struct vi_resource *virs, + void (*handler) (void *, int, + struct efhw_nic *nic), + void *arg) +{ + int instance; + int bit; + + EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0); + EFRM_ASSERT(virs->evq_capacity != 0); + + instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); + + /* The handler can be set only once. */ + bit = test_and_set_bit(VI_RESOURCE_EVQ_STATE_CALLBACK_REGISTERED, + &efrm_vi_manager->evq_infos[instance].evq_state); + if (bit) + return -EBUSY; + + /* Store the details. The order is important here. */ + virs->evq_callback_arg = arg; + virs->evq_callback_fn = handler; + + return 0; +} +EXPORT_SYMBOL(efrm_eventq_register_callback); + +void efrm_eventq_kill_callback(struct vi_resource *virs) +{ + int nic_i, instance; + struct efhw_nic *nic; + struct vi_resource_evq_info *evq_info; + int32_t evq_state; + int bit; + + EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0); + EFRM_ASSERT(virs->evq_capacity != 0); + + /* Clean out the callback so a new one can be installed. */ + virs->evq_callback_fn = NULL; + + instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); + evq_info = &efrm_vi_manager->evq_infos[instance]; + + /* Disable the event queue. */ + EFRM_FOR_EACH_NIC_IN_SET(&virs->nic_set, nic_i, nic) + efhw_nic_event_queue_disable(nic, instance, /*timer_only */ 1); + + /* Disable the callback. */ + bit = test_and_clear_bit(VI_RESOURCE_EVQ_STATE_CALLBACK_REGISTERED, + &evq_info->evq_state); + EFRM_ASSERT(bit); /* do not call me twice! */ + + /* Spin until the callback is complete. */ + do { + rmb(); + + udelay(1); + evq_state = evq_info->evq_state; + } while ((evq_state & VI_RESOURCE_EVQ_STATE(BUSY))); +} +EXPORT_SYMBOL(efrm_eventq_kill_callback); + +static void +efrm_eventq_do_callback(struct efhw_nic *nic, unsigned instance, + bool is_timeout) +{ + void (*handler) (void *, int is_timeout, struct efhw_nic *nic); + void *arg; + struct vi_resource_evq_info *evq_info; + int32_t evq_state; + int32_t new_evq_state; + struct vi_resource *virs; + int bit; + + EFRM_TRACE("%s: q=%d %s", __FUNCTION__, instance, + is_timeout ? "timeout" : "wakeup"); + EFRM_ASSERT(efrm_vi_manager); + + evq_info = &efrm_vi_manager->evq_infos[instance]; + + /* Set the BUSY bit and clear WAKEUP_PENDING. Do this + * before waking up the sleeper to avoid races. */ + while (1) { + evq_state = evq_info->evq_state; + new_evq_state = evq_state; + + if ((evq_state & VI_RESOURCE_EVQ_STATE(BUSY)) != 0) { + EFRM_ERR("%s:%d: evq_state[%d] corrupted!", + __FUNCTION__, __LINE__, instance); + return; + } + + if (!is_timeout) + new_evq_state &= ~VI_RESOURCE_EVQ_STATE(WAKEUP_PENDING); + + if (evq_state & VI_RESOURCE_EVQ_STATE(CALLBACK_REGISTERED)) { + new_evq_state |= VI_RESOURCE_EVQ_STATE(BUSY); + if (cmpxchg(&evq_info->evq_state, evq_state, + new_evq_state) == evq_state) { + virs = evq_info->evq_virs; + break; + } + + } else { + /* Just update the state if necessary. */ + if (new_evq_state == evq_state || + cmpxchg(&evq_info->evq_state, evq_state, + new_evq_state) == evq_state) + return; + } + + udelay(1); + } + + /* Call the callback if any. */ + if (evq_state & VI_RESOURCE_EVQ_STATE(CALLBACK_REGISTERED)) { + /* Retrieve the callback fn. */ + handler = virs->evq_callback_fn; + arg = virs->evq_callback_arg; + if (handler != NULL) /* avoid races */ + handler(arg, is_timeout, nic); + } + + /* Clear the BUSY bit. 
*/ + bit = + test_and_clear_bit(VI_RESOURCE_EVQ_STATE_BUSY, + &evq_info->evq_state); + if (!bit) { + EFRM_ERR("%s:%d: evq_state corrupted!", + __FUNCTION__, __LINE__); + } +} + +void efrm_handle_wakeup_event(struct efhw_nic *nic, efhw_event_t *ev) +{ + efrm_eventq_do_callback(nic, + (unsigned int)FALCON_EVENT_WAKE_EVQ_ID(ev), + false); +} + +void efrm_handle_timeout_event(struct efhw_nic *nic, efhw_event_t *ev) +{ + efrm_eventq_do_callback(nic, + (unsigned int)FALCON_EVENT_WAKE_EVQ_ID(ev), + true); +} Index: head-2008-03-17/drivers/net/sfc/sfc_resource/vi_resource_flush.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/vi_resource_flush.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,506 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains DMA queue flushing of VI resources. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include +#include +#include +#include +#include +#include + +#if EFRM_VI_USE_WORKQUEUE + /* can fail as workitem can already be scheuled -- ignore failure */ +#define EFRM_VI_RM_DELAYED_FREE(manager) \ + queue_work(manager->workqueue, &manager->work_item) +#else +#define EFRM_VI_RM_DELAYED_FREE(manager) \ + efrm_vi_rm_delayed_free(&manager->work_item) +#endif + +static const int flush_fifo_hwm = 8 /* TODO should be a HW specific const */ ; + +static void +efrm_vi_resource_rx_flush_done(struct vi_resource *virs, int nic_i, + bool *completed) +{ + /* We should only get a flush event if there is a flush + * outstanding. 
*/ + EFRM_ASSERT(efrm_nic_set_read + (&virs->rx_flush_outstanding_nic_set, nic_i)); + + efrm_nic_set_write(&virs->rx_flush_outstanding_nic_set, nic_i, false); + efrm_nic_set_write(&virs->rx_flush_nic_set, nic_i, false); + + if (efrm_nic_set_is_all_clear(&virs->rx_flush_outstanding_nic_set)) { + list_del(&virs->rx_flush_link); + efrm_vi_manager->rx_flush_outstanding_count--; + + if (efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set)) { + list_add_tail(&virs->rx_flush_link, + &efrm_vi_manager->close_pending); + *completed = 1; + } + } +} + +static void +efrm_vi_resource_tx_flush_done(struct vi_resource *virs, int nic_i, + bool *completed) +{ + /* We should only get a flush event if there is a flush + * outstanding. */ + EFRM_ASSERT(efrm_nic_set_read(&virs->tx_flush_nic_set, nic_i)); + + efrm_nic_set_write(&virs->tx_flush_nic_set, nic_i, false); + + if (efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set)) { + list_del(&virs->tx_flush_link); + + if (efrm_nic_set_is_all_clear(&virs->rx_flush_nic_set)) { + list_add_tail(&virs->rx_flush_link, + &efrm_vi_manager->close_pending); + *completed = 1; + } + } +} + +static void +efrm_vi_resource_issue_rx_flush(struct vi_resource *virs, bool *completed) +{ + struct efhw_nic *nic; + int nic_i; + int instance; + int rc; + + instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); + + list_add_tail(&virs->rx_flush_link, + &efrm_vi_manager->rx_flush_outstanding_list); + virs->rx_flush_outstanding_nic_set = virs->rx_flush_nic_set; + efrm_vi_manager->rx_flush_outstanding_count++; + + EFRM_FOR_EACH_NIC_IN_SET(&virs->nic_set, nic_i, nic) { + EFRM_TRACE("%s: rx queue %d flush requested for nic %d", + __FUNCTION__, instance, nic->index); + rc = efhw_nic_flush_rx_dma_channel(nic, instance); + if (rc == -EAGAIN) + efrm_vi_resource_rx_flush_done(virs, nic_i, completed); + } +} + +static void +efrm_vi_resource_issue_tx_flush(struct vi_resource *virs, bool *completed) +{ + struct efhw_nic *nic; + int nic_i; + int instance; + int rc; + + instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); + + list_add_tail(&virs->tx_flush_link, + &efrm_vi_manager->tx_flush_outstanding_list); + + EFRM_FOR_EACH_NIC_IN_SET(&virs->nic_set, nic_i, nic) { + EFRM_TRACE("%s: tx queue %d flush requested for nic %d", + __FUNCTION__, instance, nic->index); + rc = efhw_nic_flush_tx_dma_channel(nic, instance); + if (rc == -EAGAIN) + efrm_vi_resource_tx_flush_done(virs, nic_i, completed); + } +} + +static void efrm_vi_resource_process_waiting_flushes(bool *completed) +{ + struct vi_resource *virs; + + while (efrm_vi_manager->rx_flush_outstanding_count < flush_fifo_hwm && + !list_empty(&efrm_vi_manager->rx_flush_waiting_list)) { + virs = + list_entry(list_pop + (&efrm_vi_manager->rx_flush_waiting_list), + struct vi_resource, rx_flush_link); + efrm_vi_resource_issue_rx_flush(virs, completed); + } +} + +#if BUG7916_WORKAROUND || BUG5302_WORKAROUND +static void +efrm_vi_resource_flush_retry_vi(struct vi_resource *virs, + int64_t time_now, bool *completed) +{ + struct efhw_nic *nic; + int nic_i; + int instance; + + instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); + + virs->flush_count++; + virs->flush_time = time_now; + +#if BUG7916_WORKAROUND + if (!efrm_nic_set_is_all_clear(&virs->rx_flush_outstanding_nic_set)) { + EFRM_TRACE("%s: Retrying RX flush on instance %d", + __FUNCTION__, instance); + + list_del(&virs->rx_flush_link); + efrm_vi_manager->rx_flush_outstanding_count--; + efrm_vi_resource_issue_rx_flush(virs, completed); + efrm_vi_resource_process_waiting_flushes(completed); + } +#endif + 
+#if BUG5302_WORKAROUND + if (!efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set)) { + if (virs->flush_count > 5) { + EFRM_TRACE("%s: VI resource stuck flush pending " + "(instance=%d, count=%d)", + __FUNCTION__, instance, virs->flush_count); + EFRM_FOR_EACH_NIC_IN_SET(&virs->tx_flush_nic_set, + nic_i, nic) { + falcon_clobber_tx_dma_ptrs(nic, instance); + } + } else { + EFRM_TRACE("%s: Retrying TX flush on instance %d", + __FUNCTION__, instance); + } + + list_del(&virs->tx_flush_link); + efrm_vi_resource_issue_tx_flush(virs, completed); + } +#endif +} +#endif + +int efrm_vi_resource_flush_retry(struct vi_resource *virs) +{ +#if BUG7916_WORKAROUND || BUG5302_WORKAROUND + irq_flags_t lock_flags; + bool completed = false; + + if (efrm_nic_set_is_all_clear(&virs->rx_flush_nic_set) && + efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set)) + return -EALREADY; + + spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); + efrm_vi_resource_flush_retry_vi(virs, get_jiffies_64(), &completed); + spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags); + + if (completed) + EFRM_VI_RM_DELAYED_FREE(efrm_vi_manager); +#endif + + return 0; +} +EXPORT_SYMBOL(efrm_vi_resource_flush_retry); + +#if BUG7916_WORKAROUND || BUG5302_WORKAROUND +/* resource manager lock should be taken before this call */ +static void efrm_vi_handle_flush_loss(bool *completed) +{ + struct list_head *pos, *temp; + struct vi_resource *virs; + int64_t time_now, time_pending; + + /* It's possible we miss flushes - the list is sorted in order we + * generate flushes, see if any are very old. It's also possible + * that we decide an endpoint is flushed even though we've not + * received all the flush events. We *should * mark as + * completed, reclaim and loop again. ?? + * THIS NEEDS BACKPORTING FROM THE FALCON branch + */ + time_now = get_jiffies_64(); + +#if BUG7916_WORKAROUND + list_for_each_safe(pos, temp, + &efrm_vi_manager->rx_flush_outstanding_list) { + virs = container_of(pos, struct vi_resource, rx_flush_link); + + time_pending = time_now - virs->flush_time; + + /* List entries are held in reverse chronological order. Only + * process the old ones. */ + if (time_pending <= 0x100000000LL) + break; + + efrm_vi_resource_flush_retry_vi(virs, time_now, completed); + } +#endif + +#if BUG5302_WORKAROUND + list_for_each_safe(pos, temp, + &efrm_vi_manager->tx_flush_outstanding_list) { + virs = container_of(pos, struct vi_resource, tx_flush_link); + + time_pending = time_now - virs->flush_time; + + /* List entries are held in reverse chronological order. + * Only process the old ones. 
*/ + if (time_pending <= 0x100000000LL) + break; + + efrm_vi_resource_flush_retry_vi(virs, time_now, completed); + } +#endif +} +#endif + +void +efrm_vi_register_flush_callback(struct vi_resource *virs, + void (*handler)(void *), void *arg) +{ + if (handler == NULL) { + virs->flush_callback_fn = handler; + wmb(); + virs->flush_callback_arg = arg; + } else { + virs->flush_callback_arg = arg; + wmb(); + virs->flush_callback_fn = handler; + } +} +EXPORT_SYMBOL(efrm_vi_register_flush_callback); + +int efrm_pt_flush(struct vi_resource *virs) +{ + int instance; + irq_flags_t lock_flags; + bool completed = false; + + instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); + + EFRM_ASSERT(efrm_nic_set_is_all_clear(&virs->rx_flush_nic_set)); + EFRM_ASSERT(efrm_nic_set_is_all_clear + (&virs->rx_flush_outstanding_nic_set)); + EFRM_ASSERT(efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set)); + + EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " EVQ=%d TXQ=%d RXQ=%d", + __FUNCTION__, EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle), + virs->evq_capacity, + virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX], + virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]); + + spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); + + if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX] != 0) + virs->rx_flush_nic_set = virs->nic_set; + + if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] != 0) + virs->tx_flush_nic_set = virs->nic_set; + + /* Clean up immediately if there are no flushes. */ + if (efrm_nic_set_is_all_clear(&virs->rx_flush_nic_set) && + efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set)) { + list_add_tail(&virs->rx_flush_link, + &efrm_vi_manager->close_pending); + completed = true; + } + + /* Issue the RX flush if possible or queue it for later. */ + if (!efrm_nic_set_is_all_clear(&virs->rx_flush_nic_set)) { +#if BUG7916_WORKAROUND || BUG5302_WORKAROUND + if (efrm_vi_manager->rx_flush_outstanding_count >= + flush_fifo_hwm) + efrm_vi_handle_flush_loss(&completed); +#endif + if (efrm_vi_manager->rx_flush_outstanding_count >= + flush_fifo_hwm) { + list_add_tail(&virs->rx_flush_link, + &efrm_vi_manager->rx_flush_waiting_list); + } else { + efrm_vi_resource_issue_rx_flush(virs, &completed); + } + } + + /* Issue the TX flush. There's no limit to the number of + * outstanding TX flushes. 
*/ + if (!efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set)) + efrm_vi_resource_issue_tx_flush(virs, &completed); + + virs->flush_time = get_jiffies_64(); + + spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags); + + if (completed) + EFRM_VI_RM_DELAYED_FREE(efrm_vi_manager); + + return 0; +} +EXPORT_SYMBOL(efrm_pt_flush); + +static void +efrm_handle_rx_dmaq_flushed(struct efhw_nic *flush_nic, int instance, + bool *completed) +{ + struct list_head *pos, *temp; + struct vi_resource *virs; + + list_for_each_safe(pos, temp, + &efrm_vi_manager->rx_flush_outstanding_list) { + virs = container_of(pos, struct vi_resource, rx_flush_link); + + if (instance == EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle)) { + efrm_vi_resource_rx_flush_done(virs, + flush_nic->index, + completed); + efrm_vi_resource_process_waiting_flushes(completed); + return; + } + } + EFRM_TRACE("%s: Unhandled rx flush event, nic %d, instance %d", + __FUNCTION__, flush_nic->index, instance); +} + +static void +efrm_handle_tx_dmaq_flushed(struct efhw_nic *flush_nic, int instance, + bool *completed) +{ + struct list_head *pos, *temp; + struct vi_resource *virs; + + list_for_each_safe(pos, temp, + &efrm_vi_manager->tx_flush_outstanding_list) { + virs = container_of(pos, struct vi_resource, tx_flush_link); + + if (instance == EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle)) { + efrm_vi_resource_tx_flush_done(virs, + flush_nic->index, + completed); + return; + } + } + EFRM_TRACE("%s: Unhandled tx flush event, nic %d, instance %d", + __FUNCTION__, flush_nic->index, instance); +} + +void +efrm_handle_dmaq_flushed(struct efhw_nic *flush_nic, int instance, + int rx_flush) +{ + irq_flags_t lock_flags; + bool completed = false; + + EFRM_TRACE("%s: nic_i=%d instance=%d rx_flush=%d", __FUNCTION__, + flush_nic->index, instance, rx_flush); + + spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); + + if (rx_flush) + efrm_handle_rx_dmaq_flushed(flush_nic, instance, &completed); + else + efrm_handle_tx_dmaq_flushed(flush_nic, instance, &completed); + +#if BUG7916_WORKAROUND || BUG5302_WORKAROUND + efrm_vi_handle_flush_loss(&completed); +#endif + + spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags); + + if (completed) + EFRM_VI_RM_DELAYED_FREE(efrm_vi_manager); +} + +static void +efrm_vi_rm_reinit_dmaqs(struct vi_resource *virs) +{ + struct efhw_nic *nic; + int nic_i; + + EFRM_FOR_EACH_NIC_IN_SET(&virs->nic_set, nic_i, nic) { + if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_TX] != 0) + efrm_vi_rm_init_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_TX, + nic); + if (virs->dmaq_capacity[EFRM_VI_RM_DMA_QUEUE_RX]) + efrm_vi_rm_init_dmaq(virs, EFRM_VI_RM_DMA_QUEUE_RX, + nic); + } +} + +/* free any PT endpoints whose flush has now complete */ +void efrm_vi_rm_delayed_free(struct work_struct *data) +{ + irq_flags_t lock_flags; + struct list_head close_pending; + struct vi_resource *virs; + + EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_vi_manager->rm); + + spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); + list_replace_init(&efrm_vi_manager->close_pending, &close_pending); + spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags); + + EFRM_TRACE("%s: %p", __FUNCTION__, efrm_vi_manager); + while (!list_empty(&close_pending)) { + virs = + list_entry(list_pop(&close_pending), struct vi_resource, + rx_flush_link); + EFRM_TRACE("%s: flushed VI instance=%d", __FUNCTION__, + EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle)); + + if (virs->flush_callback_fn != NULL) { + efrm_vi_rm_reinit_dmaqs(virs); + 
virs->flush_callback_fn(virs->flush_callback_arg); + } else + efrm_vi_rm_free_flushed_resource(virs); + } +} + +void efrm_vi_rm_salvage_flushed_vis(void) +{ +#if BUG7916_WORKAROUND || BUG5302_WORKAROUND + irq_flags_t lock_flags; + bool completed; + + spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); + efrm_vi_handle_flush_loss(&completed); + spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags); +#endif + + efrm_vi_rm_delayed_free(&efrm_vi_manager->work_item); +} + +void efrm_vi_resource_free(struct vi_resource *virs) +{ + efrm_vi_register_flush_callback(virs, NULL, NULL); + efrm_pt_flush(virs); +} +EXPORT_SYMBOL(efrm_vi_resource_free); + +/* + * vi: sw=8:ai:aw + */ Index: head-2008-03-17/drivers/net/sfc/sfc_resource/vi_resource_manager.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ head-2008-03-17/drivers/net/sfc/sfc_resource/vi_resource_manager.c 2008-03-17 14:27:58.000000000 +0100 @@ -0,0 +1,259 @@ +/**************************************************************************** + * Driver for Solarflare network controllers - + * resource management for Xen backend, OpenOnload, etc + * (including support for SFE4001 10GBT NIC) + * + * This file contains the VI resource manager. + * + * Copyright 2005-2007: Solarflare Communications Inc, + * 9501 Jeronimo Road, Suite 250, + * Irvine, CA 92618, USA + * + * Developed and maintained by Solarflare Communications: + * + * + * + * Certain parts of the driver were implemented by + * Alexandra Kossovsky + * OKTET Labs Ltd, Russia, + * http://oktetlabs.ru, + * by request of Solarflare Communications + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + **************************************************************************** + */ + +#include +#include +#include +#include +#include + +int efrm_pt_pace(struct vi_resource *virs, unsigned int val) +{ +#if defined(__CI_HARDWARE_CONFIG_FALCON__) + int instance, nic_i; + struct efhw_nic *nic; + + EFRM_RESOURCE_ASSERT_VALID(&virs->rs, 0); + instance = EFRM_RESOURCE_INSTANCE(virs->rs.rs_handle); + + EFRM_FOR_EACH_NIC_IN_SET(&virs->nic_set, nic_i, nic) + falcon_nic_pace(nic, instance, val); + + EFRM_TRACE("%s[%d]=%d DONE", __FUNCTION__, instance, val); + return 0; +#else + return -EOPNOTSUPP; +#endif +} +EXPORT_SYMBOL(efrm_pt_pace); + +/*** Resource manager creation/destruction *******************************/ + +static void efrm_vi_rm_dtor(struct efrm_resource_manager *rm); + +static int +efrm_create_or_destroy_vi_resource_manager( + struct efrm_resource_manager **rm_in_out, + const struct vi_resource_dimensions *dims, + bool destroy) +{ + struct vi_resource *virs; + struct list_head *pos, *temp; + struct list_head flush_pending; + irq_flags_t lock_flags; + int rc, i, n_evqs; + unsigned dmaq_min, dmaq_max; + + EFRM_ASSERT(rm_in_out); + + if (destroy) + goto destroy; + + EFRM_ASSERT(dims); + EFRM_NOTICE("vi_resource_manager: evq_int=%u-%u evq_timer=%u-%u", + dims->evq_int_min, dims->evq_int_max, + dims->evq_timer_min, dims->evq_timer_max); + EFRM_NOTICE("vi_resource_manager: rxq=%u-%u txq=%u-%u", + dims->rxq_min, dims->rxq_max, + dims->txq_min, dims->txq_max); + + efrm_vi_manager = kmalloc(sizeof(*efrm_vi_manager), GFP_KERNEL); + if (efrm_vi_manager == NULL) { + rc = -ENOMEM; + goto fail_alloc; + } + + memset(efrm_vi_manager, 0, sizeof(*efrm_vi_manager)); + + efrm_vi_manager->iscsi_dmaq_instance_is_free = true; + + dmaq_min = max(dims->rxq_min, dims->txq_min); + dmaq_max = min(dims->rxq_max, dims->txq_max); + + efrm_vi_manager->with_timer_base = + max(dmaq_min, dims->evq_timer_min); + efrm_vi_manager->with_timer_limit = + min(dmaq_max, dims->evq_timer_max); + rc = efrm_kfifo_id_ctor(&efrm_vi_manager->instances_with_timer, + efrm_vi_manager->with_timer_base, + efrm_vi_manager->with_timer_limit, + &efrm_vi_manager->rm.rm_lock); + if (rc < 0) + goto fail_with_timer_id_pool; + + efrm_vi_manager->with_interrupt_base = + max(dmaq_min, dims->evq_int_min); + efrm_vi_manager->with_interrupt_limit = + min(dmaq_max, dims->evq_int_max); + efrm_vi_manager->with_interrupt_limit = + max(efrm_vi_manager->with_interrupt_limit, + efrm_vi_manager->with_interrupt_base); + rc = efrm_kfifo_id_ctor(&efrm_vi_manager->instances_with_interrupt, + efrm_vi_manager->with_interrupt_base, + efrm_vi_manager->with_interrupt_limit, + &efrm_vi_manager->rm.rm_lock); + if (rc < 0) + goto fail_with_int_id_pool; + + n_evqs = max(efrm_vi_manager->with_timer_limit, + efrm_vi_manager->with_interrupt_limit); + rc = -ENOMEM; + efrm_vi_manager->evq_infos = + vmalloc(n_evqs * sizeof(struct vi_resource_evq_info)); + if (efrm_vi_manager->evq_infos == NULL) + goto fail_alloc_evq_infos; + + for (i = 0; i < n_evqs; ++i) { + efrm_vi_manager->evq_infos[i].evq_state = 0; + efrm_vi_manager->evq_infos[i].evq_virs = NULL; + } + + INIT_LIST_HEAD(&efrm_vi_manager->rx_flush_waiting_list); + INIT_LIST_HEAD(&efrm_vi_manager->rx_flush_outstanding_list); + INIT_LIST_HEAD(&efrm_vi_manager->tx_flush_outstanding_list); + 
efrm_vi_manager->rx_flush_outstanding_count = 0; + + INIT_LIST_HEAD(&efrm_vi_manager->close_pending); +#if EFRM_VI_USE_WORKQUEUE + efrm_vi_manager->workqueue = create_workqueue("sfc_vi"); + if (efrm_vi_manager->workqueue == NULL) + goto fail_create_workqueue; +#endif + INIT_WORK(&efrm_vi_manager->work_item, efrm_vi_rm_delayed_free); + + /* NB. This must be the last step to avoid things getting tangled. + * efrm_resource_manager_dtor calls the vi_rm_dtor which ends up in + * this function. */ + rc = efrm_resource_manager_ctor(&efrm_vi_manager->rm, efrm_vi_rm_dtor, + "VI", EFRM_RESOURCE_VI, 0); + if (rc < 0) + goto fail_rm_ctor; + + *rm_in_out = &efrm_vi_manager->rm; + return 0; + +destroy: + rc = 0; + EFRM_RESOURCE_MANAGER_ASSERT_VALID(*rm_in_out); + + /* Abort outstanding flushes. Note, a VI resource can be on more + * than one of these lists. We handle this by starting with the TX + * list and then append VIs to this list if they aren't on the TX + * list already. A VI is on the TX flush list if tx_flush_nic_set + * is not empty. */ + spin_lock_irqsave(&efrm_vi_manager->rm.rm_lock, lock_flags); + + list_replace_init(&efrm_vi_manager->tx_flush_outstanding_list, + &flush_pending); + + list_for_each_safe(pos, temp, + &efrm_vi_manager->rx_flush_waiting_list) { + virs = container_of(pos, struct vi_resource, rx_flush_link); + + list_del(&virs->rx_flush_link); + if (efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set)) + list_add_tail(&virs->tx_flush_link, &flush_pending); + } + + list_for_each_safe(pos, temp, + &efrm_vi_manager->rx_flush_outstanding_list) { + virs = container_of(pos, struct vi_resource, rx_flush_link); + + list_del(&virs->rx_flush_link); + if (efrm_nic_set_is_all_clear(&virs->tx_flush_nic_set)) + list_add_tail(&virs->tx_flush_link, &flush_pending); + } + + spin_unlock_irqrestore(&efrm_vi_manager->rm.rm_lock, lock_flags); + + while (!list_empty(&flush_pending)) { + virs = + list_entry(list_pop(&flush_pending), struct vi_resource, + tx_flush_link); + EFRM_TRACE("%s: found PT endpoint " EFRM_RESOURCE_FMT + " with flush pending [Tx=0x%x, Rx=0x%x, RxO=0x%x]", + __FUNCTION__, + EFRM_RESOURCE_PRI_ARG(virs->rs.rs_handle), + virs->tx_flush_nic_set.nics, + virs->rx_flush_nic_set.nics, + virs->rx_flush_outstanding_nic_set.nics); + efrm_vi_rm_free_flushed_resource(virs); + } + +fail_rm_ctor: + + /* Complete outstanding closes. */ +#if EFRM_VI_USE_WORKQUEUE + destroy_workqueue(efrm_vi_manager->workqueue); +fail_create_workqueue: +#endif + EFRM_ASSERT(list_empty(&efrm_vi_manager->close_pending)); + + n_evqs = max(efrm_vi_manager->with_timer_limit, + efrm_vi_manager->with_interrupt_limit); + vfree(efrm_vi_manager->evq_infos); +fail_alloc_evq_infos: + + kfifo_vfree(efrm_vi_manager->instances_with_interrupt); +fail_with_int_id_pool: + + kfifo_vfree(efrm_vi_manager->instances_with_timer); +fail_with_timer_id_pool: + + if (destroy) + return 0; + + EFRM_DO_DEBUG(memset(efrm_vi_manager, 0, sizeof(*efrm_vi_manager))); + kfree(efrm_vi_manager); +fail_alloc: + + *rm_in_out = NULL; + EFRM_ERR("%s: failed rc=%d", __FUNCTION__, rc); + return rc; +} + +int +efrm_create_vi_resource_manager(struct efrm_resource_manager **rm_out, + const struct vi_resource_dimensions *dims) +{ + return efrm_create_or_destroy_vi_resource_manager(rm_out, dims, false); +} + +static void efrm_vi_rm_dtor(struct efrm_resource_manager *rm) +{ + efrm_create_or_destroy_vi_resource_manager(&rm, NULL, true); +}