#include <hallo.h> Debian GNU wrote on Wed Aug 01, 2001 at 04:02:19AM:
> This also I had tried, but still !! (:
> Anything else ?

Okay, I took the latest winbond-840.c that compiles with 2.2.x and
integrated it into the kernel source in a way similar to the ne2k-pci
driver. Patch attached; it should work.

Gruss/Regards,
Eduard.

-- 
"There's a method to it. Only the clueless have enough time to raise
their profile in meetings and make a career of it. The others have
enough to do during the day and no time for hot-air meetings."
[Holger Marzen]
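Rough steps to apply the patch and build the driver as a module, in case
that helps (the patch file name and paths below are only examples; adjust
them to your setup):

  cd /usr/src/kernel-source-2.2.19
  patch -p1 < /tmp/winbond-840.diff   # the patch attached below
  make menuconfig                     # set CONFIG_WINBOND_840 to 'M' in the Ethernet section
  make dep
  make modules
  make modules_install
  depmod -a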
diff -Nur kernel-source-2.2.19.orig/Documentation/Configure.help kernel-source-2.2.19/Documentation/Configure.help
--- kernel-source-2.2.19.orig/Documentation/Configure.help Tue Mar 13 11:37:55 2001
+++ kernel-source-2.2.19/Documentation/Configure.help Wed Aug 1 16:43:26 2001
@@ -6157,6 +6157,13 @@
   module, say M here and read Documentation/modules.txt as well as
   Documentation/networking/net-modules.txt.
 
+COMPEX-RL100a / Winbond-W89c840 PCI Ethernet support
+CONFIG_WINBOND_840
+  This driver is for the Winbond W89c840 chip. It also works with
+  the TX9882 chip on the Compex RL100-ATX board.
+  More specific information and updates are available from
+  http://cesdis.gsfc.nasa.gov/linux/drivers/ethercard.html
+
 PCI DM9102 support
 CONFIG_DM9102
   This driver is for DM9102 compatible PCI cards from Davicom
diff -Nur kernel-source-2.2.19.orig/drivers/net/Config.in kernel-source-2.2.19/drivers/net/Config.in
--- kernel-source-2.2.19.orig/drivers/net/Config.in Tue Mar 13 11:38:14 2001
+++ kernel-source-2.2.19/drivers/net/Config.in Wed Aug 1 16:43:44 2001
@@ -160,6 +160,7 @@
     tristate 'Novell/Eagle/Microdyne NE3210 EISA support (EXPERIMENTAL)' CONFIG_NE3210
   fi
   tristate 'PCI NE2000 support' CONFIG_NE2K_PCI
+  tristate 'COMPEX-RL100a / Winbond-W89c840 Ethernet card support' CONFIG_WINBOND_840
   tristate 'TI ThunderLAN support' CONFIG_TLAN
   tristate 'VIA Rhine support' CONFIG_VIA_RHINE
   tristate 'SiS 900/7016 PCI Fast Ethernet Adapter support' CONFIG_SIS900
diff -Nur kernel-source-2.2.19.orig/drivers/net/Makefile kernel-source-2.2.19/drivers/net/Makefile
--- kernel-source-2.2.19.orig/drivers/net/Makefile Tue Mar 13 11:38:14 2001
+++ kernel-source-2.2.19/drivers/net/Makefile Wed Aug 1 16:26:43 2001
@@ -192,6 +192,14 @@
   endif
 endif
 
+ifeq ($(CONFIG_WINBOND_840),y)
+L_OBJS += winbond-840.o
+else
+  ifeq ($(CONFIG_WINBOND_840),m)
+  M_OBJS += winbond-840.o
+  endif
+endif
+
 ifeq ($(CONFIG_NE2K_PCI),y)
 L_OBJS += ne2k-pci.o
 CONFIG_8390_BUILTIN = y
diff -Nur kernel-source-2.2.19.orig/drivers/net/Space.c kernel-source-2.2.19/drivers/net/Space.c
--- kernel-source-2.2.19.orig/drivers/net/Space.c Tue Mar 13 11:38:14 2001
+++ kernel-source-2.2.19/drivers/net/Space.c Wed Aug 1 12:01:33 2001
@@ -229,6 +229,9 @@
 #ifdef CONFIG_NE2K_PCI
 	{ne2k_pci_probe, 0},
 #endif
+#ifdef CONFIG_WINBOND_840
+	{winbond840_probe, 0},
+#endif
 #ifdef CONFIG_PCNET32
 	{pcnet32_probe, 0},
 #endif
diff -Nur kernel-source-2.2.19.orig/drivers/net/winbond-840.c kernel-source-2.2.19/drivers/net/winbond-840.c
--- kernel-source-2.2.19.orig/drivers/net/winbond-840.c Thu Jan 1 01:00:00 1970
+++ kernel-source-2.2.19/drivers/net/winbond-840.c Wed Aug 1 16:31:16 2001
@@ -0,0 +1,1543 @@
+/* winbond-840.c: A Linux PCI network adapter skeleton device driver. */
+/*
+	Written 1998-1999 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU Public License (GPL), incorporated herein by reference.
+	Drivers based on this code fall under the GPL and must retain
+	this authorship (implicit copyright) notice.
+
+	The author may be reached as [EMAIL PROTECTED], or
+	USRA Center of Excellence in Space Data and Information Sciences
+	Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+
+	Support and updates available at
+	http://cesdis.gsfc.nasa.gov/linux/drivers/index.html
+
+	Do not remove the copyright infomation.
+	Do not change the version information unless an improvement has been made.
+	Merely removing my name, as Compex has done in the past, does not count
+	as an improvement.
+*/ +static const char *version = +"winbond-840.c:v0.12 9/7/99 Written by Donald Becker\n"; +static const char *version1 = +" http://cesdis.gsfc.nasa.gov/linux/drivers/ethercard.html\n"; + +/* +probe-func: winbond840_probe +config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840 + +c-help-name: Winbond W89c840 PCI Ethernet support +c-help-symbol: CONFIG_WINBOND_840 +c-help: This driver is for the Winbond W89c840 chip. It also works with +c-help: the TX9882 chip on the Compex RL100-ATX board. +c-help: More specific information and updates are available from +c-help: http://cesdis.gsfc.nasa.gov/linux/drivers/ethercard.html +*/ + +/* A few user-configurable values. These may be modified when a driver + module is loaded.*/ + +static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ +static int max_interrupt_work = 20; +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The '840 uses a 64 element hash table based on the Ethernet CRC. */ +static int multicast_filter_limit = 32; + +/* Set the copy breakpoint for the copy-only-tiny-frames scheme. + Setting to > 1518 effectively disables this feature. */ +static int rx_copybreak = 0; + +/* Used to pass the media type, etc. + Both 'options[]' and 'full_duplex[]' should exist for driver + interoperability. + The media type is usually passed in 'options[]'. +*/ +#define MAX_UNITS 8 /* More are supported, limit only on options */ +static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; +static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; + +/* Operational parameters that are set at compile time. */ + +/* Keep the ring sizes a power of two for compile efficiency. + The compiler will convert <unsigned>'%'<2^N> into a bit mask. + Making the Tx ring too large decreases the effectiveness of channel + bonding and packet priority. + There are no ill effects from too-large receive rings. */ +#define TX_RING_SIZE 16 +#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ +#define RX_RING_SIZE 32 + +/* The presumed FIFO size for working around the Tx-FIFO-overflow bug. + To avoid overflowing we don't queue again until we have room for a + full-size packet. + */ +#define TX_FIFO_SIZE (2048) +#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16) + +/* Operational parameters that usually are not changed. */ +/* Time in jiffies before concluding the transmitter is hung. */ +#define TX_TIMEOUT (2*HZ) + +#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ + +#if !defined(__OPTIMIZE__) || !defined(__KERNEL__) +#warning You must compile this file with the correct options! +#warning See the last lines of the source file. +#error You must compile this driver with "-O". +#endif + +/* Include files, designed to support most kernel versions 2.0.0 and later. */ +#include <linux/config.h> +#include <linux/version.h> +#ifdef MODULE +#ifdef MODVERSIONS +#include <linux/modversions.h> +#endif +#include <linux/module.h> +#else +#define MOD_INC_USE_COUNT +#define MOD_DEC_USE_COUNT +#endif + +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/timer.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/malloc.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <asm/processor.h> /* Processor type for cache alignment. 
*/ +#include <asm/bitops.h> +#include <asm/io.h> +#ifdef HAS_PCI_NETIF +#include "pci-netif.h" +#endif + +#if defined(MODULE) && LINUX_VERSION_CODE > 0x20115 +MODULE_AUTHOR("Donald Becker <[EMAIL PROTECTED]>"); +MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver"); +MODULE_PARM(max_interrupt_work, "i"); +MODULE_PARM(debug, "i"); +MODULE_PARM(rx_copybreak, "i"); +MODULE_PARM(multicast_filter_limit, "i"); +MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i"); +MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i"); +#endif + +/* This driver was written to use PCI memory space, however some x86 systems + work only with I/O space accesses. Pass -DUSE_IO_OPS to use PCI I/O space + accesses instead of memory space. */ +#ifdef USE_IO_OPS +#undef readb +#undef readw +#undef readl +#undef writeb +#undef writew +#undef writel +#define readb inb +#define readw inw +#define readl inl +#define writeb outb +#define writew outw +#define writel outl +#endif + + +/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. + This is only in the support-all-kernels source code. */ + +#if (LINUX_VERSION_CODE >= 0x20100) +char kernel_version[] = UTS_RELEASE; +#else +#ifndef __alpha__ +#define ioremap vremap +#define iounmap vfree +#endif +#endif +#if LINUX_VERSION_CODE < 0x20123 +#define test_and_set_bit(val, addr) set_bit(val, addr) +#endif +#if LINUX_VERSION_CODE <= 0x20139 +#define net_device_stats enet_statistics +#else +#define NETSTATS_VER2 +#endif +#if LINUX_VERSION_CODE < 0x20155 || defined(CARDBUS) +/* Grrrr, the PCI code changed, but did not consider CardBus... */ +#include <linux/bios32.h> +#define PCI_SUPPORT_VER1 +#else +#define PCI_SUPPORT_VER2 +#endif +#if LINUX_VERSION_CODE < 0x20159 +#define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE); +#else +#define dev_free_skb(skb) dev_kfree_skb(skb); +#endif +#if ! defined(CAP_NET_ADMIN) +#define capable(CAP_XXX) (suser()) +#endif +#if ! defined(HAS_NETIF_QUEUE) +#define netif_wake_queue(dev) mark_bh(NET_BH); +#endif +#if LINUX_VERSION_CODE < 0x2030d +#define net_device device +#endif + +/* + Theory of Operation + +I. Board Compatibility + +This driver is for the Winbond w89c840 chip. + +II. Board-specific settings + +None. + +III. Driver operation + +This chip is very similar to the Digital 21*4* "Tulip" family. The first +twelve registers and the descriptor format are nearly identical. Read a +Tulip manual for operational details. + +A significant difference is that the multicast filter and station address are +stored in registers rather than loaded through a pseudo-transmit packet. + +Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a +full-sized packet we must use both data buffers in a descriptor. Thus the +driver uses ring mode where descriptors are implicitly sequential in memory, +rather than using the second descriptor address as a chain pointer to +subsequent descriptors. + +IV. Notes + +If you are going to almost clone a Tulip, why not go all the way and avoid +the need for a new driver? + +IVb. References + +http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html +http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html + +No public w89c840 datasheet is available. + +IVc. Errata + +A horrible bug exists in the transmit FIFO. Apparently the chip doesn't +correctly detect a full FIFO, and queuing more than 2048 bytes may result in +silent data corruption. + +*/ + + + +/* + PCI probe table. +*/ + +#if ! 
defined(HAS_PCI_NETIF) +enum pci_flags_bit { + PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4, + PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3, +}; +struct pci_id_info { + const char *name; + struct match_info { + int pci, pci_mask, subsystem, subsystem_mask; + int revision, revision_mask; + } id; + int pci_flags, io_size; + int drv_flags; /* Driver use, intended as capability flags. */ + void *(*probe1)(int pci_bus, int pci_devfn, void *init_dev, + long ioaddr, int irq, int chip_idx, int fnd_cnt); + /* Optional, called for suspend, resume and detach. */ + int (*pwr_event)(struct net_device *dev, int event); +}; +#endif + +static void *w840_probe1(int pci_bus, int devfn, void *init_dev, + long ioaddr, int irq, int chp_idx, int fnd_cnt); +enum chip_capability_flags {CanHaveMII=1, HasBrokenTx=2}; +#ifdef USE_IO_OPS +#define W840_FLAGS (PCI_USES_IO | PCI_ADDR0 | PCI_USES_MASTER) +#else +#define W840_FLAGS (PCI_USES_MEM | PCI_ADDR1 | PCI_USES_MASTER) +#endif + +static struct pci_id_info pci_tbl[] = { + {"Winbond W89c840", { 0x08401050, 0xffffffff, }, + W840_FLAGS, 128, CanHaveMII | HasBrokenTx, w840_probe1}, + {"Compex RL100-ATX", { 0x201111F6, 0xffffffff,}, + W840_FLAGS, 128, CanHaveMII | HasBrokenTx, w840_probe1}, + {0,}, /* 0 terminated list. */ +}; + +/* Offsets to the Command and Status Registers, "CSRs". + While similar to the Tulip, these registers are longword aligned. + Note: It's not useful to define symbolic names for every register bit in + the device. The name can only partially document the semantics and make + the driver longer and more difficult to read. +*/ +enum w840_offsets { + PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08, + RxRingPtr=0x0C, TxRingPtr=0x10, + IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C, + RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C, + CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */ + MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40, + CurTxDescAddr=0x4C, CurTxBufAddr=0x50, +}; + +/* Bits in the interrupt status/enable registers. */ +/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */ +enum intr_status_bits { + NormalIntr=0x10000, AbnormalIntr=0x8000, + IntrPCIErr=0x2000, TimerInt=0x800, + IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40, + TxFIFOUnderflow=0x20, RxErrIntr=0x10, + TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01, +}; + +/* Bits in the NetworkConfig register. */ +enum rx_mode_bits { + AcceptErr=0x80, AcceptRunt=0x40, + AcceptBroadcast=0x20, AcceptMulticast=0x10, + AcceptAllPhys=0x08, AcceptMyPhys=0x02, +}; + +enum mii_reg_bits { + MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000, + MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000, +}; + +/* The Tulip Rx and Tx buffer descriptors. */ +struct w840_rx_desc { + s32 status; + s32 length; + u32 buffer1; + u32 next_desc; +}; + +struct w840_tx_desc { + s32 status; + s32 length; + u32 buffer1, buffer2; /* We use only buffer 1. */ +}; + +/* Bits in network_desc.status */ +enum desc_status_bits { + DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000, + DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000, + DescIntr=0x80000000, +}; + +struct netdev_private { + /* Descriptor rings first for alignment. */ + struct w840_rx_desc rx_ring[RX_RING_SIZE]; + struct w840_tx_desc tx_ring[TX_RING_SIZE]; + struct net_device *next_module; /* Link for devices of this type. */ + const char *product_name; + /* The addresses of receive-in-place skbuffs. 
*/ + struct sk_buff* rx_skbuff[RX_RING_SIZE]; + /* The saved address of a sent-in-place packet/buffer, for later free(). */ + struct sk_buff* tx_skbuff[TX_RING_SIZE]; + struct net_device_stats stats; + struct timer_list timer; /* Media monitoring timer. */ + /* Frequently used values: keep some adjacent for cache effect. */ + int chip_id; + int flags; + unsigned char pci_bus, pci_devfn; + int csr6; + struct w840_rx_desc *rx_head_desc; + unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ + unsigned int rx_buf_sz; /* Based on MTU+slack. */ + unsigned int cur_tx, dirty_tx; + int tx_q_bytes; + unsigned int tx_full:1; /* The Tx queue is full. */ + /* These values are keep track of the transceiver/media in use. */ + unsigned int full_duplex:1; /* Full-duplex operation requested. */ + unsigned int duplex_lock:1; + unsigned int medialock:1; /* Do not sense media. */ + unsigned int default_port:4; /* Last dev->if_port value. */ + /* MII transceiver section. */ + int mii_cnt; /* MII device addresses. */ + u16 advertising; /* NWay media advertisement */ + unsigned char phys[2]; /* MII device addresses. */ + u32 pad[4]; /* Used for 32-byte alignment */ +}; + +static int eeprom_read(long ioaddr, int location); +static int mdio_read(struct net_device *dev, int phy_id, int location); +static void mdio_write(struct net_device *dev, int phy_id, int location, int value); +static int netdev_open(struct net_device *dev); +static void check_duplex(struct net_device *dev); +static void netdev_timer(unsigned long data); +static void tx_timeout(struct net_device *dev); +static void init_ring(struct net_device *dev); +static int start_tx(struct sk_buff *skb, struct net_device *dev); +static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs); +static void netdev_error(struct net_device *dev, int intr_status); +static int netdev_rx(struct net_device *dev); +static void netdev_error(struct net_device *dev, int intr_status); +static inline unsigned ether_crc(int length, unsigned char *data); +static void set_rx_mode(struct net_device *dev); +static struct net_device_stats *get_stats(struct net_device *dev); +static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int netdev_close(struct net_device *dev); + + + +/* A list of our installed devices, for removing the driver module. */ +static struct net_device *root_net_dev = NULL; + +#if ! defined(HAS_PCI_NETIF) +/* Ideally we would detect all network cards in slot order. That would + be best done a central PCI probe dispatch, which wouldn't work + well when dynamically adding drivers. So instead we detect just the + cards we know about in slot order. */ + +static int netif_pci_probe(struct pci_id_info pci_tbl[], void *init_dev) +{ + int cards_found = 0; + int pci_index = 0; + unsigned char pci_bus, pci_device_fn; + + if ( ! 
pcibios_present()) + return -ENODEV; + + for (;pci_index < 0xff; pci_index++) { + u32 pci_id, pci_subsystem_id; + u16 pci_command, new_command; + u8 pci_revision; + int chip_idx, irq, pci_flags; + long pciaddr; + long ioaddr; + + if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8, pci_index, + &pci_bus, &pci_device_fn) + != PCIBIOS_SUCCESSFUL) + break; + pcibios_read_config_dword(pci_bus, pci_device_fn, + PCI_VENDOR_ID, &pci_id); + pcibios_read_config_dword(pci_bus, pci_device_fn, + PCI_SUBSYSTEM_ID, &pci_subsystem_id); + pcibios_read_config_byte (pci_bus, pci_device_fn, + PCI_SUBSYSTEM_ID, &pci_revision); + + for (chip_idx = 0; pci_tbl[chip_idx].name; chip_idx++) { + if ((pci_id & pci_tbl[chip_idx].id.pci_mask) + == pci_tbl[chip_idx].id.pci + && (pci_subsystem_id & pci_tbl[chip_idx].id.subsystem_mask) + == pci_tbl[chip_idx].id.subsystem + && (pci_revision & pci_tbl[chip_idx].id.revision_mask) + == pci_tbl[chip_idx].id.revision + ) + break; + } + if (pci_tbl[chip_idx].id.pci == 0) /* Compiled out! */ + continue; + + pci_flags = pci_tbl[chip_idx].pci_flags; + { +#if defined(PCI_SUPPORT_VER2) + struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn); + pciaddr = pdev->base_address[pci_flags & PCI_ADDR0 ? 0 : 1]; + irq = pdev->irq; +#else + u32 pci_memaddr; + u8 pci_irq_line; + pcibios_read_config_byte(pci_bus, pci_device_fn, + PCI_INTERRUPT_LINE, &pci_irq_line); + pcibios_read_config_dword(pci_bus, pci_device_fn, + pci_flags & PCI_ADDR0 ? PCI_BASE_ADDRESS_0 : + PCI_BASE_ADDRESS_1 , &pci_memaddr); + pciaddr = pci_memaddr; + irq = pci_irq_line; +#endif + } + + if (debug > 2) + printk(KERN_INFO "Found %s at PCI address %#lx, IRQ %d.\n", + pci_tbl[chip_idx].name, pciaddr, irq); + + if (pciaddr & PCI_BASE_ADDRESS_SPACE_IO) { + ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK; + if (check_region(ioaddr, pci_tbl[chip_idx].io_size)) + continue; + } else if ((ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK, + pci_tbl[chip_idx].io_size)) == 0) { + printk(KERN_INFO "Failed to map PCI address %#lx.\n", + pciaddr); + continue; + } + + pcibios_read_config_word(pci_bus, pci_device_fn, + PCI_COMMAND, &pci_command); + new_command = pci_command | (pci_tbl[chip_idx].pci_flags & 7); + if (pci_command != new_command) { + printk(KERN_INFO " The PCI BIOS has not enabled the" + " device at %d/%d! Updating PCI command %4.4x->%4.4x.\n", + pci_bus, pci_device_fn, pci_command, new_command); + pcibios_write_config_word(pci_bus, pci_device_fn, + PCI_COMMAND, new_command); + } + + init_dev = pci_tbl[chip_idx].probe1(pci_bus, pci_device_fn, init_dev, + ioaddr, irq, chip_idx, cards_found); + + if (init_dev && (pci_tbl[chip_idx].pci_flags & PCI_COMMAND_MASTER)) { + u8 pci_latency; + pcibios_read_config_byte(pci_bus, pci_device_fn, + PCI_LATENCY_TIMER, &pci_latency); + if (pci_latency < 32) { + printk(KERN_INFO " PCI latency timer (CFLT) is " + "unreasonably low at %d. Setting to %d clocks.\n", + pci_latency, 32); + pcibios_write_config_byte(pci_bus, pci_device_fn, + PCI_LATENCY_TIMER, 32); + } + } + init_dev = 0; + cards_found++; + } + + return cards_found ? 0 : -ENODEV; +} +#endif + +static void *w840_probe1(int pci_bus, int devfn, void *init_dev, + long ioaddr, int irq, int chip_idx, int find_cnt) +{ + struct net_device *dev; + struct netdev_private *np; + int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; + + dev = init_etherdev(init_dev, 0); + + printk(KERN_INFO "%s: %s at 0x%lx, ", + dev->name, pci_tbl[chip_idx].name, ioaddr); + + /* Warning: broken for big-endian machines. 
*/ + for (i = 0; i < 3; i++) + ((u16 *)dev->dev_addr)[i] = eeprom_read(ioaddr, i); + + for (i = 0; i < 5; i++) + printk("%2.2x:", dev->dev_addr[i]); + printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); + +#ifdef USE_IO_OPS + request_region(ioaddr, pci_tbl[chip_idx].io_size, dev->name); +#endif + +#if ! defined(final_version) /* Dump the EEPROM contents during development. */ + if (debug > 4) + for (i = 0; i < 64; i++) + printk("%2.2x%s", eeprom_read(ioaddr, i), i%16 != 15 ? " " : "\n"); +#endif + + /* Reset the chip to erase previous misconfiguration. + No hold time required! */ + writel(0x00000001, ioaddr + PCIBusCfg); + + dev->base_addr = ioaddr; + dev->irq = irq; + + /* Make certain the descriptor lists are aligned. */ + np = (void *)(((long)kmalloc(sizeof(*np), GFP_KERNEL) + 15) & ~15); + memset(np, 0, sizeof(*np)); + dev->priv = np; + + np->next_module = root_net_dev; + root_net_dev = dev; + + np->pci_bus = pci_bus; + np->pci_devfn = devfn; + np->chip_id = chip_idx; + np->flags = pci_tbl[np->chip_id].drv_flags; + + if (dev->mem_start) + option = dev->mem_start; + + /* The lower four bits are the media type. */ + if (option > 0) { + if (option & 0x200) + np->full_duplex = 1; + np->default_port = option & 15; + if (np->default_port) + np->medialock = 1; + } + if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0) + np->full_duplex = 1; + + if (np->full_duplex) + np->duplex_lock = 1; + + /* The chip-specific entries in the device structure. */ + dev->open = &netdev_open; + dev->hard_start_xmit = &start_tx; + dev->stop = &netdev_close; + dev->get_stats = &get_stats; + dev->set_multicast_list = &set_rx_mode; + dev->do_ioctl = &mii_ioctl; + + if (np->flags & CanHaveMII) { + int phy, phy_idx = 0; + for (phy = 1; phy < 32 && phy_idx < 4; phy++) { + int mii_status = mdio_read(dev, phy, 1); + if (mii_status != 0xffff && mii_status != 0x0000) { + np->phys[phy_idx++] = phy; + np->advertising = mdio_read(dev, phy, 4); + printk(KERN_INFO "%s: MII PHY found at address %d, status " + "0x%4.4x advertising %4.4x.\n", + dev->name, phy, mii_status, np->advertising); + } + } + np->mii_cnt = phy_idx; + if (phy_idx == 0) { + printk(KERN_WARNING "%s: MII PHY not found -- this device may " + "not operate correctly.\n", dev->name); + } + } + + return dev; +} + + +/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are + often serial bit streams generated by the host processor. + The example below is for the common 93c46 EEPROM, 64 16 bit words. */ + +/* Delay between EEPROM clock transitions. + No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need + a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that + made udelay() unreliable. + The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is + depricated. +*/ +#define eeprom_delay(eeaddr) readl(eeaddr) + + +/* EEPROM_Ctrl bits. */ +#define EE_SHIFT_CLK 0x02 /* EEPROM shift clock. */ +#define EE_CS 0x01 /* EEPROM chip select. */ +#define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */ +#define EE_WRITE_0 0x01 +#define EE_WRITE_1 0x05 +#define EE_DATA_READ 0x08 /* EEPROM chip data out. */ +#define EE_ENB (0x4800 | EE_CS) + + +enum EEPROM_Ctrl_Bits { + EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805, + EE_ChipSelect=0x801, EE_DataIn=0x08, +}; + +/* The EEPROM commands include the alway-set leading bit. 
*/ +enum EEPROM_Cmds { + EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6), +}; + +static int eeprom_read(long addr, int location) +{ + int i; + int retval = 0; + int ee_addr = addr + EECtrl; + int read_cmd = location | EE_ReadCmd; + writel(EE_ChipSelect, ee_addr); + + /* Shift the read command bits out. */ + for (i = 10; i >= 0; i--) { + short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0; + writel(dataval, ee_addr); + eeprom_delay(ee_addr); + writel(dataval | EE_ShiftClk, ee_addr); + eeprom_delay(ee_addr); + } + writel(EE_ChipSelect, ee_addr); + + for (i = 16; i > 0; i--) { + writel(EE_ChipSelect | EE_ShiftClk, ee_addr); + eeprom_delay(ee_addr); + retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0); + writel(EE_ChipSelect, ee_addr); + eeprom_delay(ee_addr); + } + + /* Terminate the EEPROM access. */ + writel(0, ee_addr); + return retval; +} + +/* MII transceiver control section. + Read and write the MII registers using software-generated serial + MDIO protocol. See the MII specifications or DP83840A data sheet + for details. + + The maximum data clock rate is 2.5 Mhz. The minimum timing is usually + met by back-to-back 33Mhz PCI cycles. */ +#define mdio_delay(mdio_addr) readl(mdio_addr) + +/* Set iff a MII transceiver on any interface requires mdio preamble. + This only set with older tranceivers, so the extra + code size of a per-interface flag is not worthwhile. */ +static char mii_preamble_required = 1; + +#define MDIO_WRITE0 (MDIO_EnbOutput) +#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput) + +/* Generate the preamble required for initial synchronization and + a few older transceivers. */ +static void mdio_sync(long mdio_addr) +{ + int bits = 32; + + /* Establish sync by sending at least 32 logic ones. */ + while (--bits >= 0) { + writel(MDIO_WRITE1, mdio_addr); + mdio_delay(mdio_addr); + writel(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr); + mdio_delay(mdio_addr); + } +} + +static int mdio_read(struct net_device *dev, int phy_id, int location) +{ + long mdio_addr = dev->base_addr + MIICtrl; + int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; + int i; + int retval = 0; + + if (mii_preamble_required) + mdio_sync(mdio_addr); + + /* Shift the read command bits out. */ + for (i = 15; i >= 0; i--) { + int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; + + writel(dataval, mdio_addr); + mdio_delay(mdio_addr); + writel(dataval | MDIO_ShiftClk, mdio_addr); + mdio_delay(mdio_addr); + } + /* Read the two transition, 16 data, and wire-idle bits. */ + for (i = 20; i > 0; i--) { + writel(MDIO_EnbIn, mdio_addr); + mdio_delay(mdio_addr); + retval = (retval << 1) | ((readl(mdio_addr) & MDIO_DataIn) ? 1 : 0); + writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); + mdio_delay(mdio_addr); + } + return (retval>>1) & 0xffff; +} + +static void mdio_write(struct net_device *dev, int phy_id, int location, int value) +{ + struct netdev_private *np = (struct netdev_private *)dev->priv; + long mdio_addr = dev->base_addr + MIICtrl; + int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value; + int i; + + if (location == 4 && phy_id == np->phys[0]) + np->advertising = value; + + if (mii_preamble_required) + mdio_sync(mdio_addr); + + /* Shift the command bits out. */ + for (i = 31; i >= 0; i--) { + int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; + + writel(dataval, mdio_addr); + mdio_delay(mdio_addr); + writel(dataval | MDIO_ShiftClk, mdio_addr); + mdio_delay(mdio_addr); + } + /* Clear out extra bits. 
*/ + for (i = 2; i > 0; i--) { + writel(MDIO_EnbIn, mdio_addr); + mdio_delay(mdio_addr); + writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); + mdio_delay(mdio_addr); + } + return; +} + + +static int netdev_open(struct net_device *dev) +{ + struct netdev_private *np = (struct netdev_private *)dev->priv; + long ioaddr = dev->base_addr; + int i; + + writel(0x00000001, ioaddr + PCIBusCfg); /* Reset */ + + if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) + return -EAGAIN; + + if (debug > 1) + printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n", + dev->name, dev->irq); + + MOD_INC_USE_COUNT; + + init_ring(dev); + + writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr); + writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr); + + for (i = 0; i < 6; i++) + writeb(dev->dev_addr[i], ioaddr + StationAddr + i); + + /* Initialize other registers. */ + /* Configure the PCI bus bursts and FIFO thresholds. + 486: Set 8 longword cache alignment, 8 longword burst. + 586: Set 16 longword cache alignment, no burst limit. + Cache alignment bits 15:14 Burst length 13:8 + 0000 <not allowed> 0000 align to cache 0800 8 longwords + 4000 8 longwords 0100 1 longword 1000 16 longwords + 8000 16 longwords 0200 2 longwords 2000 32 longwords + C000 32 longwords 0400 4 longwords + Wait the specified 50 PCI cycles after a reset by initializing + Tx and Rx queues and the address filter list. */ +#if defined(__powerpc__) /* Big-endian */ + writel(0x00100080 | 0xE010, ioaddr + PCIBusCfg); +#elif defined(__alpha__) + writel(0xE010, ioaddr + PCIBusCfg); +#elif defined(__i386__) +#if defined(MODULE) + writel(0xE010, ioaddr + PCIBusCfg); +#else + /* When not a module we can work around broken '486 PCI boards. */ +#if (LINUX_VERSION_CODE > 0x2014c) +#define x86 boot_cpu_data.x86 +#endif + writel((x86 <= 4 ? 0x4810 : 0xE010), ioaddr + PCIBusCfg); + if (x86 <= 4) + printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache " + "alignment to %x.\n", dev->name, + (x86 <= 4 ? 0x4810 : 0x8010)); +#endif +#else + writel(0xE010, ioaddr + PCIBusCfg); +#warning Processor architecture undefined! +#endif + + if (dev->if_port == 0) + dev->if_port = np->default_port; + + dev->tbusy = 0; + dev->interrupt = 0; + + writel(0, dev->base_addr + RxStartDemand); + np->csr6 = 0x20022002; + check_duplex(dev); + set_rx_mode(dev); + + dev->start = 1; + + /* Clear and Enable interrupts by setting the interrupt mask. */ + writel(0x1A0F5, ioaddr + IntrStatus); + writel(0x1A0F5, ioaddr + IntrEnable); + + if (debug > 2) + printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name); + + /* Set the timer to check for link beat. */ + init_timer(&np->timer); + np->timer.expires = jiffies + 3*HZ; + np->timer.data = (unsigned long)dev; + np->timer.function = &netdev_timer; /* timer handler */ + add_timer(&np->timer); + + return 0; +} + +static void check_duplex(struct net_device *dev) +{ + struct netdev_private *np = (struct netdev_private *)dev->priv; + int mii_reg5 = mdio_read(dev, np->phys[0], 5); + int duplex; + + if (np->duplex_lock || mii_reg5 == 0xffff) + return; + duplex = (mii_reg5 & 0x0100) || (mii_reg5 & 0x01C0) == 0x0040; + if (np->full_duplex != duplex) { + np->full_duplex = duplex; + if (debug) + printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link" + " partner capability of %4.4x.\n", dev->name, + duplex ? "full" : "half", np->phys[0], mii_reg5); + np->csr6 &= ~0x200; + np->csr6 |= duplex ? 
0x200 : 0; + } +} + +static void netdev_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct netdev_private *np = (struct netdev_private *)dev->priv; + long ioaddr = dev->base_addr; + int next_tick = 10*HZ; + int old_csr6 = np->csr6; + + if (debug > 2) + printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x " + "config %8.8x.\n", + dev->name, readl(ioaddr + IntrStatus), + readl(ioaddr + NetworkConfig)); + check_duplex(dev); + if (np->csr6 != old_csr6) { + writel(np->csr6 & ~0x0002, ioaddr + NetworkConfig); + writel(np->csr6 | 0x2002, ioaddr + NetworkConfig); + } + np->timer.expires = jiffies + next_tick; + add_timer(&np->timer); +} + +static void tx_timeout(struct net_device *dev) +{ + struct netdev_private *np = (struct netdev_private *)dev->priv; + long ioaddr = dev->base_addr; + + printk(KERN_WARNING "%s: Transmit timed out, status %8.8x," + " resetting...\n", dev->name, readl(ioaddr + IntrStatus)); + +#ifndef __alpha__ + { + int i; + printk(KERN_DEBUG " Rx ring %8.8x: ", (int)np->rx_ring); + for (i = 0; i < RX_RING_SIZE; i++) + printk(" %8.8x", (unsigned int)np->rx_ring[i].status); + printk("\n"KERN_DEBUG" Tx ring %8.8x: ", (int)np->tx_ring); + for (i = 0; i < TX_RING_SIZE; i++) + printk(" %4.4x", np->tx_ring[i].status); + printk("\n"); + } +#endif + + /* Perhaps we should reinitialize the hardware here. Just trigger a + Tx demand for now. */ + writel(0, dev->base_addr + TxStartDemand); + dev->if_port = 0; + /* Stop and restart the chip's Tx processes . */ + + dev->trans_start = jiffies; + np->stats.tx_errors++; + return; +} + + +/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ +static void init_ring(struct net_device *dev) +{ + struct netdev_private *np = (struct netdev_private *)dev->priv; + int i; + + np->tx_full = 0; + np->tx_q_bytes = np->cur_rx = np->cur_tx = 0; + np->dirty_rx = np->dirty_tx = 0; + + np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); + np->rx_head_desc = &np->rx_ring[0]; + + /* Initial all Rx descriptors. */ + for (i = 0; i < RX_RING_SIZE; i++) { + np->rx_ring[i].length = np->rx_buf_sz; + np->rx_ring[i].status = 0; + np->rx_ring[i].next_desc = virt_to_bus(&np->rx_ring[i+1]); + np->rx_skbuff[i] = 0; + } + /* Mark the last entry as wrapping the ring. */ + np->rx_ring[i-1].length |= DescEndRing; + np->rx_ring[i-1].next_desc = virt_to_bus(&np->rx_ring[0]); + + /* Fill in the Rx buffers. Handle allocation failure gracefully. */ + for (i = 0; i < RX_RING_SIZE; i++) { + struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz); + np->rx_skbuff[i] = skb; + if (skb == NULL) + break; + skb->dev = dev; /* Mark as being used by this device. */ + np->rx_ring[i].buffer1 = virt_to_bus(skb->tail); + np->rx_ring[i].status = DescOwn | DescIntr; + } + np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); + + for (i = 0; i < TX_RING_SIZE; i++) { + np->tx_skbuff[i] = 0; + np->tx_ring[i].status = 0; + } + return; +} + +static int start_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct netdev_private *np = (struct netdev_private *)dev->priv; + unsigned entry; + + /* Block a timer-based transmit from overlapping. This could better be + done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */ + if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) { + if (jiffies - dev->trans_start > TX_TIMEOUT) + tx_timeout(dev); + return 1; + } + + /* Caution: the write order is important here, set the field + with the "ownership" bits last. */ + + /* Calculate the next Tx descriptor entry. 
*/ + entry = np->cur_tx % TX_RING_SIZE; + + np->tx_skbuff[entry] = skb; + np->tx_ring[entry].buffer1 = virt_to_bus(skb->data); + +#define one_buffer +#define BPT 1022 +#if defined(one_buffer) + np->tx_ring[entry].length = DescWholePkt | skb->len; + if (entry >= TX_RING_SIZE-1) /* Wrap ring */ + np->tx_ring[entry].length |= DescIntr | DescEndRing; + np->tx_ring[entry].status = DescOwn; + np->cur_tx++; +#elif defined(two_buffer) + if (skb->len > BPT) { + unsigned int entry1 = ++np->cur_tx % TX_RING_SIZE; + np->tx_ring[entry].length = DescStartPkt | BPT; + np->tx_ring[entry1].length = DescEndPkt | (skb->len - BPT); + np->tx_ring[entry1].buffer1 = virt_to_bus(skb->data) + BPT; + np->tx_ring[entry1].status = DescOwn; + np->tx_ring[entry].status = DescOwn; + if (entry >= TX_RING_SIZE-1) + np->tx_ring[entry].length |= DescIntr|DescEndRing; + else if (entry1 >= TX_RING_SIZE-1) + np->tx_ring[entry1].length |= DescIntr|DescEndRing; + np->cur_tx++; + } else { + np->tx_ring[entry].length = DescWholePkt | skb->len; + if (entry >= TX_RING_SIZE-1) /* Wrap ring */ + np->tx_ring[entry].length |= DescIntr | DescEndRing; + np->tx_ring[entry].status = DescOwn; + np->cur_tx++; + } +#elif defined(split_buffer) + { + /* Work around the Tx-FIFO-full bug by splitting our transmit packet + into two pieces, the first which may be loaded without overflowing + the FIFO, and the second which contains the remainder of the + packet. When we get a Tx-done interrupt that frees enough room + in the FIFO we mark the remainder of the packet as loadable. + + This has the problem that the Tx descriptors are written both + here and in the interrupt handler. + */ + + int buf1size = TX_FIFO_SIZE - np->tx_q_bytes; + int buf2size = skb->len - buf1size; + + if (buf2size <= 0) { /* We fit into one descriptor. */ + np->tx_ring[entry].length = DescWholePkt | skb->len; + } else { /* We must use two descriptors. */ + unsigned int entry2; + np->tx_ring[entry].length = DescIntr | DescStartPkt | buf1size; + if (entry >= TX_RING_SIZE-1) { /* Wrap ring */ + np->tx_ring[entry].length |= DescEndRing; + entry2 = 0; + } else + entry2 = entry + 1; + np->cur_tx++; + np->tx_ring[entry2].buffer1 = virt_to_bus(skb->data + buf1size); + np->tx_ring[entry2].length = DescEndPkt | buf2size; + if (entry2 >= TX_RING_SIZE-1) /* Wrap ring */ + np->tx_ring[entry2].length |= DescEndRing; + } + np->tx_ring[entry].status = DescOwn; + np->cur_tx++; + } +#endif + np->tx_q_bytes += skb->len; + writel(0, dev->base_addr + TxStartDemand); + + /* Work around horrible bug in the chip by marking the queue as full + when we do not have FIFO room for a maximum sized packet. */ + if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN) + np->tx_full = 1; + else if ((np->flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT) + np->tx_full = 1; + else + clear_bit(0, (void*)&dev->tbusy); /* Typical path */ + + dev->trans_start = jiffies; + + if (debug > 4) { + printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", + dev->name, np->cur_tx, entry); + } + return 0; +} + +/* The interrupt handler does all of the Rx thread work and cleans up + after the Tx thread. */ +static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs) +{ + struct net_device *dev = (struct net_device *)dev_instance; + struct netdev_private *np = (struct netdev_private *)dev->priv; + long ioaddr = dev->base_addr; + int work_limit = max_interrupt_work; + +#if defined(__i386__) + /* A lock to prevent simultaneous entry bug on Intel SMP machines. 
*/ + if (test_and_set_bit(0, (void*)&dev->interrupt)) { + printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n", + dev->name); + dev->interrupt = 0; /* Avoid halting machine. */ + return; + } +#endif + + do { + u32 intr_status = readl(ioaddr + IntrStatus); + + /* Acknowledge all of the current interrupt sources ASAP. */ + writel(intr_status & 0x001ffff, ioaddr + IntrStatus); + + if (debug > 4) + printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", + dev->name, intr_status); + + if ((intr_status & (NormalIntr|AbnormalIntr)) == 0) + break; + + if (intr_status & (IntrRxDone | RxNoBuf)) + netdev_rx(dev); + + for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { + int entry = np->dirty_tx % TX_RING_SIZE; + int tx_status = np->tx_ring[entry].status; + + if (tx_status < 0) + break; + if (tx_status & 0x8000) { /* There was an error, log it. */ +#ifndef final_version + if (debug > 1) + printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", + dev->name, tx_status); +#endif + np->stats.tx_errors++; + if (tx_status & 0x0104) np->stats.tx_aborted_errors++; + if (tx_status & 0x0C80) np->stats.tx_carrier_errors++; + if (tx_status & 0x0200) np->stats.tx_window_errors++; + if (tx_status & 0x0002) np->stats.tx_fifo_errors++; + if ((tx_status & 0x0080) && np->full_duplex == 0) + np->stats.tx_heartbeat_errors++; +#ifdef ETHER_STATS + if (tx_status & 0x0100) np->stats.collisions16++; +#endif + } else { +#ifdef ETHER_STATS + if (tx_status & 0x0001) np->stats.tx_deferred++; +#endif +#if LINUX_VERSION_CODE > 0x20127 + np->stats.tx_bytes += np->tx_ring[entry].length & 0x7ff; +#endif + np->stats.collisions += (tx_status >> 3) & 15; + np->stats.tx_packets++; + } + /* Free the original skb. */ + np->tx_q_bytes -= np->tx_skbuff[entry]->len; + dev_free_skb(np->tx_skbuff[entry]); + np->tx_skbuff[entry] = 0; + } + if (np->tx_full && + np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4 + && np->tx_q_bytes < TX_BUG_FIFO_LIMIT) { + /* The ring is no longer full, clear tbusy. */ + np->tx_full = 0; + clear_bit(0, (void*)&dev->tbusy); + netif_wake_queue(dev); + } + + /* Abnormal error summary/uncommon events handlers. */ + if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr | + TimerInt | IntrTxStopped)) + netdev_error(dev, intr_status); + + if (--work_limit < 0) { + printk(KERN_WARNING "%s: Too much work at interrupt, " + "status=0x%4.4x.\n", dev->name, intr_status); + /* Set the timer to re-enable the other interrupts after + 10*82usec ticks. */ + writel(AbnormalIntr | TimerInt, ioaddr + IntrEnable); + writel(10, ioaddr + GPTimer); + break; + } + } while (1); + + if (debug > 3) + printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", + dev->name, readl(ioaddr + IntrStatus)); + +#if defined(__i386__) + clear_bit(0, (void*)&dev->interrupt); +#else + dev->interrupt = 0; +#endif + return; +} + +/* This routine is logically part of the interrupt handler, but separated + for clarity and better register allocation. */ +static int netdev_rx(struct net_device *dev) +{ + struct netdev_private *np = (struct netdev_private *)dev->priv; + int entry = np->cur_rx % RX_RING_SIZE; + int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx; + + if (debug > 4) { + printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n", + entry, np->rx_ring[entry].status); + } + + /* If EOP is set on the next entry, it's a new packet. Send it up. 
*/ + while (np->rx_head_desc->status >= 0) { + struct w840_rx_desc *desc = np->rx_head_desc; + s32 status = desc->status; + + if (debug > 4) + printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", + status); + if (--work_limit < 0) + break; + if ((status & 0x38008300) != 0x0300) { + if ((status & 0x38000300) != 0x0300) { + /* Ingore earlier buffers. */ + if ((status & 0xffff) != 0x7fff) { + printk(KERN_WARNING "%s: Oversized Ethernet frame spanned " + "multiple buffers, entry %#x status %4.4x!\n", + dev->name, np->cur_rx, status); + np->stats.rx_length_errors++; + } + } else if (status & 0x8000) { + /* There was a fatal error. */ + if (debug > 2) + printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n", + dev->name, status); + np->stats.rx_errors++; /* end of a packet.*/ + if (status & 0x0890) np->stats.rx_length_errors++; + if (status & 0x004C) np->stats.rx_frame_errors++; + if (status & 0x0002) np->stats.rx_crc_errors++; + } + } else { + struct sk_buff *skb; + /* Omit the four octet CRC from the length. */ + short pkt_len = ((status >> 16) & 0x7ff) - 4; + +#ifndef final_version + if (debug > 4) + printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d" + " status %x.\n", pkt_len, status); +#endif + /* Check if the packet is long enough to accept without copying + to a minimally-sized skbuff. */ + if (pkt_len < rx_copybreak + && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { + skb->dev = dev; + skb_reserve(skb, 2); /* 16 byte align the IP header */ + /* Call copy + cksum if available. */ +#if ! defined(__alpha__) + eth_copy_and_sum(skb, bus_to_virt(desc->buffer1), + pkt_len, 0); + skb_put(skb, pkt_len); +#else + memcpy(skb_put(skb, pkt_len), + bus_to_virt(desc->buffer1), pkt_len); +#endif + } else { + char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len); + np->rx_skbuff[entry] = NULL; +#ifndef final_version /* Remove after testing. */ + if (bus_to_virt(desc->buffer1) != temp) + printk(KERN_ERR "%s: Internal fault: The skbuff addresses " + "do not match in netdev_rx: %p vs. %p / %p.\n", + dev->name, bus_to_virt(desc->buffer1), + skb->head, temp); +#endif + } +#ifndef final_version /* Remove after testing. */ + /* You will want this info for the initial debug. */ + if (debug > 5) + printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:" + "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x " + "%d.%d.%d.%d.\n", + skb->data[0], skb->data[1], skb->data[2], skb->data[3], + skb->data[4], skb->data[5], skb->data[6], skb->data[7], + skb->data[8], skb->data[9], skb->data[10], + skb->data[11], skb->data[12], skb->data[13], + skb->data[14], skb->data[15], skb->data[16], + skb->data[17]); +#endif + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + dev->last_rx = jiffies; + np->stats.rx_packets++; +#if LINUX_VERSION_CODE > 0x20127 + np->stats.rx_bytes += pkt_len; +#endif + } + entry = (++np->cur_rx) % RX_RING_SIZE; + np->rx_head_desc = &np->rx_ring[entry]; + } + + /* Refill the Rx ring buffers. */ + for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) { + struct sk_buff *skb; + entry = np->dirty_rx % RX_RING_SIZE; + if (np->rx_skbuff[entry] == NULL) { + skb = dev_alloc_skb(np->rx_buf_sz); + np->rx_skbuff[entry] = skb; + if (skb == NULL) + break; /* Better luck next round. */ + skb->dev = dev; /* Mark as being used by this device. 
*/ + np->rx_ring[entry].buffer1 = virt_to_bus(skb->tail); + } + np->rx_ring[entry].status = DescOwn; + } + + return 0; +} + +static void netdev_error(struct net_device *dev, int intr_status) +{ + long ioaddr = dev->base_addr; + struct netdev_private *np = (struct netdev_private *)dev->priv; + + if (debug > 2) + printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n", + dev->name, intr_status); + if (intr_status == 0xffffffff) + return; + if (intr_status & TxFIFOUnderflow) { + np->csr6 += 0x4000; /* Bump up the Tx threshold */ + printk(KERN_DEBUG "%s: Tx underflow, increasing threshold to %8.8x.\n", + dev->name, np->csr6); + writel(np->csr6, ioaddr + NetworkConfig); + } + if (intr_status & IntrRxDied) { /* Missed a Rx frame. */ + np->stats.rx_errors++; + } + if (intr_status & TimerInt) { + /* Re-enable other interrupts. */ + writel(0x1A0F5, ioaddr + IntrEnable); + } + np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff; + writel(0, dev->base_addr + RxStartDemand); +} + +static struct net_device_stats *get_stats(struct net_device *dev) +{ + long ioaddr = dev->base_addr; + struct netdev_private *np = (struct netdev_private *)dev->priv; + + /* The chip only need report frame silently dropped. */ + if (dev->start) + np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff; + + return &np->stats; +} + +static unsigned const ethernet_polynomial = 0x04c11db7U; +static inline u32 ether_crc(int length, unsigned char *data) +{ + int crc = -1; + + while(--length >= 0) { + unsigned char current_octet = *data++; + int bit; + for (bit = 0; bit < 8; bit++, current_octet >>= 1) { + crc = (crc << 1) ^ + ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0); + } + } + return crc; +} + +static void set_rx_mode(struct net_device *dev) +{ + struct netdev_private *np = (struct netdev_private *)dev->priv; + long ioaddr = dev->base_addr; + u32 mc_filter[2]; /* Multicast hash filter */ + u32 rx_mode; + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + /* Unconditionally log net taps. */ + printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); + memset(mc_filter, 0xff, sizeof(mc_filter)); + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys + | AcceptMyPhys; + } else if ((dev->mc_count > multicast_filter_limit) + || (dev->flags & IFF_ALLMULTI)) { + /* Too many to match, or accept all multicasts. */ + memset(mc_filter, 0xff, sizeof(mc_filter)); + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + } else { + struct dev_mc_list *mclist; + int i; + memset(mc_filter, 0, sizeof(mc_filter)); + for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; + i++, mclist = mclist->next) { + set_bit((ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F, + mc_filter); + } + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + } + writel(mc_filter[0], ioaddr + MulticastFilter0); + writel(mc_filter[1], ioaddr + MulticastFilter1); + np->csr6 &= ~0x00F8; + np->csr6 |= rx_mode; + writel(np->csr6, ioaddr + NetworkConfig); +} + +static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + u16 *data = (u16 *)&rq->ifr_data; + + switch(cmd) { + case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */ + data[0] = ((struct netdev_private *)dev->priv)->phys[0] & 0x1f; + /* Fall Through */ + case SIOCDEVPRIVATE+1: /* Read the specified MII register. 
*/ + data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f); + return 0; + case SIOCDEVPRIVATE+2: /* Write the specified MII register */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int netdev_close(struct net_device *dev) +{ + long ioaddr = dev->base_addr; + struct netdev_private *np = (struct netdev_private *)dev->priv; + int i; + + dev->start = 0; + dev->tbusy = 1; + + if (debug > 1) { + printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x " + "Config %8.8x.\n", dev->name, readl(ioaddr + IntrStatus), + readl(ioaddr + NetworkConfig)); + printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", + dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); + } + + /* Disable interrupts by clearing the interrupt mask. */ + writel(0x0000, ioaddr + IntrEnable); + + /* Stop the chip's Tx and Rx processes. */ + writel(np->csr6 &= ~0x20FA, ioaddr + NetworkConfig); + + del_timer(&np->timer); + if (readl(ioaddr + NetworkConfig) != 0xffffffff) + np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff; + +#ifdef __i386__ + if (debug > 2) { + printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n", + (int)virt_to_bus(np->tx_ring)); + for (i = 0; i < TX_RING_SIZE; i++) + printk(" #%d desc. %4.4x %4.4x %8.8x.\n", + i, np->tx_ring[i].length, + np->tx_ring[i].status, np->tx_ring[i].buffer1); + printk("\n"KERN_DEBUG " Rx ring %8.8x:\n", + (int)virt_to_bus(np->rx_ring)); + for (i = 0; i < RX_RING_SIZE; i++) { + printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n", + i, np->rx_ring[i].length, + np->rx_ring[i].status, np->rx_ring[i].buffer1); + } + } +#endif /* __i386__ debugging only */ + + free_irq(dev->irq, dev); + + /* Free all the skbuffs in the Rx queue. */ + for (i = 0; i < RX_RING_SIZE; i++) { + np->rx_ring[i].status = 0; + if (np->rx_skbuff[i]) { +#if LINUX_VERSION_CODE < 0x20100 + np->rx_skbuff[i]->free = 1; +#endif + dev_free_skb(np->rx_skbuff[i]); + } + np->rx_skbuff[i] = 0; + } + for (i = 0; i < TX_RING_SIZE; i++) { + if (np->tx_skbuff[i]) + dev_free_skb(np->tx_skbuff[i]); + np->tx_skbuff[i] = 0; + } + + MOD_DEC_USE_COUNT; + + return 0; +} + + +#ifdef MODULE +int init_module(void) +{ + if (debug) /* Emit version even if no cards detected. */ + printk(KERN_INFO "%s" KERN_INFO "%s", version, version1); + return netif_pci_probe(pci_tbl, NULL); +} + +void cleanup_module(void) +{ + struct net_device *next_dev; + + /* No need to check MOD_IN_USE, as sys_delete_module() checks. 
*/ + while (root_net_dev) { + next_dev = ((struct netdev_private *)root_net_dev->priv)->next_module; + unregister_netdev(root_net_dev); +#ifdef USE_IO_OPS + release_region(root_net_dev->base_addr, pci_tbl[0].io_size); +#else + iounmap((char *)(root_net_dev->base_addr)); +#endif + kfree(root_net_dev); + root_net_dev = next_dev; + } +} +#else +int winbond840_probe(struct net_device *dev) +{ + if (pci_etherdev_probe(pci_tbl, dev) < 0) + return -ENODEV; + printk(KERN_INFO "%s" KERN_INFO "%s", version, version1); + return 0; +} +#endif /* MODULE */ + + +/* + * Local variables: + * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c winbond-840.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`" + * SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c winbond-840.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`" + * simple-compile: "gcc -DMODULE -D__KERNEL__ -O6 -c winbond-840.c" + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 4 + * End: + */
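Once built, the module can be loaded with the parameters it exposes
(debug, options, full_duplex, rx_copybreak, multicast_filter_limit,
max_interrupt_work); for example, with purely illustrative values:

  # force full duplex on the first card and make the driver more verbose
  modprobe winbond-840 full_duplex=1 debug=2

Per the probe code above, the low four bits of an options[] entry select
the media type and bit 0x200 likewise requests full duplex.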