Thomas Falcon [tlfal...@linux.ibm.com] wrote:
> Access Control Lists can be defined for each IBM VNIC
> adapter at time of creation. MAC addresses and VLAN IDs
> may be specified, as well as a Port VLAN ID (PVID).
> These may all be requested through read-only sysfs files:
> mac_acl, vlan_acl, and pvid. When these files are read,
> a series of Command-Response Queue (CRQ) commands is sent to
> firmware. The first command requests the size of the ACL
> data. The driver allocates a buffer of this size and passes
> the address in a second CRQ command to firmware, which then
> writes the ACL data to this buffer. This data is then parsed
> and printed to the respective sysfs file.
>
> Signed-off-by: Thomas Falcon <tlfal...@linux.ibm.com>
> ---
>  drivers/net/ethernet/ibm/ibmvnic.c | 199 +++++++++++++++++++++++++++++++++++++
>  drivers/net/ethernet/ibm/ibmvnic.h |  26 ++++-
>  2 files changed, 222 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
> index 91b9cc3..36dfa69 100644
> --- a/drivers/net/ethernet/ibm/ibmvnic.c
> +++ b/drivers/net/ethernet/ibm/ibmvnic.c
> @@ -1163,6 +1163,60 @@ static int __ibmvnic_open(struct net_device *netdev)
>  	return rc;
>  }
>
> +static int ibmvnic_query_acl_sz(struct ibmvnic_adapter *adapter)
> +{
> +	union ibmvnic_crq crq;
> +
> +	memset(&crq, 0, sizeof(crq));
> +	crq.acl_query.first = IBMVNIC_CRQ_CMD;
> +	crq.acl_query.cmd = ACL_QUERY;
> +
> +	if (ibmvnic_send_crq(adapter, &crq))
> +		return -EIO;
> +	return 0;
> +}
> +
> +static int ibmvnic_request_acl_buf(struct ibmvnic_adapter *adapter)
> +{
> +	struct device *dev = &adapter->vdev->dev;
> +	union ibmvnic_crq rcrq;
> +	int rc;
> +
> +	rc = 0;
> +	adapter->acl_buf = kmalloc(adapter->acl_buf_sz, GFP_KERNEL);
> +	if (!adapter->acl_buf) {
> +		rc = -ENOMEM;
> +		goto acl_alloc_err;
> +	}
> +	adapter->acl_buf_token = dma_map_single(dev, adapter->acl_buf,
> +						adapter->acl_buf_sz,
> +						DMA_FROM_DEVICE);
> +	if (dma_mapping_error(dev, adapter->acl_buf_token)) {
> +		rc = -ENOMEM;
> +		goto acl_dma_err;
> +	}
> +	memset(&rcrq, 0, sizeof(rcrq));
> +	rcrq.acl_query.first = IBMVNIC_CRQ_CMD;
> +	rcrq.acl_query.cmd = ACL_QUERY;
> +	rcrq.acl_query.ioba = cpu_to_be32(adapter->acl_buf_token);
> +	rcrq.acl_query.len = cpu_to_be32(adapter->acl_buf_sz);
> +	if (ibmvnic_send_crq(adapter, &rcrq)) {
> +		rc = -EIO;
> +		goto acl_query_err;
> +	}
> +	return 0;
> +acl_query_err:
> +	dma_unmap_single(dev, adapter->acl_buf_token,
> +			 adapter->acl_buf_sz, DMA_FROM_DEVICE);
> +	adapter->acl_buf_token = 0;
> +	adapter->acl_buf_sz = 0;
> +acl_dma_err:
> +	kfree(adapter->acl_buf);
> +	adapter->acl_buf = NULL;
> +acl_alloc_err:
> +	return rc;
> +}
> +
>  static int ibmvnic_open(struct net_device *netdev)
>  {
>  	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
> @@ -4635,6 +4689,25 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
>  	return rc;
>  }
>
> +static void handle_acl_query_rsp(struct ibmvnic_adapter *adapter,
> +				 union ibmvnic_crq *crq)
> +{
> +	struct net_device *netdev = adapter->netdev;
> +	u8 rcode;
> +
> +	rcode = crq->acl_query_rsp.rc.code;
> +	adapter->fw_done_rc = rcode;
> +	/* NOMEMORY is returned when ACL buffer size request is successful */
> +	if (rcode == NOMEMORY) {
> +		adapter->acl_buf_sz = be32_to_cpu(crq->acl_query_rsp.len);
> +		netdev_dbg(netdev, "ACL buffer size is %d.\n",
> +			   adapter->acl_buf_sz);
> +	} else if (rcode != SUCCESS) {
> +		netdev_err(netdev, "ACL query failed, rc = %u\n", rcode);
> +	}
> +	complete(&adapter->fw_done);
> +}
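Not a comment on the code, just context on the interface the changelog
describes: reading the three new attributes from userspace would look roughly
like the sketch below. Illustrative only, not part of the patch; the sysfs
path is hypothetical and depends on the VIO device name on the system.

/* Illustrative only: dump mac_acl, vlan_acl and pvid for one adapter.
 * The base path is a made-up example of a pseries VIO device node.
 */
#include <stdio.h>

int main(void)
{
	const char *base = "/sys/devices/vio/30000003";	/* hypothetical */
	const char *attrs[] = { "mac_acl", "vlan_acl", "pvid" };
	char path[256], line[128];

	for (int i = 0; i < 3; i++) {
		FILE *f;

		snprintf(path, sizeof(path), "%s/%s", base, attrs[i]);
		f = fopen(path, "r");
		if (!f) {
			perror(path);
			continue;
		}
		printf("%s:\n", attrs[i]);
		while (fgets(line, sizeof(line), f))
			printf("  %s", line);
		fclose(f);
	}
	return 0;
}

Per the show routine further down, mac_acl prints one MAC per line, vlan_acl
one VLAN ID per line, and pvid the VLAN ID and priority on two lines.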
> +
>  static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
>  			       struct ibmvnic_adapter *adapter)
>  {
> @@ -4798,6 +4871,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
>  		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
>  		complete(&adapter->fw_done);
>  		break;
> +	case ACL_QUERY_RSP:
> +		handle_acl_query_rsp(adapter, crq);
> +		break;
>  	default:
>  		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
>  			   gen_crq->cmd);
> @@ -5199,6 +5275,9 @@ static int ibmvnic_remove(struct vio_dev *dev)
>  }
>
>  static struct device_attribute dev_attr_failover;
> +static struct device_attribute dev_attr_vlan_acl;
> +static struct device_attribute dev_attr_mac_acl;
> +static struct device_attribute dev_attr_pvid;
>
>  static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
>  			      const char *buf, size_t count)
> @@ -5234,10 +5313,130 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
>  	return count;
>  }
>
> +static int ibmvnic_get_acls(struct ibmvnic_adapter *adapter)
> +{
> +	struct net_device *netdev = adapter->netdev;
> +	int rc;
> +
> +	mutex_lock(&adapter->fw_lock);

Would it be better to hold this lock in the caller, acl_show(), instead? If
we get back-to-back calls to acl_show(), the thread in acl_show() could free
adapter->acl_buf while this function is still using it.
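Roughly something like the untested sketch below. It assumes ibmvnic_get_acls()
drops its own mutex_lock()/mutex_unlock() so the caller serializes the whole
read, and ibmvnic_format_acls() is a hypothetical stand-in for the mac/vlan/pvid
formatting code already in acl_show():

static ssize_t acl_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	ssize_t rsize;
	int rc;

	/* hold fw_lock across the CRQ exchange *and* the parse/free */
	mutex_lock(&adapter->fw_lock);
	rc = ibmvnic_get_acls(adapter);		/* no longer takes fw_lock */
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	/* hypothetical helper: the existing formatting code from acl_show() */
	rsize = ibmvnic_format_acls(adapter, attr, buf);

	dma_unmap_single(dev, adapter->acl_buf_token, adapter->acl_buf_sz,
			 DMA_FROM_DEVICE);
	kfree(adapter->acl_buf);
	adapter->acl_buf = NULL;
	adapter->acl_buf_token = 0;
	adapter->acl_buf_sz = 0;
	mutex_unlock(&adapter->fw_lock);

	return rsize;
}

That way a second reader cannot free adapter->acl_buf between
ibmvnic_get_acls() returning and the formatting/unmap step.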
> +	reinit_completion(&adapter->fw_done);
> +	adapter->fw_done_rc = 0;
> +	rc = ibmvnic_query_acl_sz(adapter);
> +	if (rc) {
> +		netdev_err(netdev, "Query ACL buffer size failed, rc = %d\n",
> +			   rc);
> +		goto out;
> +	}
> +	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
> +	if (rc) {
> +		netdev_err(netdev,
> +			   "Query ACL buffer size did not complete, rc = %d\n",
> +			   rc);
> +		goto out;
> +	}
> +	/* NOMEMORY is returned when the ACL buffer size is retrieved
> +	 * successfully
> +	 */
> +	if (adapter->fw_done_rc != NOMEMORY) {
> +		netdev_err(netdev, "Unable to get ACL buffer size, rc = %d\n",
> +			   adapter->fw_done_rc);
> +		rc = -EIO;
> +		goto out;
> +	}
> +	reinit_completion(&adapter->fw_done);
> +	rc = ibmvnic_request_acl_buf(adapter);
> +	if (rc) {
> +		netdev_err(netdev, "ACL buffer request failed, rc = %d\n", rc);
> +		goto out;
> +	}
> +	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
> +	if (rc) {
> +		netdev_err(netdev,
> +			   "ACL buffer request did not complete, rc = %d\n",
> +			   rc);
> +		goto out;
> +	}
> +	if (adapter->fw_done_rc != SUCCESS) {
> +		netdev_err(netdev, "Unable to retrieve ACL buffer, rc = %d\n",
> +			   adapter->fw_done_rc);
> +		rc = -EIO;
> +	}
> +out:
> +	mutex_unlock(&adapter->fw_lock);
> +	return rc;
> +}
> +
> +static ssize_t acl_show(struct device *dev,
> +			struct device_attribute *attr, char *buf)
> +{
> +	struct ibmvnic_acl_buffer *acl_buf;
> +	struct ibmvnic_adapter *adapter;
> +	struct net_device *netdev;
> +	int num_entries;
> +	ssize_t rsize;
> +	int offset;
> +	int rc;
> +	int i;
> +
> +	rsize = 0;
> +	netdev = dev_get_drvdata(dev);
> +	adapter = netdev_priv(netdev);
> +	rc = ibmvnic_get_acls(adapter);
> +	if (rc)
> +		return rc;
> +	acl_buf = adapter->acl_buf;
> +	if (attr == &dev_attr_mac_acl) {
> +		offset = be32_to_cpu(acl_buf->offset_mac_addrs);
> +		num_entries = be32_to_cpu(acl_buf->num_mac_addrs);
> +		if (num_entries == 0)
> +			goto out;
> +		for (i = 0; i < num_entries; i++) {
> +			char *entry = (char *)acl_buf + offset + i * 6;
> +
> +			rsize += scnprintf(buf + rsize, PAGE_SIZE,
> +					   "%pM\n", entry);

Shouldn't the second parameter be 'PAGE_SIZE-rsize' here and

> +		}
> +	} else if (attr == &dev_attr_vlan_acl) {
> +		offset = be32_to_cpu(acl_buf->offset_vlan_ids);
> +		num_entries = be32_to_cpu(acl_buf->num_vlan_ids);
> +		if (num_entries == 0)
> +			goto out;
> +		for (i = 0 ; i < num_entries; i++) {
> +			char *entry = (char *)acl_buf + offset + i * 2;
> +
> +			rsize += scnprintf(buf + rsize, PAGE_SIZE, "%d\n",
> +					   be16_to_cpup((__be16 *)entry));

here?
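That is, pass the remaining space rather than the full page, so a long ACL
list cannot write past the PAGE_SIZE buffer sysfs hands us. A sketch of the
two calls as I would expect them:

	rsize += scnprintf(buf + rsize, PAGE_SIZE - rsize,
			   "%pM\n", entry);

and

	rsize += scnprintf(buf + rsize, PAGE_SIZE - rsize, "%d\n",
			   be16_to_cpup((__be16 *)entry));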
> +		}
> +	} else if (attr == &dev_attr_pvid) {
> +		u16 pvid, vid;
> +		u8 pri;
> +
> +		pvid = be16_to_cpu(acl_buf->pvid);
> +		vid = pvid & VLAN_VID_MASK;
> +		pri = (pvid & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
> +
> +		rsize = scnprintf(buf, PAGE_SIZE, "%d\n%d\n", vid, pri);
> +	}
> +out:
> +	dma_unmap_single(dev, adapter->acl_buf_token, adapter->acl_buf_sz,
> +			 DMA_FROM_DEVICE);
> +	kfree(adapter->acl_buf);
> +	adapter->acl_buf = NULL;
> +	adapter->acl_buf_token = 0;
> +	adapter->acl_buf_sz = 0;
> +	return rsize;
> +}
> +
>  static DEVICE_ATTR_WO(failover);
> +static DEVICE_ATTR(mac_acl, 0444, acl_show, NULL);
> +static DEVICE_ATTR(vlan_acl, 0444, acl_show, NULL);
> +static DEVICE_ATTR(pvid, 0444, acl_show, NULL);
>
>  static struct attribute *dev_attrs[] = {
>  	&dev_attr_failover.attr,
> +	&dev_attr_mac_acl.attr,
> +	&dev_attr_vlan_acl.attr,
> +	&dev_attr_pvid.attr,
>  	NULL,
>  };
>
> diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
> index e497392..4768626 100644
> --- a/drivers/net/ethernet/ibm/ibmvnic.h
> +++ b/drivers/net/ethernet/ibm/ibmvnic.h
> @@ -195,12 +195,15 @@ struct ibmvnic_acl_buffer {
>  #define INITIAL_VERSION_IOB 1
>  	u8 mac_acls_restrict;
>  	u8 vlan_acls_restrict;
> -	u8 reserved1[22];
> +	__be16 pvid;
> +	u8 reserved1[52];
> +	__be32 max_mac_addrs;
>  	__be32 num_mac_addrs;
>  	__be32 offset_mac_addrs;
> +	__be32 max_vlan_ids;
>  	__be32 num_vlan_ids;
>  	__be32 offset_vlan_ids;
> -	u8 reserved2[80];
> +	u8 reserved2[40];
>  } __packed __aligned(8);
>
>  /* descriptors have been changed, how should this be defined? 1? 4? */
> @@ -585,6 +588,19 @@ struct ibmvnic_acl_query {
>  	u8 reserved2[4];
>  } __packed __aligned(8);
>
> +struct ibmvnic_acl_query_rsp {
> +	u8 first;
> +	u8 cmd;
> +#define ACL_EXISTS	0x8000
> +#define VLAN_ACL_ON	0x4000
> +#define MAC_ACL_ON	0x2000
> +#define PVID_ON		0x1000
> +	__be16 flags;
> +	u8 reserved[4];
> +	__be32 len;
> +	struct ibmvnic_rc rc;
> +} __packed __aligned(8);
> +
>  struct ibmvnic_tune {
>  	u8 first;
>  	u8 cmd;
> @@ -695,7 +711,7 @@ struct ibmvnic_query_map_rsp {
>  	struct ibmvnic_get_vpd get_vpd;
>  	struct ibmvnic_get_vpd_rsp get_vpd_rsp;
>  	struct ibmvnic_acl_query acl_query;
> -	struct ibmvnic_generic_crq acl_query_rsp;
> +	struct ibmvnic_acl_query_rsp acl_query_rsp;
>  	struct ibmvnic_tune tune;
>  	struct ibmvnic_generic_crq tune_rsp;
>  	struct ibmvnic_request_map request_map;
> @@ -1001,6 +1017,10 @@ struct ibmvnic_adapter {
>  	dma_addr_t login_rsp_buf_token;
>  	int login_rsp_buf_sz;
>
> +	struct ibmvnic_acl_buffer *acl_buf;
> +	dma_addr_t acl_buf_token;
> +	int acl_buf_sz;
> +
>  	atomic_t running_cap_crqs;
>  	bool wait_capability;
>
> --
> 1.8.3.1
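One more illustration, in case it helps review of the ibmvnic_acl_buffer
layout change above: the offset/num fields index into the returned buffer,
six bytes per MAC entry and two bytes per VLAN ID, and the PVID packs the
VLAN ID in the low 12 bits with the priority above it. A purely host-side
mock follows (not driver code; all offsets and values are made up):

/* Mock of how the ACL buffer's offset/num fields and the PVID encoding
 * are consumed.  Standalone userspace program, illustrative only.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htons/ntohs for the big-endian fields */

int main(void)
{
	uint8_t acl[128] = { 0 };
	uint32_t num_macs = 2, off_macs = 96;	/* made-up offsets */
	uint32_t num_vlans = 2, off_vlans = 108;
	uint8_t macs[2][6] = {
		{ 0x02, 0x00, 0x00, 0xaa, 0xbb, 0x01 },
		{ 0x02, 0x00, 0x00, 0xaa, 0xbb, 0x02 },
	};
	uint16_t vlans[2] = { htons(10), htons(20) };
	uint16_t pvid = htons((5 << 13) | 100);	/* priority 5, VLAN 100 */
	uint32_t i;

	/* lay the entries out at the advertised offsets */
	memcpy(acl + off_macs, macs, sizeof(macs));
	memcpy(acl + off_vlans, vlans, sizeof(vlans));

	for (i = 0; i < num_macs; i++) {
		const uint8_t *m = acl + off_macs + i * 6;	/* 6 bytes per MAC */

		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       m[0], m[1], m[2], m[3], m[4], m[5]);
	}
	for (i = 0; i < num_vlans; i++) {
		uint16_t v;

		memcpy(&v, acl + off_vlans + i * 2, 2);	/* 2 bytes per VLAN ID */
		printf("%u\n", ntohs(v));
	}
	/* pvid: VLAN ID in bits 0-11, priority in bits 13-15 */
	printf("%u\n%u\n", ntohs(pvid) & 0x0fff, ntohs(pvid) >> 13);
	return 0;
}

The three printf formats mirror what the mac_acl, vlan_acl and pvid files
print in acl_show().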