arch/x86_64/kernel/smpboot.c +1 −1

@@ -334,7 +334,7 @@ static void __cpuinit tsc_sync_wait(void)
 {
 	if (notscsync || !cpu_has_tsc)
 		return;
-	sync_tsc(boot_cpu_id);
+	sync_tsc(0);
 }
 
 static __init int notscsync_setup(char *s)
drivers/acpi/motherboard.c +1 −1

@@ -42,7 +42,7 @@ ACPI_MODULE_NAME("acpi_motherboard")
  */
 #define IS_RESERVED_ADDR(base, len) \
 	(((len) > 0) && ((base) > 0) && ((base) + (len) < IO_SPACE_LIMIT) \
-	&& ((base) + (len) > 0x1000))
+	&& ((base) + (len) > PCIBIOS_MIN_IO))
 
 /*
  * Clearing the flag (IORESOURCE_BUSY) allows drivers to use
  * the io ports if they really know they can use it, while
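Note on the hunk above: after the substitution, IS_RESERVED_ADDR() is true only for port ranges that end above PCIBIOS_MIN_IO and still fit below IO_SPACE_LIMIT; the hard-coded 0x1000 matched the i386 definition of PCIBIOS_MIN_IO. A minimal userspace sketch of the predicate follows; the constant values used here (PCIBIOS_MIN_IO = 0x1000, IO_SPACE_LIMIT = 0xffff, roughly a typical i386 build) are assumptions for illustration only.

/*
 * Sketch of the IS_RESERVED_ADDR() predicate after the change above.
 * Assumed constants (illustration only): PCIBIOS_MIN_IO = 0x1000 and
 * IO_SPACE_LIMIT = 0xffff, as on a typical i386 build.
 */
#include <stdio.h>

#define IO_SPACE_LIMIT	0xffff
#define PCIBIOS_MIN_IO	0x1000

#define IS_RESERVED_ADDR(base, len) \
	(((len) > 0) && ((base) > 0) && ((base) + (len) < IO_SPACE_LIMIT) \
	&& ((base) + (len) > PCIBIOS_MIN_IO))

int main(void)
{
	/* legacy range entirely below PCIBIOS_MIN_IO: predicate is false */
	printf("0x60..0x6f    -> %d\n", IS_RESERVED_ADDR(0x60, 0x10));
	/* range ending above PCIBIOS_MIN_IO: predicate is true */
	printf("0xf00..0x10ff -> %d\n", IS_RESERVED_ADDR(0xf00, 0x200));
	return 0;
}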
drivers/char/mem.c +8 −4

@@ -261,7 +261,11 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
 {
-	unsigned long long val;
+	unsigned long pfn;
+
+	/* Turn a kernel-virtual address into a physical page frame */
+	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
+
 	/*
 	 * RED-PEN: on some architectures there is more mapped memory
 	 * than available in mem_map which pfn_valid checks
@@ -269,10 +273,10 @@ static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
 	 *
 	 * RED-PEN: vmalloc is not supported right now.
 	 */
-	if (!pfn_valid(vma->vm_pgoff))
+	if (!pfn_valid(pfn))
 		return -EIO;
-	val = (u64)vma->vm_pgoff << PAGE_SHIFT;
-	vma->vm_pgoff = __pa(val) >> PAGE_SHIFT;
+
+	vma->vm_pgoff = pfn;
 	return mmap_mem(file, vma);
 }
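The mem.c hunks make /dev/kmem's mmap convert its kernel-virtual page offset into a physical page frame number once, up front, and then use that pfn both for the pfn_valid() check and for the vm_pgoff handed on to mmap_mem(); previously pfn_valid() was called on the still-virtual offset. The sketch below models only that arithmetic in userspace: __pa() is replaced by a hypothetical flat direct-map translation, and the PAGE_OFFSET/PAGE_SHIFT values are assumptions for illustration.

/*
 * Userspace model of the vm_pgoff -> pfn conversion added above.
 * Assumptions (illustration only): a flat direct map where
 * __pa(v) == v - PAGE_OFFSET, PAGE_SHIFT == 12, and an x86_64-style
 * PAGE_OFFSET.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xffff880000000000ULL

/* stand-in for the kernel's __pa(): kernel-virtual -> physical */
static uint64_t mock_pa(uint64_t vaddr)
{
	return vaddr - PAGE_OFFSET;
}

int main(void)
{
	/* vm_pgoff as mmap_kmem() receives it: a kernel-virtual page index */
	uint64_t vm_pgoff = (PAGE_OFFSET + 0x100000ULL) >> PAGE_SHIFT;

	/* the old code fed this virtual page index straight to pfn_valid() */
	uint64_t old_arg = vm_pgoff;

	/* the new code first turns it into a physical page frame number */
	uint64_t pfn = mock_pa(vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	printf("virtual page index: %#llx\n", (unsigned long long)old_arg);
	printf("physical pfn:       %#llx\n", (unsigned long long)pfn);
	return 0;
}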
drivers/net/e1000/e1000_main.c +1 −0

@@ -3789,6 +3789,7 @@ e1000_netpoll(struct net_device *netdev)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	disable_irq(adapter->pdev->irq);
 	e1000_intr(adapter->pdev->irq, netdev, NULL);
+	e1000_clean_tx_irq(adapter);
 	enable_irq(adapter->pdev->irq);
 }
 #endif
drivers/scsi/dc395x.c +13 −35

@@ -183,7 +183,7 @@
  * cross a page boundy.
  */
 #define SEGMENTX_LEN	(sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
-#define VIRTX_LEN	(sizeof(void *) * DC395x_MAX_SG_LISTENTRY)
 
 struct SGentry {
 	u32 address;		/* bus! address */

@@ -235,7 +235,6 @@ struct ScsiReqBlk {
 	u8 sg_count;			/* No of HW sg entries for this request */
 	u8 sg_index;			/* Index of HW sg entry for this request */
 	u32 total_xfer_length;		/* Total number of bytes remaining to be transfered */
-	void **virt_map;
 	unsigned char *virt_addr;	/* Virtual address of current transfer position */
 
 	/*

@@ -1022,14 +1021,14 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
 			reqlen, cmd->request_buffer, cmd->use_sg,
 			srb->sg_count);
 
+		srb->virt_addr = page_address(sl->page);
 		for (i = 0; i < srb->sg_count; i++) {
-			u32 busaddr = (u32)sg_dma_address(&sl[i]);
-			u32 seglen = (u32)sl[i].length;
-			sgp[i].address = busaddr;
+			u32 seglen = (u32)sg_dma_len(sl + i);
+			sgp[i].address = (u32)sg_dma_address(sl + i);
 			sgp[i].length = seglen;
 			srb->total_xfer_length += seglen;
-			srb->virt_map[i] = kmap(sl[i].page);
 		}
-		srb->virt_addr = srb->virt_map[0];
 
 		sgp += srb->sg_count - 1;
 
 		/*

@@ -1976,7 +1975,6 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
 	int segment = cmd->use_sg;
 	u32 xferred = srb->total_xfer_length - left; /* bytes transfered */
 	struct SGentry *psge = srb->segment_x + srb->sg_index;
-	void **virt = srb->virt_map;
 
 	dprintkdbg(DBG_0,
 		"sg_update_list: Transfered %i of %i bytes, %i remain\n",

@@ -2016,16 +2014,16 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
 		/* We have to walk the scatterlist to find it */
 		sg = (struct scatterlist *)cmd->request_buffer;
-		idx = 0;
 		while (segment--) {
 			unsigned long mask =
 			    ~((unsigned long)sg->length - 1) & PAGE_MASK;
 			if ((sg_dma_address(sg) & mask) == (psge->address & mask)) {
-				srb->virt_addr = virt[idx] + (psge->address & ~PAGE_MASK);
+				srb->virt_addr = (page_address(sg->page)
+						  + psge->address -
+						  (psge->address & PAGE_MASK));
 				return;
 			}
 			++sg;
-			++idx;
 		}
 
 		dprintkl(KERN_ERR, "sg_update_list: sg_to_virt failed\n");

@@ -2151,7 +2149,7 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
 			DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
 	}
 
 	/*
-	 * calculate all the residue data that not yet transfered
+	 * calculate all the residue data that not yet tranfered
 	 * SCSI transfer counter + left in SCSI FIFO data
 	 *
 	 *	.....TRM_S1040_SCSI_COUNTER (24bits)

@@ -3269,7 +3267,6 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
 	struct scsi_cmnd *cmd = srb->cmd;
 	enum dma_data_direction dir = cmd->sc_data_direction;
 
 	if (cmd->use_sg && dir != PCI_DMA_NONE) {
-		int i;
 		/* unmap DC395x SG list */
 		dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
 			srb->sg_bus_addr, SEGMENTX_LEN);

@@ -3279,8 +3276,6 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
 		dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
 			cmd->use_sg, cmd->request_buffer);
 		/* unmap the sg segments */
-		for (i = 0; i < srb->sg_count; i++)
-			kunmap(virt_to_page(srb->virt_map[i]));
 		pci_unmap_sg(acb->dev,
 			(struct scatterlist *)cmd->request_buffer,
 			cmd->use_sg, dir);

@@ -3327,7 +3322,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
 		if (cmd->use_sg) {
 			struct scatterlist* sg = (struct scatterlist *)cmd->request_buffer;
-			ptr = (struct ScsiInqData *)(srb->virt_map[0] + sg->offset);
+			ptr = (struct ScsiInqData *)(page_address(sg->page) + sg->offset);
 		} else {
 			ptr = (struct ScsiInqData *)(cmd->request_buffer);
 		}

@@ -4262,9 +4257,8 @@ static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
 	const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
 
 	for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
-		if (acb->srb_array[i].segment_x)
-			kfree(acb->srb_array[i].segment_x);
-	vfree(acb->srb_array[0].virt_map);
+		kfree(acb->srb_array[i].segment_x);
 }

@@ -4280,12 +4274,9 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
 	int srb_idx = 0;
 	unsigned i = 0;
 	struct SGentry *ptr;
-	void **virt_array;
 
-	for (i = 0; i < DC395x_MAX_SRB_CNT; i++) {
+	for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
 		acb->srb_array[i].segment_x = NULL;
-		acb->srb_array[i].virt_map = NULL;
-	}
 
 	dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
 	while (pages--) {

@@ -4306,19 +4297,6 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
 				ptr + (i * DC395x_MAX_SG_LISTENTRY);
 		else
 			dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
-
-	virt_array = vmalloc((DC395x_MAX_SRB_CNT + 1)
-			     * DC395x_MAX_SG_LISTENTRY * sizeof(void*));
-
-	if (!virt_array) {
-		adapter_sg_tables_free(acb);
-		return 1;
-	}
-
-	for (i = 0; i < DC395x_MAX_SRB_CNT + 1; i++) {
-		acb->srb_array[i].virt_map = virt_array;
-		virt_array += DC395x_MAX_SG_LISTENTRY;
-	}
 	return 0;
 }
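Taken together, the dc395x hunks drop the per-segment kmap()/virt_map[] bookkeeping and go back to deriving the current virtual transfer position straight from the scatterlist page, as sg_update_list() now does with page_address(sg->page) plus the offset of psge->address within its page. The userspace sketch below models only that offset arithmetic; the bus address value and the page_virt buffer standing in for page_address(sg->page) are hypothetical stand-ins.

/*
 * Userspace model of the virt_addr recovery in sg_update_list() above:
 * virtual base of the page plus the in-page offset of the bus address.
 * The values below are hypothetical; page_virt stands in for
 * page_address(sg->page) and bus_addr for psge->address.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
	uint64_t bus_addr = 0x1234567ULL;		 /* psge->address */
	static unsigned char page_virt[1 << PAGE_SHIFT]; /* "page_address(sg->page)" */

	/* same arithmetic as the patch: base + (address - (address & PAGE_MASK)) */
	unsigned char *virt = page_virt + (bus_addr - (bus_addr & PAGE_MASK));

	printf("in-page offset = %#llx\n",
	       (unsigned long long)(virt - page_virt));
	return 0;
}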