}
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
- npages = iommu_num_pages(paddr, slen);
+ npages = iommu_nr_pages(paddr, slen);
entry = iommu_range_alloc(dev, iommu, npages, &handle);
/* Handle failure */
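
How many entries get reserved here depends on where the segment starts inside its first IO page, not just on slen. A worked illustration with made-up numbers, assuming sparc64's 8 KB IO pages:

/*
 * Illustration only (hypothetical values, not from the patch):
 *
 *   paddr = 0x10001f00   -> 0x1f00 bytes into IO page 0x10000000
 *   slen  = 0x200        -> the segment ends at 0x10002100
 *
 * The bytes [0x10001f00, 0x10002100) touch IO pages 0x10000000 and
 * 0x10002000, so npages is 2 and iommu_range_alloc() must reserve two
 * consecutive IOMMU entries even though slen is well under 8 KB.
 */
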
iopte_t *base;
vaddr = s->dma_address & IO_PAGE_MASK;
- npages = iommu_num_pages(s->dma_address, s->dma_length);
+ npages = iommu_nr_pages(s->dma_address, s->dma_length);
iommu_range_free(iommu, vaddr, npages);
entry = (vaddr - iommu->page_table_map_base)
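
In this unwind path the freed range is addressed by its IO-page-aligned base, because s->dma_address still carries the buffer's offset within its first IO page; masking that off also gives the value that is rebased against page_table_map_base to find the first page-table entry to clear. A minimal sketch of that arithmetic, assuming the sparc64 IO_PAGE_* macros (the _sketch name is illustrative, not from the patch):

/* Sketch only: recover the first IOMMU page-table index for a mapping. */
static unsigned long first_iopte_index_sketch(unsigned long dma_address,
					      unsigned long page_table_map_base)
{
	/* Drop the intra-page offset to get the first mapped IO page. */
	unsigned long vaddr = dma_address & IO_PAGE_MASK;

	/* Rebase against the start of the DVMA window, then scale to entries. */
	return (vaddr - page_table_map_base) >> IO_PAGE_SHIFT;
}
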
if (!len)
break;
- npages = iommu_num_pages(dma_handle, len);
+ npages = iommu_nr_pages(dma_handle, len);
iommu_range_free(iommu, dma_handle, npages);
entry = ((dma_handle - iommu->page_table_map_base)
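
The break on !len works because, elsewhere in these drivers, map_sg zeroes dma_length on the entry after the last one it actually used (segments can be coalesced, so fewer than nelems entries may end up populated). A hedged sketch of walking only the populated part of such a list; the helper name is made up for illustration:

#include <linux/scatterlist.h>

/* Sketch only: visit the mapped (possibly coalesced) entries of a list. */
static void for_each_mapped_sg_sketch(struct scatterlist *sglist, int nelems)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nelems, i) {
		if (!sg_dma_len(sg))
			break;	/* first unused slot: stop, like the unmap loop above */

		/* sg_dma_address(sg) and sg_dma_len(sg) are valid here. */
	}
}
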
#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
-static inline unsigned long iommu_num_pages(unsigned long vaddr,
+static inline unsigned long iommu_nr_pages(unsigned long vaddr,
unsigned long slen)
{
unsigned long npages;
struct scatterlist *sg)
{
unsigned long paddr = SG_ENT_PHYS_ADDRESS(outs);
- int nr = iommu_num_pages(paddr, outs->dma_length + sg->length);
+ int nr = iommu_nr_pages(paddr, outs->dma_length + sg->length);
return iommu_is_span_boundary(entry, nr, shift, boundary_size);
}
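
The body of the renamed helper is elided above; it only rounds the byte range [vaddr, vaddr + slen) out to whole IO pages and returns how many it spans. A minimal sketch of that computation, assuming the sparc64 IO_PAGE_* macros (illustrative, not the verbatim body from iommu_common.h):

static inline unsigned long iommu_nr_pages_sketch(unsigned long vaddr,
						  unsigned long slen)
{
	unsigned long start = vaddr & IO_PAGE_MASK;		/* round start down */
	unsigned long end = IO_PAGE_ALIGN(vaddr + slen);	/* round end up */

	return (end - start) >> IO_PAGE_SHIFT;			/* IO pages spanned */
}

is_span_boundary() above feeds the prospective merged length (outs->dma_length + sg->length) through the same page count so that iommu_is_span_boundary() can veto a coalesce that would cross a boundary_size-aligned DMA boundary.
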
}
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
- npages = iommu_num_pages(paddr, slen);
+ npages = iommu_nr_pages(paddr, slen);
entry = iommu_range_alloc(dev, iommu, npages, &handle);
/* Handle failure */
unsigned long vaddr, npages;
vaddr = s->dma_address & IO_PAGE_MASK;
- npages = iommu_num_pages(s->dma_address, s->dma_length);
+ npages = iommu_nr_pages(s->dma_address, s->dma_length);
iommu_range_free(iommu, vaddr, npages);
/* XXX demap? XXX */
s->dma_address = DMA_ERROR_CODE;
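
When this unwind runs, every partially mapped segment is returned to the allocator and poisoned with DMA_ERROR_CODE, and the map_sg call as a whole fails. From a driver's point of view that failure shows up as dma_map_sg() returning 0; a hedged caller-side sketch (function name and error choice are illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Sketch only: how a driver would react to the failure path above. */
static int map_buffers_sketch(struct device *dev, struct scatterlist *sgl,
			      int nents)
{
	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (count == 0)
		return -ENOMEM;	/* nothing stayed mapped; no unmap needed */

	/*
	 * Program the device with the 'count' coalesced entries, then later
	 * release them with dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE),
	 * passing the original nents, not count.
	 */
	return 0;
}
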
if (!len)
break;
- npages = iommu_num_pages(dma_handle, len);
+ npages = iommu_nr_pages(dma_handle, len);
iommu_range_free(iommu, dma_handle, npages);
entry = ((dma_handle - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
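
The index arithmetic on the last line is the same rebase-and-shift used in the unwind path earlier; with concrete made-up numbers and sparc64's 13-bit IO page shift:

/*
 * Worked illustration (hypothetical values, not from the patch):
 *
 *   iommu->page_table_map_base = 0xfffc0000
 *   dma_handle                 = 0xfffc6a20
 *
 *   dma_handle - page_table_map_base = 0x6a20
 *   0x6a20 >> IO_PAGE_SHIFT (13)     = 3
 *
 * so the teardown starts at IOMMU page-table entry 3, and the npages
 * entries from there are the ones iommu_range_free() just returned.
 */
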