Write dma_length of each leader with the combined lengths of
the mergable followers. */
-#define SG_ENT_VIRT_ADDRESS(SG) (page_address((SG)->page) + (SG)->offset)
+#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
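For reference, the sg_page() and sg_virt() accessors that these hunks switch to live in <linux/scatterlist.h>. A simplified sketch of the era's definitions (CONFIG_DEBUG_SG checks omitted) — the point being that the low bits of page_link now carry chain/end markers, so open-coded sg->page dereferences are no longer safe:

	/* Sketch of the scatterlist accessors, debug checks omitted. */
	static inline struct page *sg_page(struct scatterlist *sg)
	{
		/* Bits 0-1 of page_link flag chain entries and list end. */
		return (struct page *)(sg->page_link & ~0x3);
	}

	static inline void *sg_virt(struct scatterlist *sg)
	{
		/* Kernel virtual address of the segment (lowmem pages only). */
		return page_address(sg_page(sg)) + sg->offset;
	}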
static void
BUG_ON(dir == DMA_NONE);
for (i = 0; i < nents; i++, sg++) {
- struct page *page = sg->page;
+ struct page *page = sg_page(sg);
unsigned int offset = sg->offset;
unsigned int length = sg->length;
void *ptr = page_address(page) + offset;
BUG_ON(direction == DMA_NONE);
for (i = 0; i < nents; i++, sg++) {
- sg->dma_address = (dma_addr_t)(page_address(sg->page) +
- sg->offset);
+ sg->dma_address = (dma_addr_t) sg_virt(sg);
invalidate_dcache_range(sg_dma_address(sg),
sg_dma_address(sg) +
static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
-#define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset)
+#define sba_sg_address(sg) sg_virt((sg))
#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
stat.fd = desc[sc->device->id];
scsi_for_each_sg(sc, sl, scsi_sg_count(sc), i) {
- req.addr = __pa(page_address(sl->page) + sl->offset);
+ req.addr = __pa(sg_virt(sl));
req.len = sl->length;
if (DBG)
printk("simscsi_sg_%s @ %lx (off %lx) use_sg=%d len=%d\n",
if (!len)
break;
thislen = min(len, slp->length);
- memcpy(page_address(slp->page) + slp->offset, buf, thislen);
+ memcpy(sg_virt(slp), buf, thislen);
len -= thislen;
}
}
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
-#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
+#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
/**
int i;
for (i = 0; i < nents; sg++, i++) {
- sg->dma_address = page_to_phys(sg->page) + sg->offset;
+ sg->dma_address = sg_phys(sg);
dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
}
return nents;
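sg_phys(), used just above, is the physical-address counterpart of sg_virt(); roughly:

	static inline dma_addr_t sg_phys(struct scatterlist *sg)
	{
		/* Physical address of the start of the segment. */
		return page_to_phys(sg_page(sg)) + sg->offset;
	}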
for (i = 0; i < nents; i++, sg++) {
unsigned long addr;
- addr = (unsigned long) page_address(sg->page);
+ addr = (unsigned long) sg_virt(sg);
if (!plat_device_is_coherent(dev) && addr)
- __dma_sync(addr + sg->offset, sg->length, direction);
+ __dma_sync(addr, sg->length, direction);
sg->dma_address = plat_map_dma_mem(dev,
- (void *)(addr + sg->offset),
- sg->length);
+ (void *)addr, sg->length);
}
return nents;
for (i = 0; i < nhwentries; i++, sg++) {
if (!plat_device_is_coherent(dev) &&
direction != DMA_TO_DEVICE) {
- addr = (unsigned long) page_address(sg->page);
+ addr = (unsigned long) sg_virt(sg);
if (addr)
- __dma_sync(addr + sg->offset, sg->length,
- direction);
+ __dma_sync(addr, sg->length, direction);
}
plat_unmap_dma_mem(sg->dma_address);
}
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
if (cpu_is_noncoherent_r10000(dev))
- __dma_sync((unsigned long)page_address(sg->page),
+ __dma_sync((unsigned long)page_address(sg_page(sg)),
sg->length, direction);
plat_unmap_dma_mem(sg->dma_address);
}
/* Make sure that gcc doesn't leave the empty loop body. */
for (i = 0; i < nelems; i++, sg++) {
if (!plat_device_is_coherent(dev))
- __dma_sync((unsigned long)page_address(sg->page),
+ __dma_sync((unsigned long)page_address(sg_page(sg)),
sg->length, direction);
plat_unmap_dma_mem(sg->dma_address);
}
int i;
for_each_sg(sgl, sg, nents, i) {
- sg->dma_address = (page_to_phys(sg->page) + sg->offset) |
- dma_direct_offset;
+ sg->dma_address = sg_phys(sg) | dma_direct_offset;
sg->dma_length = sg->length;
}
int i;
for_each_sg(sgl, sg, nents, i) {
- sg->dma_address = (dma_addr_t)page_address(sg->page)
- + sg->offset;
+ sg->dma_address = (dma_addr_t) sg_virt(sg);
sg->dma_length = sg->length;
}
continue;
}
/* Allocate iommu entries for that segment */
- vaddr = (unsigned long)page_address(s->page) + s->offset;
+ vaddr = (unsigned long) sg_virt(s);
npages = iommu_num_pages(vaddr, slen);
entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
int i;
for_each_sg(sgl, sg, nents, i) {
- int result = ps3_dma_map(dev->d_region,
- page_to_phys(sg->page) + sg->offset, sg->length,
- &sg->dma_address, 0);
+ int result = ps3_dma_map(dev->d_region, sg_phys(sg),
+ sg->length, &sg->dma_address, 0);
if (result) {
pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
BUG_ON(direction == PCI_DMA_NONE);
/* IIep is write-through, not flushing. */
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg->page) == NULL);
- sg->dvma_address =
- virt_to_phys(page_address(sg->page)) + sg->offset;
+ BUG_ON(page_address(sg_page(sg)) == NULL);
+ sg->dvma_address = virt_to_phys(sg_virt(sg));
sg->dvma_length = sg->length;
}
return nents;
BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg->page) == NULL);
+ BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
- (unsigned long) page_address(sg->page),
+ (unsigned long) page_address(sg_page(sg)),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
}
}
BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg->page) == NULL);
+ BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
- (unsigned long) page_address(sg->page),
+ (unsigned long) page_address(sg_page(sg)),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
}
}
BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
- BUG_ON(page_address(sg->page) == NULL);
+ BUG_ON(page_address(sg_page(sg)) == NULL);
mmu_inval_dma_area(
- (unsigned long) page_address(sg->page),
+ (unsigned long) page_address(sg_page(sg)),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
}
}
spin_lock_irqsave(&iounit->lock, flags);
while (sz != 0) {
--sz;
- sg->dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg->page) + sg->offset, sg->length);
+ sg->dvma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
sg->dvma_length = sg->length;
sg = sg_next(sg);
}
while (sz != 0) {
--sz;
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
- sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
+ sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
sg->dvma_length = (__u32) sg->length;
sg = sg_next(sg);
}
while (sz != 0) {
--sz;
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
- sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
+ sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
sg->dvma_length = (__u32) sg->length;
sg = sg_next(sg);
}
* XXX Is this a good assumption?
* XXX What if someone else unmaps it here and races us?
*/
- if ((page = (unsigned long) page_address(sg->page)) != 0) {
+ if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
for (i = 0; i < n; i++) {
if (page != oldpage) { /* Already flushed? */
flush_page_for_dma(page);
}
}
- sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
+ sg->dvma_address = iommu_get_one(sg_page(sg), n, sbus) + sg->offset;
sg->dvma_length = (__u32) sg->length;
sg = sg_next(sg);
}
{
while (sz != 0) {
--sz;
- sg->dvma_address = (__u32)sun4c_lockarea(page_address(sg->page) + sg->offset, sg->length);
+ sg->dvma_address = (__u32)sun4c_lockarea(sg_virt(sg), sg->length);
sg->dvma_length = sg->length;
sg = sg_next(sg);
}
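The sparc loops above advance with sg = sg_next(sg) rather than sg++, since a chained scatterlist entry may redirect to a follow-on table rather than sit in a flat array. A simplified sg_next():

	static inline struct scatterlist *sg_next(struct scatterlist *sg)
	{
		if (sg_is_last(sg))		/* end marker: bit 1 of page_link */
			return NULL;

		sg++;
		if (unlikely(sg_is_chain(sg)))	/* chain marker: bit 0 */
			sg = sg_chain_ptr(sg);	/* jump to the linked table */

		return sg;
	}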
spin_unlock_irqrestore(&iommu->lock, flags);
}
-#define SG_ENT_PHYS_ADDRESS(SG) \
- (__pa(page_address((SG)->page)) + (SG)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
static void fill_sg(iopte_t *iopte, struct scatterlist *sg,
int nused, int nelems,
/* Fast path single entry scatterlists. */
if (nelems == 1) {
sglist->dma_address =
- dma_4u_map_single(dev,
- (page_address(sglist->page) +
- sglist->offset),
+ dma_4u_map_single(dev, sg_virt(sglist),
sglist->length, direction);
if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
return 0;
daddr = dma_sg->dma_address;
sglen = sg->length;
- sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
+ sgaddr = (unsigned long) sg_virt(sg);
while (dlen > 0) {
unsigned long paddr;
sg = sg_next(sg);
if (--nents <= 0)
break;
- sgaddr = (unsigned long) (page_address(sg->page) + sg->offset);
+ sgaddr = (unsigned long) sg_virt(sg);
sglen = sg->length;
}
if (dlen < 0) {
printk("sg(%d): page_addr(%p) off(%x) length(%x) "
"dma_address[%016x] dma_length[%016x]\n",
i,
- page_address(sg->page), sg->offset,
+ page_address(sg_page(sg)), sg->offset,
sg->length,
sg->dma_address, sg->dma_length);
}
unsigned long prev;
u32 dent_addr, dent_len;
- prev = (unsigned long) (page_address(sg->page) + sg->offset);
+ prev = (unsigned long) sg_virt(sg);
prev += (unsigned long) (dent_len = sg->length);
- dent_addr = (u32) ((unsigned long)(page_address(sg->page) + sg->offset)
- & (IO_PAGE_SIZE - 1UL));
+ dent_addr = (u32) ((unsigned long)(sg_virt(sg)) & (IO_PAGE_SIZE - 1UL));
while (--nents) {
unsigned long addr;
sg = sg_next(sg);
- addr = (unsigned long) (page_address(sg->page) + sg->offset);
+ addr = (unsigned long) sg_virt(sg);
if (! VCONTIG(prev, addr)) {
dma_sg->dma_address = dent_addr;
dma_sg->dma_length = dent_len;
static int sg_count_one(struct scatterlist *sg)
{
- unsigned long base = page_to_pfn(sg->page) << PAGE_SHIFT;
+ unsigned long base = page_to_pfn(sg_page(sg)) << PAGE_SHIFT;
long len = sg->length;
if ((sg->offset | len) & (8UL - 1))
spin_unlock_irqrestore(&iommu->lock, flags);
}
-#define SG_ENT_PHYS_ADDRESS(SG) \
- (__pa(page_address((SG)->page)) + (SG)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
static long fill_sg(long entry, struct device *dev,
struct scatterlist *sg,
/* Fast path single entry scatterlists. */
if (nelems == 1) {
sglist->dma_address =
- dma_4v_map_single(dev,
- (page_address(sglist->page) +
- sglist->offset),
+ dma_4v_map_single(dev, sg_virt(sglist),
sglist->length, direction);
if (unlikely(sglist->dma_address == DMA_ERROR_CODE))
return 0;
int i;
for_each_sg(sg, s, nelems, i) {
- BUG_ON(!s->page);
- s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+ struct page *p = sg_page(s);
+
+ BUG_ON(!p);
+ s->dma_address = virt_to_bus(sg_virt(s));
s->dma_length = s->length;
}
return nelems;
return calgary_nontranslate_map_sg(dev, sg, nelems, direction);
for_each_sg(sg, s, nelems, i) {
- BUG_ON(!s->page);
+ BUG_ON(!sg_page(s));

- vaddr = (unsigned long)page_address(s->page) + s->offset;
+ vaddr = (unsigned long) sg_virt(s);
npages = num_dma_pages(vaddr, s->length);
entry = iommu_range_alloc(tbl, npages);
#endif
for_each_sg(sg, s, nents, i) {
- unsigned long addr = page_to_phys(s->page) + s->offset;
+ unsigned long addr = sg_phys(s);
if (nonforced_iommu(dev, addr, s->length)) {
addr = dma_map_area(dev, addr, s->length, dir);
if (addr == bad_dma_address) {
start_sg = sgmap = sg;
ps = NULL; /* shut up gcc */
for_each_sg(sg, s, nents, i) {
- dma_addr_t addr = page_to_phys(s->page) + s->offset;
+ dma_addr_t addr = sg_phys(s);
s->dma_address = addr;
BUG_ON(s->length == 0);
int i;
for_each_sg(sg, s, nents, i) {
- BUG_ON(!s->page);
- s->dma_address = virt_to_bus(page_address(s->page) +s->offset);
+ BUG_ON(!sg_page(s));
+ s->dma_address = virt_to_bus(sg_virt(s));
if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
return 0;
s->dma_length = s->length;
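On the producer side the same rule applies: lists handed to these map_sg() implementations are expected to be built through the accessors. A minimal caller-side sketch, using the (current) four-argument sg_set_page(), where page, len and offset stand in for the caller's buffer:

	struct scatterlist sg;

	sg_init_table(&sg, 1);			/* zero the entry, set the end marker */
	sg_set_page(&sg, page, len, offset);	/* encode the page in page_link */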