Merge branch 'master' into next

Conflicts:
	fs/nfsd/nfs4recover.c

Manually fixed above to use new creds API functions, e.g.
nfs4_save_creds().

Signed-off-by: James Morris <jmorris@namei.org>
Author: James Morris <jmorris@namei.org>
Date:   2008-12-04 17:16:36 +11:00

541 files changed, 8316 insertions(+), 4601 deletions(-)
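The conflict resolution in fs/nfsd/nfs4recover.c switches that file to the new credentials API merged in this window. As background, a helper like nfs4_save_creds() wraps the prepare_creds()/override_creds()/revert_creds() pattern. The sketch below is illustrative only: the example_* names are hypothetical and the helper's exact shape in the merged code may differ, but the cred calls themselves are the real API.

#include <linux/cred.h>
#include <linux/errno.h>

/*
 * Illustrative sketch (not the merged nfsd code): temporarily act with
 * root filesystem identity for recovery-directory I/O and hand the
 * previously active credentials back to the caller for later restore.
 */
static int example_save_creds(const struct cred **original)
{
	struct cred *new;

	new = prepare_creds();			/* writable copy of current creds */
	if (!new)
		return -ENOMEM;

	new->fsuid = 0;				/* filesystem accesses as root */
	new->fsgid = 0;

	*original = override_creds(new);	/* install copy, return old creds */
	put_cred(new);				/* override_creds() took its own ref */
	return 0;
}

static void example_reset_creds(const struct cred *original)
{
	revert_creds(original);			/* drop override, restore old creds */
}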

--- a/lib/idr.c
+++ b/lib/idr.c

@@ -185,6 +185,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			new = get_from_free_list(idp);
 			if (!new)
 				return -1;
+			new->layer = l-1;
 			rcu_assign_pointer(p->ary[m], new);
 			p->count++;
 		}
@@ -210,6 +211,7 @@ build_up:
 	if (unlikely(!p)) {
 		if (!(p = get_from_free_list(idp)))
 			return -1;
+		p->layer = 0;
 		layers = 1;
 	}
 	/*
@@ -237,6 +239,7 @@ build_up:
 		}
 		new->ary[0] = p;
 		new->count = 1;
+		new->layer = layers-1;
 		if (p->bitmap == IDR_FULL)
 			__set_bit(0, &new->bitmap);
 		p = new;
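Taken together, the three hunks above establish the invariant that every idr_layer records its own height: a freshly grown top gets layer = layers-1, an empty tree's single node gets layer 0, and a child allocated in sub_alloc() sits exactly one level below its parent. A minimal illustration of that invariant (the example_ name is hypothetical):

#include <linux/idr.h>
#include <linux/kernel.h>

/* Illustrative only: the relationship the ->layer assignments above
 * maintain, and which the BUG_ON() added to idr_find() below checks. */
static void example_check_child_layer(const struct idr_layer *parent,
				      const struct idr_layer *child)
{
	BUG_ON(child->layer != parent->layer - 1);	/* leaves end at 0 */
}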
@@ -493,17 +496,21 @@ void *idr_find(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
-	n = idp->layers * IDR_BITS;
 	p = rcu_dereference(idp->top);
+	if (!p)
+		return NULL;
+	n = (p->layer+1) * IDR_BITS;
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
 
 	if (id >= (1 << n))
 		return NULL;
+	BUG_ON(n == 0);
 
 	while (n > 0 && p) {
 		n -= IDR_BITS;
+		BUG_ON(n != p->layer*IDR_BITS);
 		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 	}
 	return((void *)p);
@@ -582,8 +589,11 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	int n;
 	struct idr_layer *p, *old_p;
 
-	n = idp->layers * IDR_BITS;
 	p = idp->top;
+	if (!p)
+		return ERR_PTR(-EINVAL);
+
+	n = (p->layer+1) * IDR_BITS;
 
 	id &= MAX_ID_MASK;
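With ->layer in place, lockless readers such as idr_find() size their walk from the RCU-dereferenced top node rather than from idp->layers, which can lag behind the pointer they actually loaded. Below is a minimal sketch of the 2.6.28-era allocate/lookup pattern those readers serve; the example_* names are hypothetical, while the IDR calls themselves are the API of that time.

#include <linux/idr.h>
#include <linux/rcupdate.h>

static DEFINE_IDR(example_idr);			/* illustrative instance */

/* Allocate an id for @obj using the old two-step preload API. */
static int example_store(void *obj, int *id)
{
	int ret;

	do {
		if (!idr_pre_get(&example_idr, GFP_KERNEL))
			return -ENOMEM;		/* could not preload a layer */
		ret = idr_get_new(&example_idr, obj, id);
	} while (ret == -EAGAIN);		/* raced, preload again */

	return ret;
}

/* Lockless lookup: idr_find() walks the layer tree under RCU. */
static void *example_lookup(int id)
{
	void *obj;

	rcu_read_lock();
	obj = idr_find(&example_idr, id);
	rcu_read_unlock();
	return obj;
}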

--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c

@@ -395,7 +395,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 			WARN_ON(!irqs_disabled());
 			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
 		} else
-			kunmap(miter->addr);
+			kunmap(miter->page);
 
 		miter->page = NULL;
 		miter->addr = NULL;
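The fix matters because kunmap() takes the struct page * that was handed to kmap(), not the kernel virtual address stored in miter->addr, so the non-atomic path was unmapping the wrong thing. For context, a minimal sketch of the mapping-iterator pattern that ends in sg_miter_stop(); example_copy_from_sg() is a hypothetical caller, the sg_miter_* calls are the real API.

#include <linux/scatterlist.h>
#include <linux/string.h>

/*
 * Illustrative only: copy up to @buflen bytes described by a
 * scatterlist into @buf.  Each sg_miter_next() maps one chunk;
 * sg_miter_stop() (the function patched above) unmaps the last one.
 */
static size_t example_copy_from_sg(struct scatterlist *sgl, unsigned int nents,
				   void *buf, size_t buflen)
{
	struct sg_mapping_iter miter;
	size_t copied = 0;

	sg_miter_start(&miter, sgl, nents, 0);	/* 0: non-atomic, uses kmap() */

	while (copied < buflen && sg_miter_next(&miter)) {
		size_t len = min(miter.length, buflen - copied);

		memcpy(buf + copied, miter.addr, len);
		copied += len;
	}

	sg_miter_stop(&miter);			/* kunmap()s the final chunk */
	return copied;
}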

--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c

@@ -467,9 +467,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dma_addr_t dev_addr;
 	void *ret;
 	int order = get_order(size);
+	u64 dma_mask = DMA_32BIT_MASK;
+
+	if (hwdev && hwdev->coherent_dma_mask)
+		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
+	if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -493,9 +497,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (address_needs_mapping(hwdev, dev_addr, size)) {
+	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-		       (unsigned long long)*hwdev->dma_mask,
+		       (unsigned long long)dma_mask,
 		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
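These hunks validate the coherent allocation against the device's coherent_dma_mask (defaulting to DMA_32BIT_MASK) instead of the streaming dma_mask that address_needs_mapping() consulted, as the old printk argument shows. The helper's shape, reconstructed from memory rather than quoted, is roughly a bounds test of the buffer against the mask:

#include <linux/types.h>

/*
 * Approximate reconstruction (not a verbatim quote of the header):
 * a buffer is DMA-capable for a mask if the whole [addr, addr + size)
 * range lies at or below the mask.
 */
static inline int example_is_buffer_dma_capable(u64 mask, dma_addr_t addr,
						size_t size)
{
	return addr + size <= mask;
}

Drivers that can reach more coherent memory than the 32-bit default advertise it by raising hwdev->coherent_dma_mask, e.g. via pci_set_consistent_dma_mask(), which is what lets the allocation above skip the swiotlb fallback.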