Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock:
  hwspinlock: add MAINTAINERS entries
  hwspinlock/omap: omap_hwspinlock_remove should be __devexit
  hwspinlock/u8500: add hwspinlock driver
  hwspinlock/core: register a bank of hwspinlocks in a single API call
  hwspinlock/core: remove stubs for register/unregister
  hwspinlock/core: use a mutex to protect the radix tree
  hwspinlock/core/omap: fix id issues on multiple hwspinlock devices
  hwspinlock/omap: simplify allocation scheme
  hwspinlock/core: simplify 'owner' handling
  hwspinlock/core: simplify Kconfig

Fix up trivial conflicts (addition of omap_hwspinlock_pdata, removal of
omap_spinlock_latency) in arch/arm/mach-omap2/hwspinlock.c

Also, do an "evil merge" to fix a compile error in omap_hsmmc.c which for some
reason was reported in the same email thread as the "please pull hwspinlock
changes".
--- a/Documentation/hwspinlock.txt
+++ b/Documentation/hwspinlock.txt
@@ -39,23 +39,20 @@ independent, drivers.
   in case an unused hwspinlock isn't available. Users of this
   API will usually want to communicate the lock's id to the remote core
   before it can be used to achieve synchronization.
-  Can be called from an atomic context (this function will not sleep) but
-  not from within interrupt context.
+  Should be called from a process context (might sleep).

   struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
   - assign a specific hwspinlock id and return its address, or NULL
   if that hwspinlock is already in use. Usually board code will
   be calling this function in order to reserve specific hwspinlock
   ids for predefined purposes.
-  Can be called from an atomic context (this function will not sleep) but
-  not from within interrupt context.
+  Should be called from a process context (might sleep).

   int hwspin_lock_free(struct hwspinlock *hwlock);
   - free a previously-assigned hwspinlock; returns 0 on success, or an
   appropriate error code on failure (e.g. -EINVAL if the hwspinlock
   is already free).
-  Can be called from an atomic context (this function will not sleep) but
-  not from within interrupt context.
+  Should be called from a process context (might sleep).

   int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout);
   - lock a previously-assigned hwspinlock with a timeout limit (specified in
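
[For illustration only; the snippet below is not part of this merge.] A minimal
sketch of a client of the API documented above: my_sync_init() and MY_LOCK_ID
are hypothetical names, while hwspin_lock_request_specific(),
hwspin_lock_timeout(), hwspin_unlock() and hwspin_lock_free() are the calls
this document describes.

#include <linux/hwspinlock.h>

#define MY_LOCK_ID	0	/* hypothetical id reserved by board code */

static int my_sync_init(void)
{
	struct hwspinlock *hwlock;
	int ret;

	/* grab the lock that board code reserved for this purpose */
	hwlock = hwspin_lock_request_specific(MY_LOCK_ID);
	if (!hwlock)
		return -EBUSY;

	/* take the hardware lock, but give up after 100 msecs */
	ret = hwspin_lock_timeout(hwlock, 100);
	if (ret)
		goto free;

	/* ... access the resource shared with the remote core ... */

	hwspin_unlock(hwlock);
free:
	hwspin_lock_free(hwlock);
	return ret;
}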
@@ -230,45 +227,62 @@ int hwspinlock_example2(void)

 4. API for implementors

-  int hwspin_lock_register(struct hwspinlock *hwlock);
+  int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
+		const struct hwspinlock_ops *ops, int base_id, int num_locks);
   - to be called from the underlying platform-specific implementation, in
-  order to register a new hwspinlock instance. Can be called from an atomic
-  context (this function will not sleep) but not from within interrupt
-  context. Returns 0 on success, or appropriate error code on failure.
+  order to register a new hwspinlock device (which is usually a bank of
+  numerous locks). Should be called from a process context (this function
+  might sleep).
+  Returns 0 on success, or appropriate error code on failure.

-  struct hwspinlock *hwspin_lock_unregister(unsigned int id);
+  int hwspin_lock_unregister(struct hwspinlock_device *bank);
   - to be called from the underlying vendor-specific implementation, in order
-  to unregister an existing (and unused) hwspinlock instance.
-  Can be called from an atomic context (will not sleep) but not from
-  within interrupt context.
+  to unregister an hwspinlock device (which is usually a bank of numerous
+  locks).
+  Should be called from a process context (this function might sleep).
   Returns the address of hwspinlock on success, or NULL on error (e.g.
   if the hwspinlock is sill in use).

-5. struct hwspinlock
+5. Important structs

-This struct represents an hwspinlock instance. It is registered by the
-underlying hwspinlock implementation using the hwspin_lock_register() API.
+struct hwspinlock_device is a device which usually contains a bank
+of hardware locks. It is registered by the underlying hwspinlock
+implementation using the hwspin_lock_register() API.

 /**
- * struct hwspinlock - vendor-specific hwspinlock implementation
- *
- * @dev: underlying device, will be used with runtime PM api
- * @ops: vendor-specific hwspinlock handlers
- * @id: a global, unique, system-wide, index of the lock.
- * @lock: initialized and used by hwspinlock core
- * @owner: underlying implementation module, used to maintain module ref count
+ * struct hwspinlock_device - a device which usually spans numerous hwspinlocks
+ * @dev: underlying device, will be used to invoke runtime PM api
+ * @ops: platform-specific hwspinlock handlers
+ * @base_id: id index of the first lock in this device
+ * @num_locks: number of locks in this device
+ * @lock: dynamically allocated array of 'struct hwspinlock'
  */
-struct hwspinlock {
+struct hwspinlock_device {
	struct device *dev;
	const struct hwspinlock_ops *ops;
-	int id;
-	spinlock_t lock;
-	struct module *owner;
+	int base_id;
+	int num_locks;
+	struct hwspinlock lock[0];
 };

-The underlying implementation is responsible to assign the dev, ops, id and
-owner members. The lock member, OTOH, is initialized and used by the hwspinlock
-core.
+struct hwspinlock_device contains an array of hwspinlock structs, each
+of which represents a single hardware lock:
+
+/**
+ * struct hwspinlock - this struct represents a single hwspinlock instance
+ * @bank: the hwspinlock_device structure which owns this lock
+ * @lock: initialized and used by hwspinlock core
+ * @priv: private data, owned by the underlying platform-specific hwspinlock drv
+ */
+struct hwspinlock {
+	struct hwspinlock_device *bank;
+	spinlock_t lock;
+	void *priv;
+};
+
+When registering a bank of locks, the hwspinlock driver only needs to
+set the priv members of the locks. The rest of the members are set and
+initialized by the hwspinlock core itself.

 6. Implementation callbacks

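[For illustration only; the snippet below is not part of this merge.] A sketch
of how a platform-specific driver would use the bank registration API described
above. Every "foo" identifier and FOO_NUM_LOCKS are hypothetical; the omap and
u8500 drivers touched later in this merge follow this same pattern, and the
register convention assumed in foo_hwspinlock_trylock() (reading 0 means the
lock was acquired) is borrowed from the omap driver.

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "hwspinlock_internal.h"

#define FOO_NUM_LOCKS	32	/* hypothetical number of locks in the bank */

static int foo_hwspinlock_trylock(struct hwspinlock *lock)
{
	void __iomem *lock_addr = lock->priv;

	/* reading 0 is assumed to mean "lock acquired" */
	return readl(lock_addr) == 0;
}

static void foo_hwspinlock_unlock(struct hwspinlock *lock)
{
	void __iomem *lock_addr = lock->priv;

	/* release the lock by writing 0 to it */
	writel(0, lock_addr);
}

static const struct hwspinlock_ops foo_hwspinlock_ops = {
	.trylock	= foo_hwspinlock_trylock,
	.unlock		= foo_hwspinlock_unlock,
};

static int __devinit foo_hwspinlock_probe(struct platform_device *pdev)
{
	struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
	struct hwspinlock_device *bank;
	struct resource *res;
	void __iomem *io_base;
	int i, ret;

	if (!pdata)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	io_base = ioremap(res->start, resource_size(res));
	if (!io_base)
		return -ENOMEM;

	/* one allocation covers the bank header plus all of its locks */
	bank = kzalloc(sizeof(*bank) + FOO_NUM_LOCKS * sizeof(struct hwspinlock),
		       GFP_KERNEL);
	if (!bank) {
		iounmap(io_base);
		return -ENOMEM;
	}

	/* the driver only fills in each lock's priv pointer */
	for (i = 0; i < FOO_NUM_LOCKS; i++)
		bank->lock[i].priv = io_base + sizeof(u32) * i;

	platform_set_drvdata(pdev, bank);

	ret = hwspin_lock_register(bank, &pdev->dev, &foo_hwspinlock_ops,
				   pdata->base_id, FOO_NUM_LOCKS);
	if (ret) {
		kfree(bank);
		iounmap(io_base);
	}
	return ret;
}
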
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3018,6 +3018,13 @@ F: Documentation/hw_random.txt
 F:	drivers/char/hw_random/
 F:	include/linux/hw_random.h

+HARDWARE SPINLOCK CORE
+M:	Ohad Ben-Cohen <ohad@wizery.com>
+S:	Maintained
+F:	Documentation/hwspinlock.txt
+F:	drivers/hwspinlock/hwspinlock_*
+F:	include/linux/hwspinlock.h
+
 HARMONY SOUND DRIVER
 M:	Kyle McMartin <kyle@mcmartin.ca>
 L:	linux-parisc@vger.kernel.org
@@ -4714,6 +4721,13 @@ S: Maintained
 F:	drivers/video/omap2/
 F:	Documentation/arm/OMAP/DSS

+OMAP HARDWARE SPINLOCK SUPPORT
+M:	Ohad Ben-Cohen <ohad@wizery.com>
+L:	linux-omap@vger.kernel.org
+S:	Maintained
+F:	drivers/hwspinlock/omap_hwspinlock.c
+F:	arch/arm/mach-omap2/hwspinlock.c
+
 OMAP MMC SUPPORT
 M:	Jarkko Lavinen <jarkko.lavinen@nokia.com>
 L:	linux-omap@vger.kernel.org
--- a/arch/arm/mach-omap2/hwspinlock.c
+++ b/arch/arm/mach-omap2/hwspinlock.c
@@ -19,10 +19,15 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/err.h>
+#include <linux/hwspinlock.h>

 #include <plat/omap_hwmod.h>
 #include <plat/omap_device.h>

+static struct hwspinlock_pdata omap_hwspinlock_pdata __initdata = {
+	.base_id = 0,
+};
+
 int __init hwspinlocks_init(void)
 {
	int retval = 0;
@@ -40,7 +45,9 @@ int __init hwspinlocks_init(void)
	if (oh == NULL)
		return -EINVAL;

-	pdev = omap_device_build(dev_name, 0, oh, NULL, 0, NULL, 0, false);
+	pdev = omap_device_build(dev_name, 0, oh, &omap_hwspinlock_pdata,
+				sizeof(struct hwspinlock_pdata),
+				NULL, 0, false);
	if (IS_ERR(pdev)) {
		pr_err("Can't build omap_device for %s:%s\n", dev_name,
								oh_name);
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -2,22 +2,31 @@
 # Generic HWSPINLOCK framework
 #

+# HWSPINLOCK always gets selected by whoever wants it.
 config HWSPINLOCK
-	tristate "Generic Hardware Spinlock framework"
-	depends on ARCH_OMAP4
-	help
-	  Say y here to support the generic hardware spinlock framework.
-	  You only need to enable this if you have hardware spinlock module
-	  on your system (usually only relevant if your system has remote slave
-	  coprocessors).
+	tristate

-	  If unsure, say N.
+menu "Hardware Spinlock drivers"

 config HWSPINLOCK_OMAP
	tristate "OMAP Hardware Spinlock device"
-	depends on HWSPINLOCK && ARCH_OMAP4
+	depends on ARCH_OMAP4
+	select HWSPINLOCK
	help
	  Say y here to support the OMAP Hardware Spinlock device (firstly
	  introduced in OMAP4).

	  If unsure, say N.

+config HSEM_U8500
+	tristate "STE Hardware Semaphore functionality"
+	depends on ARCH_U8500
+	select HWSPINLOCK
+	help
+	  Say y here to support the STE Hardware Semaphore functionality, which
+	  provides a synchronisation mechanism for the various processor on the
+	  SoC.
+
+	  If unsure, say N.
+
+endmenu
--- a/drivers/hwspinlock/Makefile
+++ b/drivers/hwspinlock/Makefile
@@ -4,3 +4,4 @@

 obj-$(CONFIG_HWSPINLOCK)		+= hwspinlock_core.o
 obj-$(CONFIG_HWSPINLOCK_OMAP)		+= omap_hwspinlock.o
+obj-$(CONFIG_HSEM_U8500)		+= u8500_hsem.o
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -26,6 +26,7 @@
 #include <linux/radix-tree.h>
 #include <linux/hwspinlock.h>
 #include <linux/pm_runtime.h>
+#include <linux/mutex.h>

 #include "hwspinlock_internal.h"

@@ -52,10 +53,12 @@
 static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

 /*
- * Synchronization of access to the tree is achieved using this spinlock,
+ * Synchronization of access to the tree is achieved using this mutex,
  * as the radix-tree API requires that users provide all synchronisation.
+ * A mutex is needed because we're using non-atomic radix tree allocations.
  */
-static DEFINE_SPINLOCK(hwspinlock_tree_lock);
+static DEFINE_MUTEX(hwspinlock_tree_lock);


 /**
  * __hwspin_trylock() - attempt to lock a specific hwspinlock
@@ -114,7 +117,7 @@ int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
		return -EBUSY;

	/* try to take the hwspinlock device */
-	ret = hwlock->ops->trylock(hwlock);
+	ret = hwlock->bank->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
@@ -196,8 +199,8 @@ int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
-		if (hwlock->ops->relax)
-			hwlock->ops->relax(hwlock);
+		if (hwlock->bank->ops->relax)
+			hwlock->bank->ops->relax(hwlock);
	}

	return ret;
@@ -242,7 +245,7 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
	 */
	mb();

-	hwlock->ops->unlock(hwlock);
+	hwlock->bank->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	if (mode == HWLOCK_IRQSTATE)
@@ -254,68 +257,37 @@ void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
 }
 EXPORT_SYMBOL_GPL(__hwspin_unlock);

-/**
- * hwspin_lock_register() - register a new hw spinlock
- * @hwlock: hwspinlock to register.
- *
- * This function should be called from the underlying platform-specific
- * implementation, to register a new hwspinlock instance.
- *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
- *
- * Returns 0 on success, or an appropriate error code on failure
- */
-int hwspin_lock_register(struct hwspinlock *hwlock)
+static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
 {
	struct hwspinlock *tmp;
	int ret;

-	if (!hwlock || !hwlock->ops ||
-		!hwlock->ops->trylock || !hwlock->ops->unlock) {
-		pr_err("invalid parameters\n");
-		return -EINVAL;
+	mutex_lock(&hwspinlock_tree_lock);
+
+	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
+	if (ret) {
+		if (ret == -EEXIST)
+			pr_err("hwspinlock id %d already exists!\n", id);
+		goto out;
	}

-	spin_lock_init(&hwlock->lock);
-
-	spin_lock(&hwspinlock_tree_lock);
-
-	ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
-	if (ret)
-		goto out;
-
	/* mark this hwspinlock as available */
-	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
-							HWSPINLOCK_UNUSED);
+	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

 out:
-	spin_unlock(&hwspinlock_tree_lock);
-	return ret;
+	mutex_unlock(&hwspinlock_tree_lock);
+	return 0;
 }
-EXPORT_SYMBOL_GPL(hwspin_lock_register);

-/**
- * hwspin_lock_unregister() - unregister an hw spinlock
- * @id: index of the specific hwspinlock to unregister
- *
- * This function should be called from the underlying platform-specific
- * implementation, to unregister an existing (and unused) hwspinlock.
- *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context.
- *
- * Returns the address of hwspinlock @id on success, or NULL on failure
- */
-struct hwspinlock *hwspin_lock_unregister(unsigned int id)
+static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
 {
	struct hwspinlock *hwlock = NULL;
	int ret;

-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
@@ -331,9 +303,91 @@ struct hwspinlock *hwspin_lock_unregister(unsigned int id)
	}

 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
 }

+/**
+ * hwspin_lock_register() - register a new hw spinlock device
+ * @bank: the hwspinlock device, which usually provides numerous hw locks
+ * @dev: the backing device
+ * @ops: hwspinlock handlers for this device
+ * @base_id: id of the first hardware spinlock in this bank
+ * @num_locks: number of hwspinlocks provided by this device
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to register a new hwspinlock device instance.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
+		const struct hwspinlock_ops *ops, int base_id, int num_locks)
+{
+	struct hwspinlock *hwlock;
+	int ret = 0, i;
+
+	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
+							!ops->unlock) {
+		pr_err("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	bank->dev = dev;
+	bank->ops = ops;
+	bank->base_id = base_id;
+	bank->num_locks = num_locks;
+
+	for (i = 0; i < num_locks; i++) {
+		hwlock = &bank->lock[i];
+
+		spin_lock_init(&hwlock->lock);
+		hwlock->bank = bank;
+
+		ret = hwspin_lock_register_single(hwlock, i);
+		if (ret)
+			goto reg_failed;
+	}
+
+	return 0;
+
+reg_failed:
+	while (--i >= 0)
+		hwspin_lock_unregister_single(i);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hwspin_lock_register);
+
+/**
+ * hwspin_lock_unregister() - unregister an hw spinlock device
+ * @bank: the hwspinlock device, which usually provides numerous hw locks
+ *
+ * This function should be called from the underlying platform-specific
+ * implementation, to unregister an existing (and unused) hwspinlock.
+ *
+ * Should be called from a process context (might sleep)
+ *
+ * Returns 0 on success, or an appropriate error code on failure
+ */
+int hwspin_lock_unregister(struct hwspinlock_device *bank)
+{
+	struct hwspinlock *hwlock, *tmp;
+	int i;
+
+	for (i = 0; i < bank->num_locks; i++) {
+		hwlock = &bank->lock[i];
+
+		tmp = hwspin_lock_unregister_single(bank->base_id + i);
+		if (!tmp)
+			return -EBUSY;
+
+		/* self-sanity check that should never fail */
+		WARN_ON(tmp != hwlock);
+	}
+
+	return 0;
+}
 EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

 /**
@@ -348,24 +402,25 @@ EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
  */
 static int __hwspin_lock_request(struct hwspinlock *hwlock)
 {
+	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
-	if (!try_module_get(hwlock->owner)) {
-		dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
+	if (!try_module_get(dev->driver->owner)) {
+		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
-	ret = pm_runtime_get_sync(hwlock->dev);
+	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
-		dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
+		dev_err(dev, "%s: can't power on device\n", __func__);
		return ret;
	}

	/* mark hwspinlock as used, should not fail */
-	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
+	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
@@ -387,7 +442,7 @@ int hwspin_lock_get_id(struct hwspinlock *hwlock)
		return -EINVAL;
	}

-	return hwlock->id;
+	return hwlock_to_id(hwlock);
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

@@ -400,9 +455,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
  * to the remote core before it can be used for synchronization (to get the
  * id of a given hwlock, use hwspin_lock_get_id()).
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
@@ -411,7 +464,7 @@ struct hwspinlock *hwspin_lock_request(void)
	struct hwspinlock *hwlock;
	int ret;

-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
@@ -431,7 +484,7 @@ struct hwspinlock *hwspin_lock_request(void)
		hwlock = NULL;

 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request);
@@ -445,9 +498,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request);
  * Usually early board code will be calling this function in order to
  * reserve specific hwspinlock ids for predefined purposes.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns the address of the assigned hwspinlock, or NULL on error
  */
@@ -456,7 +507,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
	struct hwspinlock *hwlock;
	int ret;

-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
@@ -466,7 +517,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
	}

	/* sanity check (this shouldn't happen) */
-	WARN_ON(hwlock->id != id);
+	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
@@ -482,7 +533,7 @@ struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
		hwlock = NULL;

 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
@@ -495,14 +546,13 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
  * Should only be called with an @hwlock that was retrieved from
  * an earlier call to omap_hwspin_lock_request{_specific}.
  *
- * Can be called from an atomic context (will not sleep) but not from
- * within interrupt context (simply because there is no use case for
- * that yet).
+ * Should be called from a process context (might sleep)
  *
  * Returns 0 on success, or an appropriate error code on failure
  */
 int hwspin_lock_free(struct hwspinlock *hwlock)
 {
+	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

@@ -511,34 +561,34 @@ int hwspin_lock_free(struct hwspinlock *hwlock)
		return -EINVAL;
	}

-	spin_lock(&hwspinlock_tree_lock);
+	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
-	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
+	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);
	if (ret == 1) {
-		dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
+		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
-	ret = pm_runtime_put(hwlock->dev);
+	ret = pm_runtime_put(dev);
	if (ret < 0)
		goto out;

	/* mark this hwspinlock as available */
-	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
+	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

-	module_put(hwlock->owner);
+	module_put(dev->driver->owner);

 out:
-	spin_unlock(&hwspinlock_tree_lock);
+	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_free);
--- a/drivers/hwspinlock/hwspinlock_internal.h
+++ b/drivers/hwspinlock/hwspinlock_internal.h
@@ -21,6 +21,8 @@
 #include <linux/spinlock.h>
 #include <linux/device.h>

+struct hwspinlock_device;
+
 /**
  * struct hwspinlock_ops - platform-specific hwspinlock handlers
  *
@@ -39,23 +41,37 @@ struct hwspinlock_ops {

 /**
  * struct hwspinlock - this struct represents a single hwspinlock instance
- *
- * @dev: underlying device, will be used to invoke runtime PM api
- * @ops: platform-specific hwspinlock handlers
- * @id: a global, unique, system-wide, index of the lock.
+ * @bank: the hwspinlock_device structure which owns this lock
  * @lock: initialized and used by hwspinlock core
- * @owner: underlying implementation module, used to maintain module ref count
- *
- * Note: currently simplicity was opted for, but later we can squeeze some
- * memory bytes by grouping the dev, ops and owner members in a single
- * per-platform struct, and have all hwspinlocks point at it.
+ * @priv: private data, owned by the underlying platform-specific hwspinlock drv
  */
 struct hwspinlock {
-	struct device *dev;
-	const struct hwspinlock_ops *ops;
-	int id;
+	struct hwspinlock_device *bank;
	spinlock_t lock;
-	struct module *owner;
+	void *priv;
 };

+/**
+ * struct hwspinlock_device - a device which usually spans numerous hwspinlocks
+ * @dev: underlying device, will be used to invoke runtime PM api
+ * @ops: platform-specific hwspinlock handlers
+ * @base_id: id index of the first lock in this device
+ * @num_locks: number of locks in this device
+ * @lock: dynamically allocated array of 'struct hwspinlock'
+ */
+struct hwspinlock_device {
+	struct device *dev;
+	const struct hwspinlock_ops *ops;
+	int base_id;
+	int num_locks;
+	struct hwspinlock lock[0];
+};
+
+static inline int hwlock_to_id(struct hwspinlock *hwlock)
+{
+	int local_id = hwlock - &hwlock->bank->lock[0];
+
+	return hwlock->bank->base_id + local_id;
+}
+
 #endif /* __HWSPINLOCK_HWSPINLOCK_H */
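[Not part of the diff.] A quick worked example of the id mapping above: a bank
registered with base_id = 32 and num_locks = 4 owns global ids 32..35, so for
hwlock == &bank->lock[2] the helper computes local_id = 2 and hwlock_to_id()
returns 34.
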
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -41,33 +41,20 @@
 #define SPINLOCK_NOTTAKEN		(0)	/* free */
 #define SPINLOCK_TAKEN			(1)	/* locked */

-#define to_omap_hwspinlock(lock)	\
-	container_of(lock, struct omap_hwspinlock, lock)
-
-struct omap_hwspinlock {
-	struct hwspinlock lock;
-	void __iomem *addr;
-};
-
-struct omap_hwspinlock_state {
-	int num_locks;			/* Total number of locks in system */
-	void __iomem *io_base;		/* Mapped base address */
-};
-
 static int omap_hwspinlock_trylock(struct hwspinlock *lock)
 {
-	struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
+	void __iomem *lock_addr = lock->priv;

	/* attempt to acquire the lock by reading its value */
-	return (SPINLOCK_NOTTAKEN == readl(omap_lock->addr));
+	return (SPINLOCK_NOTTAKEN == readl(lock_addr));
 }

 static void omap_hwspinlock_unlock(struct hwspinlock *lock)
 {
-	struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock);
+	void __iomem *lock_addr = lock->priv;

	/* release the lock by writing 0 to it */
-	writel(SPINLOCK_NOTTAKEN, omap_lock->addr);
+	writel(SPINLOCK_NOTTAKEN, lock_addr);
 }

 /*
@@ -93,26 +80,23 @@ static const struct hwspinlock_ops omap_hwspinlock_ops = {

 static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
 {
-	struct omap_hwspinlock *omap_lock;
-	struct omap_hwspinlock_state *state;
-	struct hwspinlock *lock;
+	struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
+	struct hwspinlock_device *bank;
+	struct hwspinlock *hwlock;
	struct resource *res;
	void __iomem *io_base;
-	int i, ret;
+	int num_locks, i, ret;

+	if (!pdata)
+		return -ENODEV;
+
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

-	state = kzalloc(sizeof(*state), GFP_KERNEL);
-	if (!state)
-		return -ENOMEM;
-
	io_base = ioremap(res->start, resource_size(res));
-	if (!io_base) {
-		ret = -ENOMEM;
-		goto free_state;
-	}
+	if (!io_base)
+		return -ENOMEM;

	/* Determine number of locks */
	i = readl(io_base + SYSSTATUS_OFFSET);
@@ -124,10 +108,18 @@ static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
		goto iounmap_base;
	}

-	state->num_locks = i * 32;
-	state->io_base = io_base;
+	num_locks = i * 32; /* actual number of locks in this device */

-	platform_set_drvdata(pdev, state);
+	bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL);
+	if (!bank) {
+		ret = -ENOMEM;
+		goto iounmap_base;
+	}
+
+	platform_set_drvdata(pdev, bank);
+
+	for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
+		hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;

	/*
	 * runtime PM will make sure the clock of this module is
@@ -135,79 +127,46 @@ static int __devinit omap_hwspinlock_probe(struct platform_device *pdev)
	 */
	pm_runtime_enable(&pdev->dev);

-	for (i = 0; i < state->num_locks; i++) {
-		omap_lock = kzalloc(sizeof(*omap_lock), GFP_KERNEL);
-		if (!omap_lock) {
-			ret = -ENOMEM;
-			goto free_locks;
-		}
-
-		omap_lock->lock.dev = &pdev->dev;
-		omap_lock->lock.owner = THIS_MODULE;
-		omap_lock->lock.id = i;
-		omap_lock->lock.ops = &omap_hwspinlock_ops;
-		omap_lock->addr = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
-
-		ret = hwspin_lock_register(&omap_lock->lock);
-		if (ret) {
-			kfree(omap_lock);
-			goto free_locks;
-		}
-	}
+	ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
+				pdata->base_id, num_locks);
+	if (ret)
+		goto reg_fail;

	return 0;

-free_locks:
-	while (--i >= 0) {
-		lock = hwspin_lock_unregister(i);
-		/* this should't happen, but let's give our best effort */
-		if (!lock) {
-			dev_err(&pdev->dev, "%s: cleanups failed\n", __func__);
-			continue;
-		}
-		omap_lock = to_omap_hwspinlock(lock);
-		kfree(omap_lock);
-	}
-
+reg_fail:
	pm_runtime_disable(&pdev->dev);
+	kfree(bank);
 iounmap_base:
	iounmap(io_base);
-free_state:
-	kfree(state);
	return ret;
 }

-static int omap_hwspinlock_remove(struct platform_device *pdev)
+static int __devexit omap_hwspinlock_remove(struct platform_device *pdev)
 {
-	struct omap_hwspinlock_state *state = platform_get_drvdata(pdev);
-	struct hwspinlock *lock;
-	struct omap_hwspinlock *omap_lock;
-	int i;
+	struct hwspinlock_device *bank = platform_get_drvdata(pdev);
+	void __iomem *io_base = bank->lock[0].priv - LOCK_BASE_OFFSET;
+	int ret;

-	for (i = 0; i < state->num_locks; i++) {
-		lock = hwspin_lock_unregister(i);
-		/* this shouldn't happen at this point. if it does, at least
-		 * don't continue with the remove */
-		if (!lock) {
-			dev_err(&pdev->dev, "%s: failed on %d\n", __func__, i);
-			return -EBUSY;
-		}
-
-		omap_lock = to_omap_hwspinlock(lock);
-		kfree(omap_lock);
+	ret = hwspin_lock_unregister(bank);
+	if (ret) {
+		dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
+		return ret;
	}

	pm_runtime_disable(&pdev->dev);
-	iounmap(state->io_base);
-	kfree(state);
+	iounmap(io_base);
+	kfree(bank);

	return 0;
 }

 static struct platform_driver omap_hwspinlock_driver = {
	.probe = omap_hwspinlock_probe,
-	.remove = omap_hwspinlock_remove,
+	.remove = __devexit_p(omap_hwspinlock_remove),
	.driver = {
		.name = "omap_hwspinlock",
+		.owner = THIS_MODULE,
	},
 };
--- /dev/null
+++ b/drivers/hwspinlock/u8500_hsem.c
@@ -0,0 +1,198 @@
+/*
+ * u8500 HWSEM driver
+ *
+ * Copyright (C) 2010-2011 ST-Ericsson
+ *
+ * Implements u8500 semaphore handling for protocol 1, no interrupts.
+ *
+ * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
+ * Heavily borrowed from the work of :
+ *   Simon Que <sque@ti.com>
+ *   Hari Kanigeri <h-kanigeri2@ti.com>
+ *   Ohad Ben-Cohen <ohad@wizery.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/hwspinlock.h>
+#include <linux/platform_device.h>
+
+#include "hwspinlock_internal.h"
+
+/*
+ * Implementation of STE's HSem protocol 1 without interrutps.
+ * The only masterID we allow is '0x01' to force people to use
+ * HSems for synchronisation between processors rather than processes
+ * on the ARM core.
+ */
+
+#define U8500_MAX_SEMAPHORE		32	/* a total of 32 semaphore */
+#define RESET_SEMAPHORE			(0)	/* free */
+
+/*
+ * CPU ID for master running u8500 kernel.
+ * Hswpinlocks should only be used to synchonise operations
+ * between the Cortex A9 core and the other CPUs.  Hence
+ * forcing the masterID to a preset value.
+ */
+#define HSEM_MASTER_ID			0x01
+
+#define HSEM_REGISTER_OFFSET		0x08
+
+#define HSEM_CTRL_REG			0x00
+#define HSEM_ICRALL			0x90
+#define HSEM_PROTOCOL_1			0x01
+
+static int u8500_hsem_trylock(struct hwspinlock *lock)
+{
+	void __iomem *lock_addr = lock->priv;
+
+	writel(HSEM_MASTER_ID, lock_addr);
+
+	/* get only first 4 bit and compare to masterID.
+	 * if equal, we have the semaphore, otherwise
+	 * someone else has it.
+	 */
+	return (HSEM_MASTER_ID == (0x0F & readl(lock_addr)));
+}
+
+static void u8500_hsem_unlock(struct hwspinlock *lock)
+{
+	void __iomem *lock_addr = lock->priv;
+
+	/* release the lock by writing 0 to it */
+	writel(RESET_SEMAPHORE, lock_addr);
+}
+
+/*
+ * u8500: what value is recommended here ?
+ */
+static void u8500_hsem_relax(struct hwspinlock *lock)
+{
+	ndelay(50);
+}
+
+static const struct hwspinlock_ops u8500_hwspinlock_ops = {
+	.trylock	= u8500_hsem_trylock,
+	.unlock		= u8500_hsem_unlock,
+	.relax		= u8500_hsem_relax,
+};
+
+static int __devinit u8500_hsem_probe(struct platform_device *pdev)
+{
+	struct hwspinlock_pdata *pdata = pdev->dev.platform_data;
+	struct hwspinlock_device *bank;
+	struct hwspinlock *hwlock;
+	struct resource *res;
+	void __iomem *io_base;
+	int i, ret, num_locks = U8500_MAX_SEMAPHORE;
+	ulong val;
+
+	if (!pdata)
+		return -ENODEV;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	io_base = ioremap(res->start, resource_size(res));
+	if (!io_base) {
+		ret = -ENOMEM;
+		goto free_state;
+	}
+
+	/* make sure protocol 1 is selected */
+	val = readl(io_base + HSEM_CTRL_REG);
+	writel((val & ~HSEM_PROTOCOL_1), io_base + HSEM_CTRL_REG);
+
+	/* clear all interrupts */
+	writel(0xFFFF, io_base + HSEM_ICRALL);
+
+	bank = kzalloc(sizeof(*bank) + num_locks * sizeof(*hwlock), GFP_KERNEL);
+	if (!bank) {
+		ret = -ENOMEM;
+		goto iounmap_base;
+	}
+
+	platform_set_drvdata(pdev, bank);
+
+	for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
+		hwlock->priv = io_base + HSEM_REGISTER_OFFSET + sizeof(u32) * i;
+
+	/* no pm needed for HSem but required to comply with hwspilock core */
+	pm_runtime_enable(&pdev->dev);
+
+	ret = hwspin_lock_register(bank, &pdev->dev, &u8500_hwspinlock_ops,
+						pdata->base_id, num_locks);
+	if (ret)
+		goto reg_fail;
+
+	return 0;
+
+reg_fail:
+	pm_runtime_disable(&pdev->dev);
+	kfree(bank);
+iounmap_base:
+	iounmap(io_base);
+	return ret;
+}
+
+static int __devexit u8500_hsem_remove(struct platform_device *pdev)
+{
+	struct hwspinlock_device *bank = platform_get_drvdata(pdev);
+	void __iomem *io_base = bank->lock[0].priv - HSEM_REGISTER_OFFSET;
+	int ret;
+
+	/* clear all interrupts */
+	writel(0xFFFF, io_base + HSEM_ICRALL);
+
+	ret = hwspin_lock_unregister(bank);
+	if (ret) {
+		dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
+		return ret;
+	}
+
+	pm_runtime_disable(&pdev->dev);
+	iounmap(io_base);
+	kfree(bank);
+
+	return 0;
+}
+
+static struct platform_driver u8500_hsem_driver = {
+	.probe		= u8500_hsem_probe,
+	.remove		= __devexit_p(u8500_hsem_remove),
+	.driver		= {
+		.name	= "u8500_hsem",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init u8500_hsem_init(void)
+{
+	return platform_driver_register(&u8500_hsem_driver);
+}
+/* board init code might need to reserve hwspinlocks for predefined purposes */
+postcore_initcall(u8500_hsem_init);
+
+static void __exit u8500_hsem_exit(void)
+{
+	platform_driver_unregister(&u8500_hsem_driver);
+}
+module_exit(u8500_hsem_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Hardware Spinlock driver for u8500");
+MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1270,7 +1270,7 @@ static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
		}
	} else {
		if (!host->protect_card) {
-			pr_info"%s: cover is open, "
+			pr_info("%s: cover is open, "
					"card is now inaccessible\n",
					mmc_hostname(host->mmc));
			host->protect_card = 1;
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -20,17 +20,49 @@

 #include <linux/err.h>
 #include <linux/sched.h>
+#include <linux/device.h>

 /* hwspinlock mode argument */
 #define HWLOCK_IRQSTATE	0x01	/* Disable interrupts, save state */
 #define HWLOCK_IRQ	0x02	/* Disable interrupts, don't save state */

 struct hwspinlock;
+struct hwspinlock_device;
+struct hwspinlock_ops;
+
+/**
+ * struct hwspinlock_pdata - platform data for hwspinlock drivers
+ * @base_id: base id for this hwspinlock device
+ *
+ * hwspinlock devices provide system-wide hardware locks that are used
+ * by remote processors that have no other way to achieve synchronization.
+ *
+ * To achieve that, each physical lock must have a system-wide id number
+ * that is agreed upon, otherwise remote processors can't possibly assume
+ * they're using the same hardware lock.
+ *
+ * Usually boards have a single hwspinlock device, which provides several
+ * hwspinlocks, and in this case, they can be trivially numbered 0 to
+ * (num-of-locks - 1).
+ *
+ * In case boards have several hwspinlocks devices, a different base id
+ * should be used for each hwspinlock device (they can't all use 0 as
+ * a starting id!).
+ *
+ * This platform data structure should be used to provide the base id
+ * for each device (which is trivially 0 when only a single hwspinlock
+ * device exists). It can be shared between different platforms, hence
+ * its location.
+ */
+struct hwspinlock_pdata {
+	int base_id;
+};

 #if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)

-int hwspin_lock_register(struct hwspinlock *lock);
-struct hwspinlock *hwspin_lock_unregister(unsigned int id);
+int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
+		const struct hwspinlock_ops *ops, int base_id, int num_locks);
+int hwspin_lock_unregister(struct hwspinlock_device *bank);
 struct hwspinlock *hwspin_lock_request(void);
 struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
 int hwspin_lock_free(struct hwspinlock *hwlock);
@@ -94,16 +126,6 @@ static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
	return 0;
 }

-static inline int hwspin_lock_register(struct hwspinlock *hwlock)
-{
-	return -ENODEV;
-}
-
-static inline struct hwspinlock *hwspin_lock_unregister(unsigned int id)
-{
-	return NULL;
-}
-
 #endif /* !CONFIG_HWSPINLOCK */

 /**
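
[For illustration only; the snippet below is not part of this merge.] The
board-code counterpart of the platform data added above, modeled on the
arch/arm/mach-omap2/hwspinlock.c change earlier in this merge; the
"foo_hwspinlock" device name is hypothetical.

#include <linux/hwspinlock.h>
#include <linux/init.h>
#include <linux/platform_device.h>

/* a single hwspinlock device on this board, so its locks start at id 0 */
static struct hwspinlock_pdata foo_hwspinlock_pdata = {
	.base_id = 0,
};

static struct platform_device foo_hwspinlock_device = {
	.name = "foo_hwspinlock",
	.id = -1,
	.dev = {
		.platform_data = &foo_hwspinlock_pdata,
	},
};

static int __init foo_hwspinlock_init(void)
{
	return platform_device_register(&foo_hwspinlock_device);
}
postcore_initcall(foo_hwspinlock_init);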