author      Jeff Garzik <jeff@garzik.org>   2006-07-29 01:39:26 -0400
committer   Jeff Garzik <jeff@garzik.org>   2006-07-29 01:39:26 -0400
commit      5b85f29ca453d56d83cdffa075d615583bb8fb72 (patch)
tree        a553754dcef01a22a6bd6e2eeb2fd4eb69bf5b37 /drivers
parent      48cb37bd9e053429aacb7c2726da3300aba60c68 (diff)
parent      ab3b3fd38125be0242c2f94bf144b48054210882 (diff)
download    linux-5b85f29ca453d56d83cdffa075d615583bb8fb72.tar.gz
            linux-5b85f29ca453d56d83cdffa075d615583bb8fb72.tar.xz
Merge branch 'upstream-fixes' into upstream
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/cciss.c                             86
-rw-r--r--  drivers/bluetooth/hci_usb.c                       25
-rw-r--r--  drivers/cpufreq/cpufreq.c                         40
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c             2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c                 4
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c                3
-rw-r--r--  drivers/ide/ide-disk.c                             2
-rw-r--r--  drivers/ide/ide-dma.c                              2
-rw-r--r--  drivers/ide/ide.c                                  5
-rw-r--r--  drivers/ide/pci/it821x.c                          11
-rw-r--r--  drivers/infiniband/core/mad.c                     22
-rw-r--r--  drivers/infiniband/core/user_mad.c                87
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c              42
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c        76
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_keys.c          15
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c          5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c            5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c            3
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h               1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c         23
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c     8
-rw-r--r--  drivers/net/sunlance.c                             8
-rw-r--r--  drivers/net/tg3.c                                116
-rw-r--r--  drivers/s390/cio/ccwgroup.c                       10
-rw-r--r--  drivers/s390/cio/device_fsm.c                      3
-rw-r--r--  drivers/scsi/NCR53C9x.c                           16
-rw-r--r--  drivers/scsi/arm/fas216.c                          2
-rw-r--r--  drivers/scsi/esp.c                                12
-rw-r--r--  drivers/scsi/scsi_ioctl.c                          5
29 files changed, 411 insertions, 228 deletions
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 1c4df22dfd2a..7b0eca703a67 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1233,6 +1233,50 @@ static inline void complete_buffers(struct bio *bio, int status)
}
}
+static void cciss_check_queues(ctlr_info_t *h)
+{
+ int start_queue = h->next_to_run;
+ int i;
+
+ /* check to see if we have maxed out the number of commands that can
+ * be placed on the queue. If so then exit. We do this check here
+ * in case the interrupt we serviced was from an ioctl and did not
+ * free any new commands.
+ */
+ if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
+ return;
+
+ /* We have room on the queue for more commands. Now we need to queue
+ * them up. We will also keep track of the next queue to run so
+ * that every queue gets a chance to be started first.
+ */
+ for (i = 0; i < h->highest_lun + 1; i++) {
+ int curr_queue = (start_queue + i) % (h->highest_lun + 1);
+ /* make sure the disk has been added and the drive is real
+ * because this can be called from the middle of init_one.
+ */
+ if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
+ continue;
+ blk_start_queue(h->gendisk[curr_queue]->queue);
+
+ /* check to see if we have maxed out the number of commands
+ * that can be placed on the queue.
+ */
+ if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
+ if (curr_queue == start_queue) {
+ h->next_to_run =
+ (start_queue + 1) % (h->highest_lun + 1);
+ break;
+ } else {
+ h->next_to_run = curr_queue;
+ break;
+ }
+ } else {
+ curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
+ }
+ }
+}
+
static void cciss_softirq_done(struct request *rq)
{
CommandList_struct *cmd = rq->completion_data;
@@ -1264,6 +1308,7 @@ static void cciss_softirq_done(struct request *rq)
spin_lock_irqsave(&h->lock, flags);
end_that_request_last(rq, rq->errors);
cmd_free(h, cmd, 1);
+ cciss_check_queues(h);
spin_unlock_irqrestore(&h->lock, flags);
}
@@ -2528,8 +2573,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
CommandList_struct *c;
unsigned long flags;
__u32 a, a1, a2;
- int j;
- int start_queue = h->next_to_run;
if (interrupt_not_for_us(h))
return IRQ_NONE;
@@ -2588,45 +2631,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
}
}
- /* check to see if we have maxed out the number of commands that can
- * be placed on the queue. If so then exit. We do this check here
- * in case the interrupt we serviced was from an ioctl and did not
- * free any new commands.
- */
- if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
- goto cleanup;
-
- /* We have room on the queue for more commands. Now we need to queue
- * them up. We will also keep track of the next queue to run so
- * that every queue gets a chance to be started first.
- */
- for (j = 0; j < h->highest_lun + 1; j++) {
- int curr_queue = (start_queue + j) % (h->highest_lun + 1);
- /* make sure the disk has been added and the drive is real
- * because this can be called from the middle of init_one.
- */
- if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
- continue;
- blk_start_queue(h->gendisk[curr_queue]->queue);
-
- /* check to see if we have maxed out the number of commands
- * that can be placed on the queue.
- */
- if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
- if (curr_queue == start_queue) {
- h->next_to_run =
- (start_queue + 1) % (h->highest_lun + 1);
- goto cleanup;
- } else {
- h->next_to_run = curr_queue;
- goto cleanup;
- }
- } else {
- curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
- }
- }
-
- cleanup:
spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
return IRQ_HANDLED;
}
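
Note: the cciss hunks above move the round-robin queue restart out of the interrupt handler into cciss_check_queues(), which now runs from the softirq completion path after a command is freed. The scheduling idea is unchanged: remember next_to_run and walk the logical drives modulo highest_lun+1 so every queue eventually gets a turn to be started first. A minimal user-space sketch of that rotation, assuming invented names (NDRIVES, pool_full(), start_queue_for()) rather than the driver's own:

#include <stdio.h>
#include <stdbool.h>

#define NDRIVES 4

static int next_to_run;                 /* analogous to h->next_to_run */

static bool pool_full(int in_flight)    /* stand-in for the cmd_pool_bits check */
{
        return in_flight >= 8;          /* pretend NR_CMDS == 8 */
}

static void start_queue_for(int drive)  /* stand-in for blk_start_queue() */
{
        printf("starting queue %d\n", drive);
}

/* Walk all drives starting at next_to_run; stop and remember where to
 * resume once the command pool fills up, so no queue is starved. */
static void check_queues(int in_flight)
{
        int start = next_to_run, i;

        if (pool_full(in_flight))
                return;

        for (i = 0; i < NDRIVES; i++) {
                int cur = (start + i) % NDRIVES;

                start_queue_for(cur);
                in_flight++;            /* assume each start consumes one command */

                if (pool_full(in_flight)) {
                        next_to_run = (cur == start) ?
                                (start + 1) % NDRIVES : cur;
                        break;
                }
        }
}

int main(void)
{
        check_queues(5);                /* room for 3 more commands */
        printf("next_to_run is now %d\n", next_to_run);
        return 0;
}
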
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 6a0c2230f82f..e2d4beac7420 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -67,6 +67,8 @@ static int ignore = 0;
static int ignore_dga = 0;
static int ignore_csr = 0;
static int ignore_sniffer = 0;
+static int disable_scofix = 0;
+static int force_scofix = 0;
static int reset = 0;
#ifdef CONFIG_BT_HCIUSB_SCO
@@ -107,9 +109,12 @@ static struct usb_device_id blacklist_ids[] = {
{ USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE },
/* Broadcom BCM2035 */
- { USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_BROKEN_ISOC },
+ { USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 },
+ /* IBM/Lenovo ThinkPad with Broadcom chip */
+ { USB_DEVICE(0x0a5c, 0x201e), .driver_info = HCI_WRONG_SCO_MTU },
+
/* Microsoft Wireless Transceiver for Bluetooth 2.0 */
{ USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET },
@@ -119,11 +124,13 @@ static struct usb_device_id blacklist_ids[] = {
/* ISSC Bluetooth Adapter v3.1 */
{ USB_DEVICE(0x1131, 0x1001), .driver_info = HCI_RESET },
- /* RTX Telecom based adapter with buggy SCO support */
+ /* RTX Telecom based adapters with buggy SCO support */
{ USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC },
+ { USB_DEVICE(0x0400, 0x080a), .driver_info = HCI_BROKEN_ISOC },
- /* Belkin F8T012 */
+ /* Belkin F8T012 and F8T013 devices */
{ USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_WRONG_SCO_MTU },
+ { USB_DEVICE(0x050d, 0x0013), .driver_info = HCI_WRONG_SCO_MTU },
/* Digianswer devices */
{ USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER },
@@ -990,8 +997,10 @@ static int hci_usb_probe(struct usb_interface *intf, const struct usb_device_id
if (reset || id->driver_info & HCI_RESET)
set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks);
- if (id->driver_info & HCI_WRONG_SCO_MTU)
- set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
+ if (force_scofix || id->driver_info & HCI_WRONG_SCO_MTU) {
+ if (!disable_scofix)
+ set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
+ }
if (id->driver_info & HCI_SNIFFER) {
if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
@@ -1161,6 +1170,12 @@ MODULE_PARM_DESC(ignore_csr, "Ignore devices with id 0a12:0001");
module_param(ignore_sniffer, bool, 0644);
MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002");
+module_param(disable_scofix, bool, 0644);
+MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size");
+
+module_param(force_scofix, bool, 0644);
+MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffers size");
+
module_param(reset, bool, 0644);
MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
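
Note: the hci_usb change adds two module parameters that override the per-device HCI_WRONG_SCO_MTU quirk: force_scofix turns the SCO buffer-size fixup on even without a blacklist entry, and disable_scofix vetoes it entirely. A stand-alone sketch of just that decision; quirk_needed() and the flag value here are illustrative, not the driver's:

#include <stdio.h>
#include <stdbool.h>

#define HCI_WRONG_SCO_MTU 0x40          /* illustrative flag value */

static bool disable_scofix;
static bool force_scofix;

/* Mirrors the probe-time logic: apply the buffer-size fixup when the
 * device is known-broken or the user forces it, unless it is disabled. */
static bool quirk_needed(unsigned long driver_info)
{
        if (force_scofix || (driver_info & HCI_WRONG_SCO_MTU))
                return !disable_scofix;
        return false;
}

int main(void)
{
        printf("blacklisted device:        %d\n", quirk_needed(HCI_WRONG_SCO_MTU));
        force_scofix = true;
        printf("clean device, forced:      %d\n", quirk_needed(0));
        disable_scofix = true;
        printf("blacklisted, but disabled: %d\n", quirk_needed(HCI_WRONG_SCO_MTU));
        return 0;
}
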
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8d328186f774..bc1088d9b379 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -364,10 +364,12 @@ static ssize_t store_##file_name \
if (ret != 1) \
return -EINVAL; \
\
+ lock_cpu_hotplug(); \
mutex_lock(&policy->lock); \
ret = __cpufreq_set_policy(policy, &new_policy); \
policy->user_policy.object = policy->object; \
mutex_unlock(&policy->lock); \
+ unlock_cpu_hotplug(); \
\
return ret ? ret : count; \
}
@@ -1197,20 +1199,18 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
*********************************************************************/
+/* Must be called with lock_cpu_hotplug held */
int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
int retval = -EINVAL;
- lock_cpu_hotplug();
dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
target_freq, relation);
if (cpu_online(policy->cpu) && cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
- unlock_cpu_hotplug();
-
return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
@@ -1225,17 +1225,23 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
if (!policy)
return -EINVAL;
+ lock_cpu_hotplug();
mutex_lock(&policy->lock);
ret = __cpufreq_driver_target(policy, target_freq, relation);
mutex_unlock(&policy->lock);
+ unlock_cpu_hotplug();
cpufreq_cpu_put(policy);
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
+/*
+ * Locking: Must be called with the lock_cpu_hotplug() lock held
+ * when "event" is CPUFREQ_GOV_LIMITS
+ */
static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
{
@@ -1257,24 +1263,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
}
-int cpufreq_governor(unsigned int cpu, unsigned int event)
-{
- int ret = 0;
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-
- if (!policy)
- return -EINVAL;
-
- mutex_lock(&policy->lock);
- ret = __cpufreq_governor(policy, event);
- mutex_unlock(&policy->lock);
-
- cpufreq_cpu_put(policy);
- return ret;
-}
-EXPORT_SYMBOL_GPL(cpufreq_governor);
-
-
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
struct cpufreq_governor *t;
@@ -1342,6 +1330,9 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
EXPORT_SYMBOL(cpufreq_get_policy);
+/*
+ * Locking: Must be called with the lock_cpu_hotplug() lock held
+ */
static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
{
int ret = 0;
@@ -1436,6 +1427,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
if (!data)
return -EINVAL;
+ lock_cpu_hotplug();
+
/* lock this CPU */
mutex_lock(&data->lock);
@@ -1446,6 +1439,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
data->user_policy.governor = data->governor;
mutex_unlock(&data->lock);
+
+ unlock_cpu_hotplug();
cpufreq_cpu_put(data);
return ret;
@@ -1469,6 +1464,7 @@ int cpufreq_update_policy(unsigned int cpu)
if (!data)
return -ENODEV;
+ lock_cpu_hotplug();
mutex_lock(&data->lock);
dprintk("updating policy for CPU %u\n", cpu);
@@ -1494,7 +1490,7 @@ int cpufreq_update_policy(unsigned int cpu)
ret = __cpufreq_set_policy(data, &policy);
mutex_unlock(&data->lock);
-
+ unlock_cpu_hotplug();
cpufreq_cpu_put(data);
return ret;
}
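
Note: the cpufreq hunks move lock_cpu_hotplug() out of __cpufreq_driver_target() and into the outer entry points, so the hotplug lock is always taken before policy->lock (and before dbs_mutex in the governors), and the inner helper merely documents the rule. A minimal pthread sketch of that ordering, assuming hypothetical names hotplug_lock, policy_lock, driver_target() and set_target():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER; /* outer */
static pthread_mutex_t policy_lock  = PTHREAD_MUTEX_INITIALIZER; /* inner */

/* Inner helper: assumes the caller already holds hotplug_lock, just as
 * __cpufreq_driver_target() now documents its locking requirement. */
static int driver_target(unsigned int freq)
{
        printf("setting target %u kHz\n", freq);
        return 0;
}

/* Outer entry point: take the coarse lock first, then the per-policy
 * lock, so every path uses one order and cannot deadlock against
 * CPU hotplug. */
static int set_target(unsigned int freq)
{
        int ret;

        pthread_mutex_lock(&hotplug_lock);
        pthread_mutex_lock(&policy_lock);
        ret = driver_target(freq);
        pthread_mutex_unlock(&policy_lock);
        pthread_mutex_unlock(&hotplug_lock);
        return ret;
}

int main(void)
{
        return set_target(1800000);
}
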
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index b3ebc8f01975..c4c578defabf 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -525,7 +525,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
break;
case CPUFREQ_GOV_LIMITS:
- lock_cpu_hotplug();
mutex_lock(&dbs_mutex);
if (policy->max < this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(
@@ -536,7 +535,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
this_dbs_info->cur_policy,
policy->min, CPUFREQ_RELATION_L);
mutex_unlock(&dbs_mutex);
- unlock_cpu_hotplug();
break;
}
return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 178f0c547eb7..52cf1f021825 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -309,7 +309,9 @@ static void do_dbs_timer(void *data)
if (!dbs_info->enable)
return;
+ lock_cpu_hotplug();
dbs_check_cpu(dbs_info);
+ unlock_cpu_hotplug();
queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
}
@@ -412,7 +414,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
break;
case CPUFREQ_GOV_LIMITS:
- lock_cpu_hotplug();
mutex_lock(&dbs_mutex);
if (policy->max < this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(this_dbs_info->cur_policy,
@@ -423,7 +424,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
policy->min,
CPUFREQ_RELATION_L);
mutex_unlock(&dbs_mutex);
- unlock_cpu_hotplug();
break;
}
return 0;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 44ae5e5b94cf..a06c204589cd 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -18,6 +18,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/cpufreq.h>
+#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
@@ -70,6 +71,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
+ lock_cpu_hotplug();
mutex_lock(&userspace_mutex);
if (!cpu_is_managed[policy->cpu])
goto err;
@@ -92,6 +94,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
err:
mutex_unlock(&userspace_mutex);
+ unlock_cpu_hotplug();
return ret;
}
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index f712e4cfd9dc..7cf3eb023521 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -776,7 +776,7 @@ static void update_ordered(ide_drive_t *drive)
* not available so we don't need to recheck that.
*/
capacity = idedisk_capacity(drive);
- barrier = ide_id_has_flush_cache(id) &&
+ barrier = ide_id_has_flush_cache(id) && !drive->noflush &&
(drive->addressing == 0 || capacity <= (1ULL << 28) ||
ide_id_has_flush_cache_ext(id));
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 98918fb6b2ce..7c3a13e1cf64 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -750,7 +750,7 @@ void ide_dma_verbose(ide_drive_t *drive)
goto bug_dma_off;
printk(", DMA");
} else if (id->field_valid & 1) {
- printk(", BUG");
+ goto bug_dma_off;
}
return;
bug_dma_off:
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 05fbd9298db7..defd4b4bd374 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1539,7 +1539,7 @@ static int __init ide_setup(char *s)
const char *hd_words[] = {
"none", "noprobe", "nowerr", "cdrom", "serialize",
"autotune", "noautotune", "minus8", "swapdata", "bswap",
- "minus11", "remap", "remap63", "scsi", NULL };
+ "noflush", "remap", "remap63", "scsi", NULL };
unit = s[2] - 'a';
hw = unit / MAX_DRIVES;
unit = unit % MAX_DRIVES;
@@ -1578,6 +1578,9 @@ static int __init ide_setup(char *s)
case -10: /* "bswap" */
drive->bswap = 1;
goto done;
+ case -11: /* noflush */
+ drive->noflush = 1;
+ goto done;
case -12: /* "remap" */
drive->remap_0_to_1 = 1;
goto done;
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 3cb04424d351..e9bad185968a 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -498,9 +498,14 @@ static int config_chipset_for_dma (ide_drive_t *drive)
{
u8 speed = ide_dma_speed(drive, it821x_ratemask(drive));
- config_it821x_chipset_for_pio(drive, !speed);
- it821x_tune_chipset(drive, speed);
- return ide_dma_enable(drive);
+ if (speed) {
+ config_it821x_chipset_for_pio(drive, 0);
+ it821x_tune_chipset(drive, speed);
+
+ return ide_dma_enable(drive);
+ }
+
+ return 0;
}
/**
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 5ed4dab52a6f..1c3cfbbe6a97 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -167,6 +167,15 @@ static int is_vendor_method_in_use(
return 0;
}
+int ib_response_mad(struct ib_mad *mad)
+{
+ return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
+ (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
+ ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
+ (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
+}
+EXPORT_SYMBOL(ib_response_mad);
+
/*
* ib_register_mad_agent - Register to send/receive MADs
*/
@@ -570,13 +579,6 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
-static inline int response_mad(struct ib_mad *mad)
-{
- /* Trap represses are responses although response bit is reset */
- return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
- (mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
-}
-
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
struct ib_mad_queue *mad_queue;
@@ -723,7 +725,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
switch (ret)
{
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
- if (response_mad(&mad_priv->mad.mad) &&
+ if (ib_response_mad(&mad_priv->mad.mad) &&
mad_agent_priv->agent.recv_handler) {
local->mad_priv = mad_priv;
local->recv_mad_agent = mad_agent_priv;
@@ -1551,7 +1553,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
unsigned long flags;
spin_lock_irqsave(&port_priv->reg_lock, flags);
- if (response_mad(mad)) {
+ if (ib_response_mad(mad)) {
u32 hi_tid;
struct ib_mad_agent_private *entry;
@@ -1799,7 +1801,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
}
/* Complete corresponding request */
- if (response_mad(mad_recv_wc->recv_buf.mad)) {
+ if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
spin_lock_irqsave(&mad_agent_priv->lock, flags);
mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
if (!mad_send_wr) {
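
Note: the mad.c change promotes the private response_mad() test to an exported ib_response_mad() and extends it: trap represses count as responses even though the response bit is clear, and BM-class MADs can signal a response through an attribute-modifier bit. A self-contained sketch of the same kind of predicate; the constants and struct below are illustrative stand-ins, not the real definitions from <rdma/ib_mad.h>:

#include <stdio.h>
#include <stdint.h>

/* Illustrative values only. */
#define METHOD_RESP          0x80
#define METHOD_TRAP_REPRESS  0x07
#define MGMT_CLASS_BM        0x05
#define BM_ATTR_MOD_RESP     (1u << 31)

struct mad_hdr {
        uint8_t  method;
        uint8_t  mgmt_class;
        uint32_t attr_mod;
};

/* A MAD is a response if the response bit is set, if it is a trap
 * repress, or if it is a BM-class MAD with the response modifier set. */
static int is_response(const struct mad_hdr *h)
{
        return (h->method & METHOD_RESP) ||
               h->method == METHOD_TRAP_REPRESS ||
               (h->mgmt_class == MGMT_CLASS_BM &&
                (h->attr_mod & BM_ATTR_MOD_RESP));
}

int main(void)
{
        struct mad_hdr repress = { .method = METHOD_TRAP_REPRESS };
        struct mad_hdr request = { .method = 0x01 };

        printf("trap repress: %d, plain request: %d\n",
               is_response(&repress), is_response(&request));
        return 0;
}
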
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index afe70a549c2f..1273f8807e84 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -112,8 +112,10 @@ struct ib_umad_device {
struct ib_umad_file {
struct ib_umad_port *port;
struct list_head recv_list;
+ struct list_head send_list;
struct list_head port_list;
spinlock_t recv_lock;
+ spinlock_t send_lock;
wait_queue_head_t recv_wait;
struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
int agents_dead;
@@ -177,12 +179,21 @@ static int queue_packet(struct ib_umad_file *file,
return ret;
}
+static void dequeue_send(struct ib_umad_file *file,
+ struct ib_umad_packet *packet)
+ {
+ spin_lock_irq(&file->send_lock);
+ list_del(&packet->list);
+ spin_unlock_irq(&file->send_lock);
+ }
+
static void send_handler(struct ib_mad_agent *agent,
struct ib_mad_send_wc *send_wc)
{
struct ib_umad_file *file = agent->context;
struct ib_umad_packet *packet = send_wc->send_buf->context[0];
+ dequeue_send(file, packet);
ib_destroy_ah(packet->msg->ah);
ib_free_send_mad(packet->msg);
@@ -370,6 +381,51 @@ static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
return 0;
}
+static int same_destination(struct ib_user_mad_hdr *hdr1,
+ struct ib_user_mad_hdr *hdr2)
+{
+ if (!hdr1->grh_present && !hdr2->grh_present)
+ return (hdr1->lid == hdr2->lid);
+
+ if (hdr1->grh_present && hdr2->grh_present)
+ return !memcmp(hdr1->gid, hdr2->gid, 16);
+
+ return 0;
+}
+
+static int is_duplicate(struct ib_umad_file *file,
+ struct ib_umad_packet *packet)
+{
+ struct ib_umad_packet *sent_packet;
+ struct ib_mad_hdr *sent_hdr, *hdr;
+
+ hdr = (struct ib_mad_hdr *) packet->mad.data;
+ list_for_each_entry(sent_packet, &file->send_list, list) {
+ sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;
+
+ if ((hdr->tid != sent_hdr->tid) ||
+ (hdr->mgmt_class != sent_hdr->mgmt_class))
+ continue;
+
+ /*
+ * No need to be overly clever here. If two new operations have
+ * the same TID, reject the second as a duplicate. This is more
+ * restrictive than required by the spec.
+ */
+ if (!ib_response_mad((struct ib_mad *) hdr)) {
+ if (!ib_response_mad((struct ib_mad *) sent_hdr))
+ return 1;
+ continue;
+ } else if (!ib_response_mad((struct ib_mad *) sent_hdr))
+ continue;
+
+ if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
+ return 1;
+ }
+
+ return 0;
+}
+
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
@@ -379,7 +435,6 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
struct ib_ah_attr ah_attr;
struct ib_ah *ah;
struct ib_rmpp_mad *rmpp_mad;
- u8 method;
__be64 *tid;
int ret, data_len, hdr_len, copy_offset, rmpp_active;
@@ -473,28 +528,36 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
}
/*
- * If userspace is generating a request that will generate a
- * response, we need to make sure the high-order part of the
- * transaction ID matches the agent being used to send the
- * MAD.
+ * Set the high-order part of the transaction ID to make MADs from
+ * different agents unique, and allow routing responses back to the
+ * original requestor.
*/
- method = ((struct ib_mad_hdr *) packet->msg->mad)->method;
-
- if (!(method & IB_MGMT_METHOD_RESP) &&
- method != IB_MGMT_METHOD_TRAP_REPRESS &&
- method != IB_MGMT_METHOD_SEND) {
+ if (!ib_response_mad(packet->msg->mad)) {
tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
(be64_to_cpup(tid) & 0xffffffff));
+ rmpp_mad->mad_hdr.tid = *tid;
+ }
+
+ spin_lock_irq(&file->send_lock);
+ ret = is_duplicate(file, packet);
+ if (!ret)
+ list_add_tail(&packet->list, &file->send_list);
+ spin_unlock_irq(&file->send_lock);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_msg;
}
ret = ib_post_send_mad(packet->msg, NULL);
if (ret)
- goto err_msg;
+ goto err_send;
up_read(&file->port->mutex);
return count;
+err_send:
+ dequeue_send(file, packet);
err_msg:
ib_free_send_mad(packet->msg);
err_ah:
@@ -657,7 +720,9 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
}
spin_lock_init(&file->recv_lock);
+ spin_lock_init(&file->send_lock);
INIT_LIST_HEAD(&file->recv_list);
+ INIT_LIST_HEAD(&file->send_list);
init_waitqueue_head(&file->recv_wait);
file->port = port;
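
Note: the user_mad.c change tracks outstanding sends on a per-file send_list and rejects a new request whose TID and management class collide with one already in flight to the same destination (same LID, or same GID when a GRH is present), while responses only clash with other responses. A compact user-space sketch of the same duplicate test over an array; struct pending and its fields are invented for the example and the destination check is reduced to a LID compare:

#include <stdio.h>
#include <stdint.h>

struct pending {
        uint64_t tid;
        uint8_t  mgmt_class;
        int      is_response;
        uint16_t lid;                   /* destination, no-GRH case only */
};

/* Reject a new MAD that reuses the TID/class of one still outstanding:
 * two requests with one TID are always duplicates, two responses are
 * duplicates only when they target the same destination. */
static int is_duplicate(const struct pending *list, int n,
                        const struct pending *pkt)
{
        int i;

        for (i = 0; i < n; i++) {
                if (list[i].tid != pkt->tid ||
                    list[i].mgmt_class != pkt->mgmt_class)
                        continue;
                if (!pkt->is_response && !list[i].is_response)
                        return 1;
                if (pkt->is_response && list[i].is_response &&
                    pkt->lid == list[i].lid)
                        return 1;
        }
        return 0;
}

int main(void)
{
        struct pending sent[] = { { .tid = 42, .mgmt_class = 1, .lid = 7 } };
        struct pending again  = { .tid = 42, .mgmt_class = 1, .lid = 7 };

        printf("duplicate request: %d\n", is_duplicate(sent, 1, &again));
        return 0;
}
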
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index bdf5d5098190..30923eb68ec7 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -42,6 +42,13 @@
#include "uverbs.h"
+static struct lock_class_key pd_lock_key;
+static struct lock_class_key mr_lock_key;
+static struct lock_class_key cq_lock_key;
+static struct lock_class_key qp_lock_key;
+static struct lock_class_key ah_lock_key;
+static struct lock_class_key srq_lock_key;
+
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
do { \
(udata)->inbuf = (void __user *) (ibuf); \
@@ -76,12 +83,13 @@
*/
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
- struct ib_ucontext *context)
+ struct ib_ucontext *context, struct lock_class_key *key)
{
uobj->user_handle = user_handle;
uobj->context = context;
kref_init(&uobj->ref);
init_rwsem(&uobj->mutex);
+ lockdep_set_class(&uobj->mutex, key);
uobj->live = 0;
}
@@ -470,7 +478,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
if (!uobj)
return -ENOMEM;
- init_uobj(uobj, 0, file->ucontext);
+ init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
down_write(&uobj->mutex);
pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
@@ -591,7 +599,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
if (!obj)
return -ENOMEM;
- init_uobj(&obj->uobject, 0, file->ucontext);
+ init_uobj(&obj->uobject, 0, file->ucontext, &mr_lock_key);
down_write(&obj->uobject.mutex);
/*
@@ -770,7 +778,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
if (!obj)
return -ENOMEM;
- init_uobj(&obj->uobject, cmd.user_handle, file->ucontext);
+ init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
down_write(&obj->uobject.mutex);
if (cmd.comp_channel >= 0) {
@@ -1051,13 +1059,14 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
if (!obj)
return -ENOMEM;
- init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext);
+ init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
down_write(&obj->uevent.uobject.mutex);
+ srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
pd = idr_read_pd(cmd.pd_handle, file->ucontext);
scq = idr_read_cq(cmd.send_cq_handle, file->ucontext);
- rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext);
- srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
+ rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
+ scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext);
if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
ret = -EINVAL;
@@ -1125,7 +1134,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
put_pd_read(pd);
put_cq_read(scq);
- put_cq_read(rcq);
+ if (rcq != scq)
+ put_cq_read(rcq);
if (srq)
put_srq_read(srq);
@@ -1150,7 +1160,7 @@ err_put:
put_pd_read(pd);
if (scq)
put_cq_read(scq);
- if (rcq)
+ if (rcq && rcq != scq)
put_cq_read(rcq);
if (srq)
put_srq_read(srq);
@@ -1751,7 +1761,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
if (!uobj)
return -ENOMEM;
- init_uobj(uobj, cmd.user_handle, file->ucontext);
+ init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
down_write(&uobj->mutex);
pd = idr_read_pd(cmd.pd_handle, file->ucontext);
@@ -1775,7 +1785,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
ah = ib_create_ah(pd, &attr);
if (IS_ERR(ah)) {
ret = PTR_ERR(ah);
- goto err;
+ goto err_put;
}
ah->uobject = uobj;
@@ -1811,6 +1821,9 @@ err_copy:
err_destroy:
ib_destroy_ah(ah);
+err_put:
+ put_pd_read(pd);
+
err:
put_uobj_write(uobj);
return ret;
@@ -1963,7 +1976,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
if (!obj)
return -ENOMEM;
- init_uobj(&obj->uobject, cmd.user_handle, file->ucontext);
+ init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key);
down_write(&obj->uobject.mutex);
pd = idr_read_pd(cmd.pd_handle, file->ucontext);
@@ -1984,7 +1997,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
srq = pd->device->create_srq(pd, &attr, &udata);
if (IS_ERR(srq)) {
ret = PTR_ERR(srq);
- goto err;
+ goto err_put;
}
srq->device = pd->device;
@@ -2029,6 +2042,9 @@ err_copy:
err_destroy:
ib_destroy_srq(srq);
+err_put:
+ put_pd_read(pd);
+
err:
put_uobj_write(&obj->uobject);
return ret;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 823131d58b34..f98518d912b5 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -859,6 +859,38 @@ static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
__ipath_layer_rcv_lid(dd, hdr);
}
+static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
+ u32 eflags,
+ u32 l,
+ u32 etail,
+ u64 *rc)
+{
+ char emsg[128];
+ struct ipath_message_header *hdr;
+
+ get_rhf_errstring(eflags, emsg, sizeof emsg);
+ hdr = (struct ipath_message_header *)&rc[1];
+ ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
+ "tlen=%x opcode=%x egridx=%x: %s\n",
+ eflags, l,
+ ipath_hdrget_rcv_type((__le32 *) rc),
+ ipath_hdrget_length_in_bytes((__le32 *) rc),
+ be32_to_cpu(hdr->bth[0]) >> 24,
+ etail, emsg);
+
+ /* Count local link integrity errors. */
+ if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
+ u8 n = (dd->ipath_ibcctrl >>
+ INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
+ INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
+
+ if (++dd->ipath_lli_counter > n) {
+ dd->ipath_lli_counter = 0;
+ dd->ipath_lli_errors++;
+ }
+ }
+}
+
/*
* ipath_kreceive - receive a packet
* @dd: the infinipath device
@@ -875,7 +907,6 @@ void ipath_kreceive(struct ipath_devdata *dd)
struct ipath_message_header *hdr;
u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0;
static u64 totcalls; /* stats, may eventually remove */
- char emsg[128];
if (!dd->ipath_hdrqtailptr) {
ipath_dev_err(dd,
@@ -938,26 +969,9 @@ reloop:
"%x\n", etype);
}
- if (eflags & ~(INFINIPATH_RHF_H_TIDERR |
- INFINIPATH_RHF_H_IHDRERR)) {
- get_rhf_errstring(eflags, emsg, sizeof emsg);
- ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
- "tlen=%x opcode=%x egridx=%x: %s\n",
- eflags, l, etype, tlen, bthbytes[0],
- ipath_hdrget_index((__le32 *) rc), emsg);
- /* Count local link integrity errors. */
- if (eflags & (INFINIPATH_RHF_H_ICRCERR |
- INFINIPATH_RHF_H_VCRCERR)) {
- u8 n = (dd->ipath_ibcctrl >>
- INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
- INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-
- if (++dd->ipath_lli_counter > n) {
- dd->ipath_lli_counter = 0;
- dd->ipath_lli_errors++;
- }
- }
- } else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
+ if (unlikely(eflags))
+ ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
+ else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
int ret = __ipath_verbs_rcv(dd, rc + 1,
ebuf, tlen);
if (ret == -ENODEV)
@@ -981,25 +995,7 @@ reloop:
else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
be32_to_cpu(hdr->bth[0]) & 0xff);
- else if (eflags & (INFINIPATH_RHF_H_TIDERR |
- INFINIPATH_RHF_H_IHDRERR)) {
- /*
- * This is a type 3 packet, only the LRH is in the
- * rcvhdrq, the rest of the header is in the eager
- * buffer.
- */
- u8 opcode;
- if (ebuf) {
- bthbytes = (u8 *) ebuf;
- opcode = *bthbytes;
- }
- else
- opcode = 0;
- get_rhf_errstring(eflags, emsg, sizeof emsg);
- ipath_dbg("Err %x (%s), opcode %x, egrbuf %x, "
- "len %x\n", eflags, emsg, opcode, etail,
- tlen);
- } else {
+ else {
/*
* error packet, type of error unknown.
* Probably type 3, but we don't know, so don't
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index 46773c673a1a..a5ca279370aa 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -197,6 +197,21 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
size_t off;
int ret;
+ /*
+ * We use RKEY == zero for physical addresses
+ * (see ipath_get_dma_mr).
+ */
+ if (rkey == 0) {
+ sge->mr = NULL;
+ sge->vaddr = phys_to_virt(vaddr);
+ sge->length = len;
+ sge->sge_length = len;
+ ss->sg_list = NULL;
+ ss->num_sge = 1;
+ ret = 1;
+ goto bail;
+ }
+
mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
if (unlikely(mr == NULL || mr->lkey != rkey)) {
ret = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 56ac336dd1ec..d70a9b6b5239 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -191,10 +191,6 @@ void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
{
struct ipath_sge *sge = &ss->sge;
- while (length > sge->sge_length) {
- length -= sge->sge_length;
- ss->sge = *ss->sg_list++;
- }
while (length) {
u32 len = sge->length;
@@ -627,6 +623,7 @@ static int ipath_query_device(struct ib_device *ibdev,
props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
IB_DEVICE_SYS_IMAGE_GUID;
+ props->page_size_cap = PAGE_SIZE;
props->vendor_id = ipath_layer_get_vendorid(dev->dd);
props->vendor_part_id = ipath_layer_get_deviceid(dev->dd);
props->hw_ver = ipath_layer_get_pcirev(dev->dd);
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index d0f7731802c9..deabc14b4ea4 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -778,11 +778,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
((dev->fw_ver & 0xffff0000ull) >> 16) |
((dev->fw_ver & 0x0000ffffull) << 16);
+ MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
+ dev->cmd.max_cmds = 1 << lg;
+
mthca_dbg(dev, "FW version %012llx, max commands %d\n",
(unsigned long long) dev->fw_ver, dev->cmd.max_cmds);
- MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
- dev->cmd.max_cmds = 1 << lg;
MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);
MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index fab417c5cf43..b60a9d79ae54 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -370,7 +370,8 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
return -EINVAL;
if (attr_mask & IB_SRQ_LIMIT) {
- if (attr->srq_limit > srq->max)
+ u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
+ if (attr->srq_limit > max_wr)
return -EINVAL;
mutex_lock(&srq->mutex);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 3f89f5e19036..474aa214ab57 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -212,6 +212,7 @@ struct ipoib_path {
struct ipoib_neigh {
struct ipoib_ah *ah;
+ union ib_gid dgid;
struct sk_buff_head queue;
struct neighbour *neighbour;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 1c6ea1c682a5..cf71d2a5515c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -404,6 +404,8 @@ static void path_rec_completion(int status,
list_for_each_entry(neigh, &path->neigh_list, list) {
kref_get(&path->ah->ref);
neigh->ah = path->ah;
+ memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
+ sizeof(union ib_gid));
while ((skb = __skb_dequeue(&neigh->queue)))
__skb_queue_tail(&skqueue, skb);
@@ -510,6 +512,8 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
if (path->ah) {
kref_get(&path->ah->ref);
neigh->ah = path->ah;
+ memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
+ sizeof(union ib_gid));
ipoib_send(dev, skb, path->ah,
be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
@@ -633,6 +637,25 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
neigh = *to_ipoib_neigh(skb->dst->neighbour);
if (likely(neigh->ah)) {
+ if (unlikely(memcmp(&neigh->dgid.raw,
+ skb->dst->neighbour->ha + 4,
+ sizeof(union ib_gid)))) {
+ spin_lock(&priv->lock);
+ /*
+ * It's safe to call ipoib_put_ah() inside
+ * priv->lock here, because we know that
+ * path->ah will always hold one more reference,
+ * so ipoib_put_ah() will never do more than
+ * decrement the ref count.
+ */
+ ipoib_put_ah(neigh->ah);
+ list_del(&neigh->list);
+ ipoib_neigh_free(neigh);
+ spin_unlock(&priv->lock);
+ ipoib_path_lookup(skb, dev);
+ goto out;
+ }
+
ipoib_send(dev, skb, neigh->ah,
be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
goto out;
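
Note: the ipoib hunks cache the path GID in each struct ipoib_neigh and, on transmit, compare it against the GID embedded in the neighbour's hardware address (which starts 4 bytes in); a mismatch means the cached address handle is stale, so the neighbour is torn down and the path looked up again. A tiny sketch of that comparison with a made-up struct neigh; the 4-byte offset mirrors the `ha + 4` in the hunk above:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define GID_LEN 16

struct neigh {
        uint8_t cached_dgid[GID_LEN];   /* GID captured when the AH was built */
        uint8_t ha[4 + GID_LEN];        /* 4 bytes of QPN, then the GID */
};

/* The hardware address can be rewritten behind our back (for example
 * after an address update), so verify it still matches the GID the
 * address handle was built for before using that handle. */
static int neigh_is_stale(const struct neigh *n)
{
        return memcmp(n->cached_dgid, n->ha + 4, GID_LEN) != 0;
}

int main(void)
{
        struct neigh n = { .cached_dgid = { 1, 2, 3 },
                           .ha = { 0, 0, 0, 0, 1, 2, 3 } };

        printf("stale? %d\n", neigh_is_stale(&n));
        n.ha[4] = 9;                    /* destination moved: GID changed */
        printf("stale? %d\n", neigh_is_stale(&n));
        return 0;
}
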
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index ab40488182b3..b5e6a7be603d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -264,6 +264,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
if (!ah) {
ipoib_warn(priv, "ib_address_create failed\n");
} else {
+ spin_lock_irq(&priv->lock);
+ mcast->ah = ah;
+ spin_unlock_irq(&priv->lock);
+
ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT
" AV %p, LID 0x%04x, SL %d\n",
IPOIB_GID_ARG(mcast->mcmember.mgid),
@@ -271,10 +275,6 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
be16_to_cpu(mcast->mcmember.mlid),
mcast->mcmember.sl);
}
-
- spin_lock_irq(&priv->lock);
- mcast->ah = ah;
- spin_unlock_irq(&priv->lock);
}
/* actually send any queued packets */
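
Note: the ipoib_multicast hunk publishes mcast->ah under priv->lock before the debug output, ahead of the code that flushes the packets queued while the join was in progress, so any path that checks the handle under the lock sees it as soon as it exists. A generic pthread sketch of publishing a shared pointer under a lock before draining a backlog; all names (shared_ah, backlog, send_one()) are hypothetical:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared_ah;                  /* stands in for mcast->ah */
static int backlog[3] = { 10, 11, 12 }; /* packets queued while joining */

static void send_one(int pkt, const int *ah)
{
        printf("sending %d via ah %d\n", pkt, *ah);
}

int main(void)
{
        int *ah = malloc(sizeof(*ah));
        int i;

        *ah = 99;

        /* Publish the handle first, under the lock ... */
        pthread_mutex_lock(&lock);
        shared_ah = ah;
        pthread_mutex_unlock(&lock);

        /* ... and only then drain the backlog, so every sender that
         * re-checks the handle under the lock finds it already set. */
        for (i = 0; i < 3; i++) {
                pthread_mutex_lock(&lock);
                if (shared_ah)
                        send_one(backlog[i], shared_ah);
                pthread_mutex_unlock(&lock);
        }
        free(ah);
        return 0;
}
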
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 1ef9fd39a79a..0e3fdf7c6dd3 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void)
{
if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) ||
(idprom->id_machtype == (SM_SUN4|SM_4_470))) {
- memset(&sun4_sdev, 0, sizeof(sdev));
+ memset(&sun4_sdev, 0, sizeof(struct sbus_dev));
sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr;
sun4_sdev.irqs[0] = 6;
return sparc_lance_probe_one(&sun4_sdev, NULL, NULL);
@@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void)
static int __exit sunlance_sun4_remove(void)
{
- struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev);
+ struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev);
struct net_device *net_dev = lp->dev;
unregister_netdevice(net_dev);
- lance_free_hwresources(root_lance_dev);
+ lance_free_hwresources(lp);
free_netdev(net_dev);
- dev_set_drvdata(&sun4_sdev->dev, NULL);
+ dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL);
return 0;
}
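
Note: the first sunlance hunk is the classic sizeof pitfall: passing sizeof of the wrong object to memset clears only part of the structure, and the fix names the structure type explicitly so the whole thing is zeroed. A stand-alone illustration of the pitfall with an invented struct sbus_like; the field layout is arbitrary:

#include <stdio.h>
#include <string.h>

struct sbus_like {
        int  irqs[4];
        long phys_addr;
        char pad[64];
};

int main(void)
{
        struct sbus_like dev;
        struct sbus_like *p = &dev;

        memset(&dev, 0xff, sizeof(dev));        /* dirty it first */

        /* Bug pattern: sizeof a pointer clears only 4 or 8 bytes. */
        memset(&dev, 0, sizeof(p));
        printf("after sizeof(p):   pad[0] = %d\n", (unsigned char)dev.pad[0]);

        /* Fix pattern: size the object (or its type) actually cleared. */
        memset(&dev, 0, sizeof(dev));
        printf("after sizeof(dev): pad[0] = %d\n", (unsigned char)dev.pad[0]);
        return 0;
}
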
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ce6f3be86da0..1b8138f641e3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.62"
-#define DRV_MODULE_RELDATE "June 30, 2006"
+#define DRV_MODULE_VERSION "3.63"
+#define DRV_MODULE_RELDATE "July 25, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -3590,6 +3590,28 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);
+/* Restart hardware after configuration changes, self-test, etc.
+ * Invoked with tp->lock held.
+ */
+static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
+{
+ int err;
+
+ err = tg3_init_hw(tp, reset_phy);
+ if (err) {
+ printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
+ "aborting.\n", tp->dev->name);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ tg3_full_unlock(tp);
+ del_timer_sync(&tp->timer);
+ tp->irq_sync = 0;
+ netif_poll_enable(tp->dev);
+ dev_close(tp->dev);
+ tg3_full_lock(tp, 0);
+ }
+ return err;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
@@ -3630,13 +3652,15 @@ static void tg3_reset_task(void *_data)
}
tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
- tg3_init_hw(tp, 1);
+ if (tg3_init_hw(tp, 1))
+ goto out;
tg3_netif_start(tp);
if (restart_timer)
mod_timer(&tp->timer, jiffies + 1);
+out:
tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
tg3_full_unlock(tp);
@@ -4124,6 +4148,7 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
struct tg3 *tp = netdev_priv(dev);
+ int err;
if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
return -EINVAL;
@@ -4144,13 +4169,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_set_mtu(dev, tp, new_mtu);
- tg3_init_hw(tp, 0);
+ err = tg3_restart_hw(tp, 0);
- tg3_netif_start(tp);
+ if (!err)
+ tg3_netif_start(tp);
tg3_full_unlock(tp);
- return 0;
+ return err;
}
/* Free up pending packets in all rx/tx rings.
@@ -4232,7 +4258,7 @@ static void tg3_free_rings(struct tg3 *tp)
* end up in the driver. tp->{tx,}lock are held and thus
* we may not sleep.
*/
-static void tg3_init_rings(struct tg3 *tp)
+static int tg3_init_rings(struct tg3 *tp)
{
u32 i;
@@ -4281,18 +4307,38 @@ static void tg3_init_rings(struct tg3 *tp)
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
- if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
- -1, i) < 0)
+ if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
+ printk(KERN_WARNING PFX
+ "%s: Using a smaller RX standard ring, "
+ "only %d out of %d buffers were allocated "
+ "successfully.\n",
+ tp->dev->name, i, tp->rx_pending);
+ if (i == 0)
+ return -ENOMEM;
+ tp->rx_pending = i;
break;
+ }
}
if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
for (i = 0; i < tp->rx_jumbo_pending; i++) {
if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
- -1, i) < 0)
+ -1, i) < 0) {
+ printk(KERN_WARNING PFX
+ "%s: Using a smaller RX jumbo ring, "
+ "only %d out of %d buffers were "
+ "allocated successfully.\n",
+ tp->dev->name, i, tp->rx_jumbo_pending);
+ if (i == 0) {
+ tg3_free_rings(tp);
+ return -ENOMEM;
+ }
+ tp->rx_jumbo_pending = i;
break;
+ }
}
}
+ return 0;
}
/*
@@ -5815,6 +5861,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
struct tg3 *tp = netdev_priv(dev);
struct sockaddr *addr = p;
+ int err = 0;
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
@@ -5832,9 +5879,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp, 0);
-
- tg3_netif_start(tp);
+ err = tg3_restart_hw(tp, 0);
+ if (!err)
+ tg3_netif_start(tp);
tg3_full_unlock(tp);
} else {
spin_lock_bh(&tp->lock);
@@ -5842,7 +5889,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
spin_unlock_bh(&tp->lock);
}
- return 0;
+ return err;
}
/* tp->lock is held. */
@@ -5942,7 +5989,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
* can only do this after the hardware has been
* successfully reset.
*/
- tg3_init_rings(tp);
+ err = tg3_init_rings(tp);
+ if (err)
+ return err;
/* This value is determined during the probe time DMA
* engine test, tg3_test_dma.
@@ -7956,7 +8005,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
struct tg3 *tp = netdev_priv(dev);
- int irq_sync = 0;
+ int irq_sync = 0, err = 0;
if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
(ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
@@ -7980,13 +8029,14 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp, 1);
- tg3_netif_start(tp);
+ err = tg3_restart_hw(tp, 1);
+ if (!err)
+ tg3_netif_start(tp);
}
tg3_full_unlock(tp);
- return 0;
+ return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
@@ -8001,7 +8051,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
struct tg3 *tp = netdev_priv(dev);
- int irq_sync = 0;
+ int irq_sync = 0, err = 0;
if (netif_running(dev)) {
tg3_netif_stop(tp);
@@ -8025,13 +8075,14 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_init_hw(tp, 1);
- tg3_netif_start(tp);
+ err = tg3_restart_hw(tp, 1);
+ if (!err)
+ tg3_netif_start(tp);
}
tg3_full_unlock(tp);
- return 0;
+ return err;
}
static u32 tg3_get_rx_csum(struct net_device *dev)
@@ -8666,7 +8717,9 @@ static int tg3_test_loopback(struct tg3 *tp)
if (!netif_running(tp->dev))
return TG3_LOOPBACK_FAILED;
- tg3_reset_hw(tp, 1);
+ err = tg3_reset_hw(tp, 1);
+ if (err)
+ return TG3_LOOPBACK_FAILED;
if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
err |= TG3_MAC_LOOPBACK_FAILED;
@@ -8740,8 +8793,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
if (netif_running(dev)) {
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp, 1);
- tg3_netif_start(tp);
+ if (!tg3_restart_hw(tp, 1))
+ tg3_netif_start(tp);
}
tg3_full_unlock(tp);
@@ -11699,7 +11752,8 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp, 1);
+ if (tg3_restart_hw(tp, 1))
+ goto out;
tp->timer.expires = jiffies + tp->timer_offset;
add_timer(&tp->timer);
@@ -11707,6 +11761,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_attach(dev);
tg3_netif_start(tp);
+out:
tg3_full_unlock(tp);
}
@@ -11733,16 +11788,19 @@ static int tg3_resume(struct pci_dev *pdev)
tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
- tg3_init_hw(tp, 1);
+ err = tg3_restart_hw(tp, 1);
+ if (err)
+ goto out;
tp->timer.expires = jiffies + tp->timer_offset;
add_timer(&tp->timer);
tg3_netif_start(tp);
+out:
tg3_full_unlock(tp);
- return 0;
+ return err;
}
static struct pci_driver tg3_driver = {
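
Note: the tg3 change wraps tg3_init_hw() in tg3_restart_hw(), which on failure tears the interface back down and propagates the error instead of silently continuing; the MTU, MAC-address, ethtool, self-test and resume paths now check that return value before calling tg3_netif_start(). A generic sketch of the pattern with hypothetical names (init_hw(), restart_hw(), change_mtu()):

#include <stdio.h>

static int init_hw(int fail)
{
        return fail ? -12 : 0;          /* pretend -ENOMEM on failure */
}

static void netif_start(void)    { printf("netif started\n"); }
static void halt_and_close(void) { printf("halted and closed\n"); }

/* Restart wrapper: on init failure, shut the device down cleanly and
 * hand the error back so callers do not start queues on dead hardware. */
static int restart_hw(int fail)
{
        int err = init_hw(fail);

        if (err)
                halt_and_close();
        return err;
}

static int change_mtu(int fail)
{
        int err = restart_hw(fail);

        if (!err)
                netif_start();
        return err;
}

int main(void)
{
        printf("ok path:   %d\n", change_mtu(0));
        printf("fail path: %d\n", change_mtu(1));
        return 0;
}
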
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index f26a2ee3aad8..3cba6c9fab11 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -152,7 +152,6 @@ ccwgroup_create(struct device *root,
struct ccwgroup_device *gdev;
int i;
int rc;
- int del_drvdata;
if (argc > 256) /* disallow dumb users */
return -EINVAL;
@@ -163,7 +162,6 @@ ccwgroup_create(struct device *root,
atomic_set(&gdev->onoff, 0);
- del_drvdata = 0;
for (i = 0; i < argc; i++) {
gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
@@ -180,10 +178,8 @@ ccwgroup_create(struct device *root,
rc = -EINVAL;
goto free_dev;
}
- }
- for (i = 0; i < argc; i++)
gdev->cdev[i]->dev.driver_data = gdev;
- del_drvdata = 1;
+ }
gdev->creator_id = creator_id;
gdev->count = argc;
@@ -226,9 +222,9 @@ error:
free_dev:
for (i = 0; i < argc; i++)
if (gdev->cdev[i]) {
- put_device(&gdev->cdev[i]->dev);
- if (del_drvdata)
+ if (gdev->cdev[i]->dev.driver_data == gdev)
gdev->cdev[i]->dev.driver_data = NULL;
+ put_device(&gdev->cdev[i]->dev);
}
kfree(gdev);
return rc;
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index ac6e0c7e43d9..7a39e0b0386c 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -152,7 +152,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
if (cdev->private->iretry) {
cdev->private->iretry--;
ret = cio_halt(sch);
- return (ret == 0) ? -EBUSY : ret;
+ if (ret != -EBUSY)
+ return (ret == 0) ? -EBUSY : ret;
}
/* halt io unsuccessful. */
cdev->private->iretry = 255; /* 255 clear retries. */
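
Note: the device_fsm.c fix keeps ccw_device_cancel_halt_clear() from returning while halt itself is refused with -EBUSY; only a halt that could be started makes the function report back to the caller, and a busy refusal now falls through so the code escalates to clear. A small sketch of that "retry, but keep escalating" shape; try_halt(), try_clear() and cancel_halt_clear() are invented stand-ins:

#include <stdio.h>
#include <errno.h>

static int halt_attempts = 3;

static int try_halt(void)
{
        return -EBUSY;                  /* pretend halt cannot even start */
}

static int try_clear(void)
{
        printf("escalated to clear\n");
        return 0;
}

/* If halt can be started, report -EBUSY and let the caller poll again;
 * if halt itself is refused as busy, fall through and clear instead. */
static int cancel_halt_clear(void)
{
        if (halt_attempts) {
                int ret;

                halt_attempts--;
                ret = try_halt();
                if (ret != -EBUSY)
                        return ret ? ret : -EBUSY;
        }
        return try_clear();
}

int main(void)
{
        printf("result: %d\n", cancel_halt_clear());
        return 0;
}
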
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 085db4826e0e..bdc6bb262bce 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -2152,29 +2152,23 @@ static int esp_do_data_finale(struct NCR_ESP *esp,
*/
static int esp_should_clear_sync(Scsi_Cmnd *sp)
{
- unchar cmd1 = sp->cmnd[0];
- unchar cmd2 = sp->data_cmnd[0];
+ unchar cmd = sp->cmnd[0];
/* These cases are for spinning up a disk and
* waiting for that spinup to complete.
*/
- if(cmd1 == START_STOP ||
- cmd2 == START_STOP)
+ if(cmd == START_STOP)
return 0;
- if(cmd1 == TEST_UNIT_READY ||
- cmd2 == TEST_UNIT_READY)
+ if(cmd == TEST_UNIT_READY)
return 0;
/* One more special case for SCSI tape drives,
* this is what is used to probe the device for
* completion of a rewind or tape load operation.
*/
- if(sp->device->type == TYPE_TAPE) {
- if(cmd1 == MODE_SENSE ||
- cmd2 == MODE_SENSE)
- return 0;
- }
+ if(sp->device->type == TYPE_TAPE && cmd == MODE_SENSE)
+ return 0;
return 1;
}
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 3e1053f111dc..4cf7afc31cc7 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2427,7 +2427,7 @@ int fas216_eh_abort(Scsi_Cmnd *SCpnt)
info->stats.aborts += 1;
printk(KERN_WARNING "scsi%d: abort command ", info->host->host_no);
- __scsi_print_command(SCpnt->data_cmnd);
+ __scsi_print_command(SCpnt->cmnd);
print_debug_list();
fas216_dumpstate(info);
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index eaf64c7e54e7..98bd22714d0d 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -2754,18 +2754,15 @@ static int esp_do_data_finale(struct esp *esp)
*/
static int esp_should_clear_sync(struct scsi_cmnd *sp)
{
- u8 cmd1 = sp->cmnd[0];
- u8 cmd2 = sp->data_cmnd[0];
+ u8 cmd = sp->cmnd[0];
/* These cases are for spinning up a disk and
* waiting for that spinup to complete.
*/
- if (cmd1 == START_STOP ||
- cmd2 == START_STOP)
+ if (cmd == START_STOP)
return 0;
- if (cmd1 == TEST_UNIT_READY ||
- cmd2 == TEST_UNIT_READY)
+ if (cmd == TEST_UNIT_READY)
return 0;
/* One more special case for SCSI tape drives,
@@ -2773,8 +2770,7 @@ static int esp_should_clear_sync(struct scsi_cmnd *sp)
* completion of a rewind or tape load operation.
*/
if (sp->device->type == TYPE_TAPE) {
- if (cmd1 == MODE_SENSE ||
- cmd2 == MODE_SENSE)
+ if (cmd == MODE_SENSE)
return 0;
}
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index a89c4115cfba..32293f451669 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -110,11 +110,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
sshdr.asc, sshdr.ascq);
break;
case NOT_READY: /* This happens if there is no disc in drive */
- if (sdev->removable && (cmd[0] != TEST_UNIT_READY)) {
- printk(KERN_INFO "Device not ready. Make sure"
- " there is a disc in the drive.\n");
+ if (sdev->removable)
break;
- }
case UNIT_ATTENTION:
if (sdev->removable) {
sdev->changed = 1;