Merge ../linux-2.6
commit 26925c5910
20 MAINTAINERS
@@ -58,7 +58,7 @@ P: Person
M: Mail patches to
L: Mailing list that is relevant to this area
W: Web-page with status/info
T: SCM tree type and URL. Type is one of: git, hg, quilt.
T: SCM tree type and location. Type is one of: git, hg, quilt.
S: Status, one of the following:

Supported: Someone is actually paid to look after this.
@@ -227,6 +227,7 @@ AGPGART DRIVER
P: Dave Jones
M: davej@codemonkey.org.uk
W: http://www.codemonkey.org.uk/projects/agp/
T: git kernel.org:/pub/scm/linux/kernel/git/davej/agpgart.git
S: Maintained

AHA152X SCSI DRIVER
@@ -384,6 +385,7 @@ P: David Woodhouse
M: dwmw2@infradead.org
L: linux-audit@redhat.com
W: http://people.redhat.com/sgrubb/audit/
T: git kernel.org:/pub/scm/linux/kernel/git/dwmw2/audit-2.6.git
S: Maintained

AX.25 NETWORK LAYER
@@ -432,6 +434,7 @@ L: bluez-devel@lists.sf.net
W: http://bluez.sf.net
W: http://www.bluez.org
W: http://www.holtmann.org/linux/bluetooth/
T: git kernel.org:/pub/scm/linux/kernel/git/holtmann/bluetooth-2.6.git
S: Maintained

BLUETOOTH RFCOMM LAYER
@@ -547,6 +550,7 @@ P: Steve French
M: sfrench@samba.org
L: samba-technical@lists.samba.org
W: http://us1.samba.org/samba/Linux_CIFS_client.html
T: git kernel.org:/pub/scm/linux/kernel/git/sfrench/cifs-2.6.git
S: Supported

CIRRUS LOGIC GENERIC FBDEV DRIVER
@@ -608,6 +612,7 @@ P: Dave Jones
M: davej@codemonkey.org.uk
L: cpufreq@lists.linux.org.uk
W: http://www.codemonkey.org.uk/projects/cpufreq/
T: git kernel.org/pub/scm/linux/kernel/davej/cpufreq.git
S: Maintained

CPUID/MSR DRIVER
@@ -641,6 +646,7 @@ M: herbert@gondor.apana.org.au
P: David S. Miller
M: davem@davemloft.net
L: linux-crypto@vger.kernel.org
T: git kernel.org:/pub/scm/linux/kernel/git/herbert/crypto-2.6.git
S: Maintained

CYBERPRO FB DRIVER
@@ -1185,6 +1191,7 @@ P: Bartlomiej Zolnierkiewicz
M: B.Zolnierkiewicz@elka.pw.edu.pl
L: linux-kernel@vger.kernel.org
L: linux-ide@vger.kernel.org
T: git kernel.org:/pub/scm/linux/kernel/git/bart/ide-2.6.git
S: Maintained

IDE/ATAPI CDROM DRIVER
@@ -1279,6 +1286,7 @@ P: Vojtech Pavlik
M: vojtech@suse.cz
L: linux-input@atrey.karlin.mff.cuni.cz
L: linux-joystick@atrey.karlin.mff.cuni.cz
T: git kernel.org:/pub/scm/linux/kernel/git/dtor/input.git
S: Maintained

INOTIFY
@@ -1392,6 +1400,7 @@ P: Kai Germaschewski
M: kai.germaschewski@gmx.de
L: isdn4linux@listserv.isdn4linux.de
W: http://www.isdn4linux.de
T: git kernel.org:/pub/scm/linux/kernel/kkeil/isdn-2.6.git
S: Maintained

ISDN SUBSYSTEM (Eicon active card driver)
@@ -1420,6 +1429,7 @@ P: Dave Kleikamp
M: shaggy@austin.ibm.com
L: jfs-discussion@lists.sourceforge.net
W: http://jfs.sourceforge.net/
T: git kernel.org:/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
S: Supported

KCONFIG
@@ -1534,6 +1544,7 @@ P: Paul Mackerras
M: paulus@samba.org
W: http://www.penguinppc.org/
L: linuxppc-dev@ozlabs.org
T: git kernel.org:/pub/scm/linux/kernel/git/paulus/powerpc.git
S: Supported

LINUX FOR POWER MACINTOSH
@@ -1601,6 +1612,7 @@ P: Chris Wright
M: chrisw@osdl.org
L: linux-security-module@wirex.com
W: http://lsm.immunix.org
T: git kernel.org:/pub/scm/linux/kernel/git/chrisw/lsm-2.6.git
S: Supported

LM83 HARDWARE MONITOR DRIVER
@@ -1695,6 +1707,7 @@ P: David Woodhouse
M: dwmw2@infradead.org
W: http://www.linux-mtd.infradead.org/
L: linux-mtd@lists.infradead.org
T: git kernel.org:/pub/scm/linux/kernel/git/tglx/mtd-2.6.git
S: Maintained

MICROTEK X6 SCANNER
@@ -1815,6 +1828,7 @@ M: yoshfuji@linux-ipv6.org
P: Patrick McHardy
M: kaber@coreworks.de
L: netdev@vger.kernel.org
T: git kernel.org:/pub/scm/linux/kernel/davem/net-2.6.git
S: Maintained

IPVS
@@ -1866,6 +1880,7 @@ M: aia21@cantab.net
L: linux-ntfs-dev@lists.sourceforge.net
L: linux-kernel@vger.kernel.org
W: http://linux-ntfs.sf.net/
T: git kernel.org:/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git
S: Maintained

NVIDIA (RIVA) FRAMEBUFFER DRIVER
@@ -2389,6 +2404,7 @@ P: Anton Blanchard
M: anton@samba.org
L: sparclinux@vger.kernel.org
L: ultralinux@vger.kernel.org
T: git kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6.git
S: Maintained

SHARP LH SUPPORT (LH7952X & LH7A40X)
@@ -2527,6 +2543,7 @@ P: Adrian Bunk
M: trivial@kernel.org
L: linux-kernel@vger.kernel.org
W: http://www.kernel.org/pub/linux/kernel/people/bunk/trivial/
T: git kernel.org:/pub/scm/linux/kernel/git/bunk/trivial.git
S: Maintained

TMS380 TOKEN-RING NETWORK DRIVER
@@ -2860,6 +2877,7 @@ P: Latchesar Ionkov
M: lucho@ionkov.net
L: v9fs-developer@lists.sourceforge.net
W: http://v9fs.sf.net
T: git kernel.org:/pub/scm/linux/kernel/ericvh/v9fs-devel.git
S: Maintained

VIDEO FOR LINUX

@@ -393,13 +393,6 @@ void flush_thread(void)
{
struct task_struct *tsk = current;

/*
* Remove function-return probe instances associated with this task
* and put them back on the free list. Do not insert an exit probe for
* this function, it will be disabled by kprobe_flush_task if you do.
*/
kprobe_flush_task(tsk);

memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
/*

@@ -132,7 +132,7 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum)
}
}

printk("PCI: Probing PCI hardware (bus %02x)\n", busnum);
printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum);

return pci_scan_bus_parented(NULL, busnum, &pci_root_ops, NULL);
}
@@ -144,7 +144,7 @@ static int __init pcibios_init(void)
struct cpuinfo_x86 *c = &boot_cpu_data;

if (!raw_pci_ops) {
printk("PCI: System does not support PCI\n");
printk(KERN_WARNING "PCI: System does not support PCI\n");
return 0;
}

@@ -201,7 +201,7 @@ static int __init pci_sanity_check(struct pci_raw_ops *o)
return 1;
}

DBG("PCI: Sanity check failed\n");
DBG(KERN_WARNING "PCI: Sanity check failed\n");
return 0;
}

@@ -221,6 +221,11 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
continue;

r = &dev->resource[idx];
if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
continue;
if ((idx == PCI_ROM_RESOURCE) &&
(!(r->flags & IORESOURCE_ROM_ENABLE)))
continue;
if (!r->start && r->end) {
printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", pci_name(dev));
return -EINVAL;
@@ -230,8 +235,6 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
if (r->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
if (dev->resource[PCI_ROM_RESOURCE].start)
cmd |= PCI_COMMAND_MEMORY;
if (cmd != old_cmd) {
printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
pci_write_config_word(dev, PCI_COMMAND, cmd);

@@ -718,13 +718,6 @@ kernel_thread_helper (int (*fn)(void *), void *arg)
void
flush_thread (void)
{
/*
* Remove function-return probe instances associated with this task
* and put them back on the free list. Do not insert an exit probe for
* this function, it will be disabled by kprobe_flush_task if you do.
*/
kprobe_flush_task(current);

/* drop floating-point and debug-register state if it exists: */
current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
ia64_drop_fpu(current);

@@ -457,7 +457,6 @@ void flush_thread(void)
if (t->flags & _TIF_ABI_PENDING)
t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
#endif
kprobe_flush_task(current);

#ifndef CONFIG_SMP
if (last_task_used_math == current)

@@ -110,13 +110,11 @@ unsigned long __init mmu_mapin_ram(void)
pmd_t *pmdp;
unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;

spin_lock(&init_mm.page_table_lock);
pmdp = pmd_offset(pgd_offset_k(v), v);
pmd_val(*pmdp++) = val;
pmd_val(*pmdp++) = val;
pmd_val(*pmdp++) = val;
pmd_val(*pmdp++) = val;
spin_unlock(&init_mm.page_table_lock);

v += LARGE_PAGE_SIZE_16M;
p += LARGE_PAGE_SIZE_16M;
@@ -127,10 +125,8 @@ unsigned long __init mmu_mapin_ram(void)
pmd_t *pmdp;
unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;

spin_lock(&init_mm.page_table_lock);
pmdp = pmd_offset(pgd_offset_k(v), v);
pmd_val(*pmdp) = val;
spin_unlock(&init_mm.page_table_lock);

v += LARGE_PAGE_SIZE_4M;
p += LARGE_PAGE_SIZE_4M;

@@ -287,15 +287,15 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)

int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
int err;
int err = 0;

if ( (addr+len) < addr )
return -EINVAL;

if ((addr + len) < 0x100000000UL)
if (addr < 0x100000000UL)
err = open_low_hpage_areas(current->mm,
LOW_ESID_MASK(addr, len));
else
if ((addr + len) >= 0x100000000UL)
err = open_high_hpage_areas(current->mm,
HTLB_AREA_MASK(addr, len));
if (err) {
@@ -754,9 +754,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
}

/*
* No need to use ldarx/stdcx here because all who
* might be updating the pte will hold the
* page_table_lock
* No need to use ldarx/stdcx here
*/
*ptep = __pte(new_pte & ~_PAGE_BUSY);

@@ -495,7 +495,7 @@ EXPORT_SYMBOL(flush_icache_user_range);
* We use it to preload an HPTE into the hash table corresponding to
* the updated linux PTE.
*
* This must always be called with the mm->page_table_lock held
* This must always be called with the pte lock held.
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t pte)

@@ -149,6 +149,12 @@ void flush_tlb_mm(struct mm_struct *mm)
return;
}

/*
* It is safe to go down the mm's list of vmas when called
* from dup_mmap, holding mmap_sem. It would also be safe from
* unmap_region or exit_mmap, but not from vmtruncate on SMP -
* but it seems dup_mmap is the only SMP case which gets here.
*/
for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
FINISH_FLUSH;

@@ -95,7 +95,7 @@ static void pte_free_submit(struct pte_freelist_batch *batch)

void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
/* This is safe as we are holding page_table_lock */
/* This is safe since tlb_gather_mmu has disabled preemption */
cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

@@ -206,7 +206,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)

void pte_free_finish(void)
{
/* This is safe as we are holding page_table_lock */
/* This is safe since tlb_gather_mmu has disabled preemption */
struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

if (*batchp == NULL)

@@ -351,13 +351,6 @@ void flush_thread(void)
struct task_struct *tsk = current;
struct thread_info *t = current_thread_info();

/*
* Remove function-return probe instances associated with this task
* and put them back on the free list. Do not insert an exit probe for
* this function, it will be disabled by kprobe_flush_task if you do.
*/
kprobe_flush_task(tsk);

if (t->flags & _TIF_ABI_PENDING)
t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);

@ -133,7 +133,7 @@ static struct kobj_type ktype_bus = {
|
|||
decl_subsys(bus, &ktype_bus, NULL);
|
||||
|
||||
|
||||
/* Manually detach a device from it's associated driver. */
|
||||
/* Manually detach a device from its associated driver. */
|
||||
static int driver_helper(struct device *dev, void *data)
|
||||
{
|
||||
const char *name = data;
|
||||
|
@ -151,14 +151,13 @@ static ssize_t driver_unbind(struct device_driver *drv,
|
|||
int err = -ENODEV;
|
||||
|
||||
dev = bus_find_device(bus, NULL, (void *)buf, driver_helper);
|
||||
if ((dev) &&
|
||||
(dev->driver == drv)) {
|
||||
if (dev && dev->driver == drv) {
|
||||
device_release_driver(dev);
|
||||
err = count;
|
||||
}
|
||||
if (err)
|
||||
return err;
|
||||
return count;
|
||||
put_device(dev);
|
||||
put_bus(bus);
|
||||
return err;
|
||||
}
|
||||
static DRIVER_ATTR(unbind, S_IWUSR, NULL, driver_unbind);
|
||||
|
||||
|
@ -175,16 +174,14 @@ static ssize_t driver_bind(struct device_driver *drv,
|
|||
int err = -ENODEV;
|
||||
|
||||
dev = bus_find_device(bus, NULL, (void *)buf, driver_helper);
|
||||
if ((dev) &&
|
||||
(dev->driver == NULL)) {
|
||||
if (dev && dev->driver == NULL) {
|
||||
down(&dev->sem);
|
||||
err = driver_probe_device(drv, dev);
|
||||
up(&dev->sem);
|
||||
put_device(dev);
|
||||
}
|
||||
if (err)
|
||||
return err;
|
||||
return count;
|
||||
put_device(dev);
|
||||
put_bus(bus);
|
||||
return err;
|
||||
}
|
||||
static DRIVER_ATTR(bind, S_IWUSR, NULL, driver_bind);
|
||||
|
||||
|
|
|
@ -62,7 +62,6 @@ void device_bind_driver(struct device * dev)
|
|||
* because we don't know the format of the ID structures, nor what
|
||||
* is to be considered a match and what is not.
|
||||
*
|
||||
*
|
||||
* This function returns 1 if a match is found, an error if one
|
||||
* occurs (that is not -ENODEV or -ENXIO), and 0 otherwise.
|
||||
*
|
||||
|
@ -158,7 +157,6 @@ static int __driver_attach(struct device * dev, void * data)
|
|||
driver_probe_device(drv, dev);
|
||||
up(&dev->sem);
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -225,15 +223,15 @@ void driver_detach(struct device_driver * drv)
|
|||
struct device * dev;
|
||||
|
||||
for (;;) {
|
||||
spin_lock_irq(&drv->klist_devices.k_lock);
|
||||
spin_lock(&drv->klist_devices.k_lock);
|
||||
if (list_empty(&drv->klist_devices.k_list)) {
|
||||
spin_unlock_irq(&drv->klist_devices.k_lock);
|
||||
spin_unlock(&drv->klist_devices.k_lock);
|
||||
break;
|
||||
}
|
||||
dev = list_entry(drv->klist_devices.k_list.prev,
|
||||
struct device, knode_driver.n_node);
|
||||
get_device(dev);
|
||||
spin_unlock_irq(&drv->klist_devices.k_lock);
|
||||
spin_unlock(&drv->klist_devices.k_lock);
|
||||
|
||||
down(&dev->sem);
|
||||
if (dev->driver == drv)
|
||||
|
|
|
@ -3714,12 +3714,6 @@ static int floppy_open(struct inode *inode, struct file *filp)
|
|||
USETF(FD_VERIFY);
|
||||
}
|
||||
|
||||
/* set underlying gendisk policy to reflect real ro/rw status */
|
||||
if (UTESTF(FD_DISK_WRITABLE))
|
||||
inode->i_bdev->bd_disk->policy = 0;
|
||||
else
|
||||
inode->i_bdev->bd_disk->policy = 1;
|
||||
|
||||
if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (filp->f_flags & O_EXCL)))
|
||||
goto out2;
|
||||
|
||||
|
|
|
@ -95,7 +95,7 @@ unsigned long drm_alloc_pages(int order, int area)
|
|||
unsigned long addr;
|
||||
unsigned int sz;
|
||||
|
||||
address = __get_free_pages(GFP_KERNEL, order);
|
||||
address = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
|
||||
if (!address)
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -221,7 +221,7 @@ unsigned long DRM(alloc_pages) (int order, int area) {
|
|||
}
|
||||
spin_unlock(&DRM(mem_lock));
|
||||
|
||||
address = __get_free_pages(GFP_KERNEL, order);
|
||||
address = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
|
||||
if (!address) {
|
||||
spin_lock(&DRM(mem_lock));
|
||||
++DRM(mem_stats)[area].fail_count;
|
||||
|
|
|
@ -161,7 +161,7 @@ static int mga_driver_device_is_agp(drm_device_t * dev)
|
|||
* device.
|
||||
*/
|
||||
|
||||
if ((pdev->device == 0x0525)
|
||||
if ((pdev->device == 0x0525) && pdev->bus->self
|
||||
&& (pdev->bus->self->vendor == 0x3388)
|
||||
&& (pdev->bus->self->device == 0x0021)) {
|
||||
return 0;
|
||||
|
|
|
@ -214,8 +214,6 @@ typedef struct drm_radeon_private {
|
|||
|
||||
int microcode_version;
|
||||
|
||||
int is_pci;
|
||||
|
||||
struct {
|
||||
u32 boxes;
|
||||
int freelist_timeouts;
|
||||
|
@ -275,6 +273,7 @@ typedef struct drm_radeon_private {
|
|||
|
||||
/* starting from here on, data is preserved accross an open */
|
||||
uint32_t flags; /* see radeon_chip_flags */
|
||||
int is_pci;
|
||||
} drm_radeon_private_t;
|
||||
|
||||
typedef struct drm_radeon_buf_priv {
|
||||
|
|
|
@ -570,7 +570,7 @@ static int __init hdaps_init(void)
|
|||
hdaps_idev->evbit[0] = BIT(EV_ABS);
|
||||
input_set_abs_params(hdaps_idev, ABS_X,
|
||||
-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
|
||||
input_set_abs_params(hdaps_idev, ABS_X,
|
||||
input_set_abs_params(hdaps_idev, ABS_Y,
|
||||
-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
|
||||
|
||||
input_register_device(hdaps_idev);
|
||||
|
|
|
@ -522,8 +522,15 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
|
|||
struct i2c_client *client = to_i2c_client(dev);
|
||||
struct it87_data *data = i2c_get_clientdata(client);
|
||||
int val = simple_strtol(buf, NULL, 10);
|
||||
u8 reg = it87_read_value(client, IT87_REG_FAN_DIV);
|
||||
|
||||
down(&data->update_lock);
|
||||
switch (nr) {
|
||||
case 0: data->fan_div[nr] = reg & 0x07; break;
|
||||
case 1: data->fan_div[nr] = (reg >> 3) & 0x07; break;
|
||||
case 2: data->fan_div[nr] = (reg & 0x40) ? 3 : 1; break;
|
||||
}
|
||||
|
||||
data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
|
||||
it87_write_value(client, IT87_REG_FAN_MIN(nr), data->fan_min[nr]);
|
||||
up(&data->update_lock);
|
||||
|
|
|
@ -451,7 +451,7 @@ static DEVICE_ATTR(fan3_div, S_IRUGO, show_fan_3_div, NULL);
|
|||
static ssize_t show_vid(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct lm78_data *data = lm78_update_device(dev);
|
||||
return sprintf(buf, "%d\n", vid_from_reg(82, data->vid));
|
||||
return sprintf(buf, "%d\n", vid_from_reg(data->vid, 82));
|
||||
}
|
||||
static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
|
||||
|
||||
|
|
|
@ -456,7 +456,9 @@ static ssize_t store_regs_in_min0(struct device *dev, struct device_attribute *a
|
|||
(w83627thf == data->type || w83637hf == data->type))
|
||||
|
||||
/* use VRM9 calculation */
|
||||
data->in_min[0] = (u8)(((val * 100) - 70000 + 244) / 488);
|
||||
data->in_min[0] =
|
||||
SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488, 0,
|
||||
255);
|
||||
else
|
||||
/* use VRM8 (standard) calculation */
|
||||
data->in_min[0] = IN_TO_REG(val);
|
||||
|
@ -481,7 +483,9 @@ static ssize_t store_regs_in_max0(struct device *dev, struct device_attribute *a
|
|||
(w83627thf == data->type || w83637hf == data->type))
|
||||
|
||||
/* use VRM9 calculation */
|
||||
data->in_max[0] = (u8)(((val * 100) - 70000 + 244) / 488);
|
||||
data->in_max[0] =
|
||||
SENSORS_LIMIT(((val * 100) - 70000 + 244) / 488, 0,
|
||||
255);
|
||||
else
|
||||
/* use VRM8 (standard) calculation */
|
||||
data->in_max[0] = IN_TO_REG(val);
|
||||
|
|
|
@ -59,7 +59,6 @@ struct slot {
|
|||
struct slot *next;
|
||||
u8 bus;
|
||||
u8 device;
|
||||
u16 status;
|
||||
u32 number;
|
||||
u8 state;
|
||||
struct timer_list task_event;
|
||||
|
|
|
@ -207,7 +207,6 @@ u8 pciehp_handle_power_fault(u8 hp_slot, void *inst_id)
|
|||
* power fault Cleared
|
||||
*/
|
||||
info("Power fault cleared on Slot(%d)\n", ctrl->first_slot + hp_slot);
|
||||
p_slot->status = 0x00;
|
||||
taskInfo->event_type = INT_POWER_FAULT_CLEAR;
|
||||
} else {
|
||||
/*
|
||||
|
@ -215,8 +214,6 @@ u8 pciehp_handle_power_fault(u8 hp_slot, void *inst_id)
|
|||
*/
|
||||
info("Power fault on Slot(%d)\n", ctrl->first_slot + hp_slot);
|
||||
taskInfo->event_type = INT_POWER_FAULT;
|
||||
/* set power fault status for this board */
|
||||
p_slot->status = 0xFF;
|
||||
info("power fault bit %x set\n", hp_slot);
|
||||
}
|
||||
if (rc)
|
||||
|
@ -317,13 +314,10 @@ static int board_added(struct slot *p_slot)
|
|||
return rc;
|
||||
}
|
||||
|
||||
dbg("%s: slot status = %x\n", __FUNCTION__, p_slot->status);
|
||||
|
||||
/* Check for a power fault */
|
||||
if (p_slot->status == 0xFF) {
|
||||
/* power fault occurred, but it was benign */
|
||||
if (p_slot->hpc_ops->query_power_fault(p_slot)) {
|
||||
dbg("%s: power fault detected\n", __FUNCTION__);
|
||||
rc = POWER_FAILURE;
|
||||
p_slot->status = 0;
|
||||
goto err_exit;
|
||||
}
|
||||
|
||||
|
@ -334,8 +328,6 @@ static int board_added(struct slot *p_slot)
|
|||
goto err_exit;
|
||||
}
|
||||
|
||||
p_slot->status = 0;
|
||||
|
||||
/*
|
||||
* Some PCI Express root ports require fixup after hot-plug operation.
|
||||
*/
|
||||
|
@ -382,9 +374,6 @@ static int remove_board(struct slot *p_slot)
|
|||
|
||||
dbg("In %s, hp_slot = %d\n", __FUNCTION__, hp_slot);
|
||||
|
||||
/* Change status to shutdown */
|
||||
p_slot->status = 0x01;
|
||||
|
||||
/* Wait for exclusive access to hardware */
|
||||
down(&ctrl->crit_sect);
|
||||
|
||||
|
|
|
@ -750,7 +750,7 @@ static int hpc_power_on_slot(struct slot * slot)
|
|||
{
|
||||
struct php_ctlr_state_s *php_ctlr = slot->ctrl->hpc_ctlr_handle;
|
||||
u16 slot_cmd;
|
||||
u16 slot_ctrl;
|
||||
u16 slot_ctrl, slot_status;
|
||||
|
||||
int retval = 0;
|
||||
|
||||
|
@ -767,6 +767,14 @@ static int hpc_power_on_slot(struct slot * slot)
|
|||
return -1;
|
||||
}
|
||||
|
||||
/* Clear sticky power-fault bit from previous power failures */
|
||||
hp_register_read_word(php_ctlr->pci_dev,
|
||||
SLOT_STATUS(slot->ctrl->cap_base), slot_status);
|
||||
slot_status &= PWR_FAULT_DETECTED;
|
||||
if (slot_status)
|
||||
hp_register_write_word(php_ctlr->pci_dev,
|
||||
SLOT_STATUS(slot->ctrl->cap_base), slot_status);
|
||||
|
||||
retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), slot_ctrl);
|
||||
|
||||
if (retval) {
|
||||
|
|
|
@ -178,6 +178,7 @@ EXPORT_SYMBOL(pci_osc_support_set);
|
|||
|
||||
/**
|
||||
* pci_osc_control_set - commit requested control to Firmware
|
||||
* @handle: acpi_handle for the target ACPI object
|
||||
* @flags: driver's requested control bits
|
||||
*
|
||||
* Attempt to take control from Firmware on requested control bits.
|
||||
|
|
|
@ -20,9 +20,17 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/usb.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <asm/irq.h>
|
||||
#include <linux/usb.h>
|
||||
|
||||
#ifdef CONFIG_PPC_PMAC
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/pmac_feature.h>
|
||||
#include <asm/pci-bridge.h>
|
||||
#include <asm/prom.h>
|
||||
#endif
|
||||
|
||||
#include "usb.h"
|
||||
#include "hcd.h"
|
||||
|
@ -277,8 +285,22 @@ int usb_hcd_pci_suspend (struct pci_dev *dev, pm_message_t message)
|
|||
}
|
||||
|
||||
done:
|
||||
if (retval == 0)
|
||||
if (retval == 0) {
|
||||
dev->dev.power.power_state = PMSG_SUSPEND;
|
||||
|
||||
#ifdef CONFIG_PPC_PMAC
|
||||
/* Disable ASIC clocks for USB */
|
||||
if (_machine == _MACH_Pmac) {
|
||||
struct device_node *of_node;
|
||||
|
||||
of_node = pci_device_to_OF_node (dev);
|
||||
if (of_node)
|
||||
pmac_call_feature(PMAC_FTR_USB_ENABLE,
|
||||
of_node, 0, 0);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
return retval;
|
||||
}
|
||||
EXPORT_SYMBOL (usb_hcd_pci_suspend);
|
||||
|
@ -301,6 +323,18 @@ int usb_hcd_pci_resume (struct pci_dev *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PPC_PMAC
|
||||
/* Reenable ASIC clocks for USB */
|
||||
if (_machine == _MACH_Pmac) {
|
||||
struct device_node *of_node;
|
||||
|
||||
of_node = pci_device_to_OF_node (dev);
|
||||
if (of_node)
|
||||
pmac_call_feature (PMAC_FTR_USB_ENABLE,
|
||||
of_node, 0, 1);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* NOTE: chip docs cover clean "real suspend" cases (what Linux
|
||||
* calls "standby", "suspend to RAM", and so on). There are also
|
||||
* dirty cases when swsusp fakes a suspend in "shutdown" mode.
|
||||
|
|
|
@ -1669,7 +1669,6 @@ int usb_suspend_device(struct usb_device *udev)
|
|||
return 0;
|
||||
#endif
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(usb_suspend_device);
|
||||
|
||||
/*
|
||||
* If the USB "suspend" state is in use (rather than "global suspend"),
|
||||
|
|
|
@ -411,50 +411,39 @@ static void ehci_stop (struct usb_hcd *hcd)
|
|||
dbg_status (ehci, "ehci_stop completed", readl (&ehci->regs->status));
|
||||
}
|
||||
|
||||
static int ehci_run (struct usb_hcd *hcd)
|
||||
/* one-time init, only for memory state */
|
||||
static int ehci_init(struct usb_hcd *hcd)
|
||||
{
|
||||
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
|
||||
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
|
||||
u32 temp;
|
||||
int retval;
|
||||
u32 hcc_params;
|
||||
int first;
|
||||
|
||||
/* skip some things on restart paths */
|
||||
first = (ehci->watchdog.data == 0);
|
||||
if (first) {
|
||||
init_timer (&ehci->watchdog);
|
||||
ehci->watchdog.function = ehci_watchdog;
|
||||
ehci->watchdog.data = (unsigned long) ehci;
|
||||
}
|
||||
spin_lock_init(&ehci->lock);
|
||||
|
||||
init_timer(&ehci->watchdog);
|
||||
ehci->watchdog.function = ehci_watchdog;
|
||||
ehci->watchdog.data = (unsigned long) ehci;
|
||||
|
||||
/*
|
||||
* hw default: 1K periodic list heads, one per frame.
|
||||
* periodic_size can shrink by USBCMD update if hcc_params allows.
|
||||
*/
|
||||
ehci->periodic_size = DEFAULT_I_TDPS;
|
||||
if (first && (retval = ehci_mem_init (ehci, GFP_KERNEL)) < 0)
|
||||
if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
|
||||
return retval;
|
||||
|
||||
/* controllers may cache some of the periodic schedule ... */
|
||||
hcc_params = readl (&ehci->caps->hcc_params);
|
||||
if (HCC_ISOC_CACHE (hcc_params)) // full frame cache
|
||||
hcc_params = readl(&ehci->caps->hcc_params);
|
||||
if (HCC_ISOC_CACHE(hcc_params)) // full frame cache
|
||||
ehci->i_thresh = 8;
|
||||
else // N microframes cached
|
||||
ehci->i_thresh = 2 + HCC_ISOC_THRES (hcc_params);
|
||||
ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
|
||||
|
||||
ehci->reclaim = NULL;
|
||||
ehci->reclaim_ready = 0;
|
||||
ehci->next_uframe = -1;
|
||||
|
||||
/* controller state: unknown --> reset */
|
||||
|
||||
/* EHCI spec section 4.1 */
|
||||
if ((retval = ehci_reset (ehci)) != 0) {
|
||||
ehci_mem_cleanup (ehci);
|
||||
return retval;
|
||||
}
|
||||
writel (ehci->periodic_dma, &ehci->regs->frame_list);
|
||||
|
||||
/*
|
||||
* dedicate a qh for the async ring head, since we couldn't unlink
|
||||
* a 'real' qh without stopping the async schedule [4.8]. use it
|
||||
|
@ -462,37 +451,13 @@ static int ehci_run (struct usb_hcd *hcd)
|
|||
* its dummy is used in hw_alt_next of many tds, to prevent the qh
|
||||
* from automatically advancing to the next td after short reads.
|
||||
*/
|
||||
if (first) {
|
||||
ehci->async->qh_next.qh = NULL;
|
||||
ehci->async->hw_next = QH_NEXT (ehci->async->qh_dma);
|
||||
ehci->async->hw_info1 = cpu_to_le32 (QH_HEAD);
|
||||
ehci->async->hw_token = cpu_to_le32 (QTD_STS_HALT);
|
||||
ehci->async->hw_qtd_next = EHCI_LIST_END;
|
||||
ehci->async->qh_state = QH_STATE_LINKED;
|
||||
ehci->async->hw_alt_next = QTD_NEXT (ehci->async->dummy->qtd_dma);
|
||||
}
|
||||
writel ((u32)ehci->async->qh_dma, &ehci->regs->async_next);
|
||||
|
||||
/*
|
||||
* hcc_params controls whether ehci->regs->segment must (!!!)
|
||||
* be used; it constrains QH/ITD/SITD and QTD locations.
|
||||
* pci_pool consistent memory always uses segment zero.
|
||||
* streaming mappings for I/O buffers, like pci_map_single(),
|
||||
* can return segments above 4GB, if the device allows.
|
||||
*
|
||||
* NOTE: the dma mask is visible through dma_supported(), so
|
||||
* drivers can pass this info along ... like NETIF_F_HIGHDMA,
|
||||
* Scsi_Host.highmem_io, and so forth. It's readonly to all
|
||||
* host side drivers though.
|
||||
*/
|
||||
if (HCC_64BIT_ADDR (hcc_params)) {
|
||||
writel (0, &ehci->regs->segment);
|
||||
#if 0
|
||||
// this is deeply broken on almost all architectures
|
||||
if (!dma_set_mask (hcd->self.controller, DMA_64BIT_MASK))
|
||||
ehci_info (ehci, "enabled 64bit DMA\n");
|
||||
#endif
|
||||
}
|
||||
ehci->async->qh_next.qh = NULL;
|
||||
ehci->async->hw_next = QH_NEXT(ehci->async->qh_dma);
|
||||
ehci->async->hw_info1 = cpu_to_le32(QH_HEAD);
|
||||
ehci->async->hw_token = cpu_to_le32(QTD_STS_HALT);
|
||||
ehci->async->hw_qtd_next = EHCI_LIST_END;
|
||||
ehci->async->qh_state = QH_STATE_LINKED;
|
||||
ehci->async->hw_alt_next = QTD_NEXT(ehci->async->dummy->qtd_dma);
|
||||
|
||||
/* clear interrupt enables, set irq latency */
|
||||
if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
|
||||
|
@ -507,13 +472,13 @@ static int ehci_run (struct usb_hcd *hcd)
|
|||
* make problems: throughput reduction (!), data errors...
|
||||
*/
|
||||
if (park) {
|
||||
park = min (park, (unsigned) 3);
|
||||
park = min(park, (unsigned) 3);
|
||||
temp |= CMD_PARK;
|
||||
temp |= park << 8;
|
||||
}
|
||||
ehci_info (ehci, "park %d\n", park);
|
||||
ehci_dbg(ehci, "park %d\n", park);
|
||||
}
|
||||
if (HCC_PGM_FRAMELISTLEN (hcc_params)) {
|
||||
if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
|
||||
/* periodic schedule size can be smaller than default */
|
||||
temp &= ~(3 << 2);
|
||||
temp |= (EHCI_TUNE_FLS << 2);
|
||||
|
@ -521,16 +486,63 @@ static int ehci_run (struct usb_hcd *hcd)
|
|||
case 0: ehci->periodic_size = 1024; break;
|
||||
case 1: ehci->periodic_size = 512; break;
|
||||
case 2: ehci->periodic_size = 256; break;
|
||||
default: BUG ();
|
||||
default: BUG();
|
||||
}
|
||||
}
|
||||
ehci->command = temp;
|
||||
|
||||
ehci->reboot_notifier.notifier_call = ehci_reboot;
|
||||
register_reboot_notifier(&ehci->reboot_notifier);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* start HC running; it's halted, ehci_init() has been run (once) */
|
||||
static int ehci_run (struct usb_hcd *hcd)
|
||||
{
|
||||
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
|
||||
int retval;
|
||||
u32 temp;
|
||||
u32 hcc_params;
|
||||
|
||||
/* EHCI spec section 4.1 */
|
||||
if ((retval = ehci_reset(ehci)) != 0) {
|
||||
unregister_reboot_notifier(&ehci->reboot_notifier);
|
||||
ehci_mem_cleanup(ehci);
|
||||
return retval;
|
||||
}
|
||||
writel(ehci->periodic_dma, &ehci->regs->frame_list);
|
||||
writel((u32)ehci->async->qh_dma, &ehci->regs->async_next);
|
||||
|
||||
/*
|
||||
* hcc_params controls whether ehci->regs->segment must (!!!)
|
||||
* be used; it constrains QH/ITD/SITD and QTD locations.
|
||||
* pci_pool consistent memory always uses segment zero.
|
||||
* streaming mappings for I/O buffers, like pci_map_single(),
|
||||
* can return segments above 4GB, if the device allows.
|
||||
*
|
||||
* NOTE: the dma mask is visible through dma_supported(), so
|
||||
* drivers can pass this info along ... like NETIF_F_HIGHDMA,
|
||||
* Scsi_Host.highmem_io, and so forth. It's readonly to all
|
||||
* host side drivers though.
|
||||
*/
|
||||
hcc_params = readl(&ehci->caps->hcc_params);
|
||||
if (HCC_64BIT_ADDR(hcc_params)) {
|
||||
writel(0, &ehci->regs->segment);
|
||||
#if 0
|
||||
// this is deeply broken on almost all architectures
|
||||
if (!dma_set_mask(hcd->self.controller, DMA_64BIT_MASK))
|
||||
ehci_info(ehci, "enabled 64bit DMA\n");
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
// Philips, Intel, and maybe others need CMD_RUN before the
|
||||
// root hub will detect new devices (why?); NEC doesn't
|
||||
temp |= CMD_RUN;
|
||||
writel (temp, &ehci->regs->command);
|
||||
dbg_cmd (ehci, "init", temp);
|
||||
|
||||
/* set async sleep time = 10 us ... ? */
|
||||
ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
|
||||
ehci->command |= CMD_RUN;
|
||||
writel (ehci->command, &ehci->regs->command);
|
||||
dbg_cmd (ehci, "init", ehci->command);
|
||||
|
||||
/*
|
||||
* Start, enabling full USB 2.0 functionality ... usb 1.1 devices
|
||||
|
@ -538,26 +550,23 @@ static int ehci_run (struct usb_hcd *hcd)
|
|||
* involved with the root hub. (Except where one is integrated,
|
||||
* and there's no companion controller unless maybe for USB OTG.)
|
||||
*/
|
||||
if (first) {
|
||||
ehci->reboot_notifier.notifier_call = ehci_reboot;
|
||||
register_reboot_notifier (&ehci->reboot_notifier);
|
||||
}
|
||||
|
||||
hcd->state = HC_STATE_RUNNING;
|
||||
writel (FLAG_CF, &ehci->regs->configured_flag);
|
||||
readl (&ehci->regs->command); /* unblock posted write */
|
||||
readl (&ehci->regs->command); /* unblock posted writes */
|
||||
|
||||
temp = HC_VERSION(readl (&ehci->caps->hc_capbase));
|
||||
ehci_info (ehci,
|
||||
"USB %x.%x %s, EHCI %x.%02x, driver %s\n",
|
||||
"USB %x.%x started, EHCI %x.%02x, driver %s\n",
|
||||
((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f),
|
||||
first ? "initialized" : "restarted",
|
||||
temp >> 8, temp & 0xff, DRIVER_VERSION);
|
||||
|
||||
writel (INTR_MASK, &ehci->regs->intr_enable); /* Turn On Interrupts */
|
||||
|
||||
if (first)
|
||||
create_debug_files (ehci);
|
||||
/* GRR this is run-once init(), being done every time the HC starts.
|
||||
* So long as they're part of class devices, we can't do it init()
|
||||
* since the class device isn't created that early.
|
||||
*/
|
||||
create_debug_files(ehci);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -636,9 +645,8 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd, struct pt_regs *regs)
|
|||
* stop that signaling.
|
||||
*/
|
||||
ehci->reset_done [i] = jiffies + msecs_to_jiffies (20);
|
||||
mod_timer (&hcd->rh_timer,
|
||||
ehci->reset_done [i] + 1);
|
||||
ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
|
||||
usb_hcd_resume_root_hub(hcd);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -94,6 +94,13 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
|
|||
msleep(5);
|
||||
spin_lock_irq (&ehci->lock);
|
||||
|
||||
/* Ideally and we've got a real resume here, and no port's power
|
||||
* was lost. (For PCI, that means Vaux was maintained.) But we
|
||||
* could instead be restoring a swsusp snapshot -- so that BIOS was
|
||||
* the last user of the controller, not reset/pm hardware keeping
|
||||
* state we gave to it.
|
||||
*/
|
||||
|
||||
/* re-init operational registers in case we lost power */
|
||||
if (readl (&ehci->regs->intr_enable) == 0) {
|
||||
/* at least some APM implementations will try to deliver
|
||||
|
|
|
@ -27,7 +27,7 @@
|
|||
/* EHCI 0.96 (and later) section 5.1 says how to kick BIOS/SMM/...
|
||||
* off the controller (maybe it can boot from highspeed USB disks).
|
||||
*/
|
||||
static int bios_handoff (struct ehci_hcd *ehci, int where, u32 cap)
|
||||
static int bios_handoff(struct ehci_hcd *ehci, int where, u32 cap)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
|
||||
|
||||
|
@ -48,7 +48,7 @@ static int bios_handoff (struct ehci_hcd *ehci, int where, u32 cap)
|
|||
where, cap);
|
||||
// some BIOS versions seem buggy...
|
||||
// return 1;
|
||||
ehci_warn (ehci, "continuing after BIOS bug...\n");
|
||||
ehci_warn(ehci, "continuing after BIOS bug...\n");
|
||||
/* disable all SMIs, and clear "BIOS owns" flag */
|
||||
pci_write_config_dword(pdev, where + 4, 0);
|
||||
pci_write_config_byte(pdev, where + 2, 0);
|
||||
|
@ -58,96 +58,47 @@ static int bios_handoff (struct ehci_hcd *ehci, int where, u32 cap)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* called by khubd or root hub init threads */
|
||||
static int ehci_pci_reset (struct usb_hcd *hcd)
|
||||
/* called after powerup, by probe or system-pm "wakeup" */
|
||||
static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
|
||||
{
|
||||
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
|
||||
u32 temp;
|
||||
int retval;
|
||||
unsigned count = 256/4;
|
||||
|
||||
spin_lock_init (&ehci->lock);
|
||||
|
||||
ehci->caps = hcd->regs;
|
||||
ehci->regs = hcd->regs + HC_LENGTH (readl (&ehci->caps->hc_capbase));
|
||||
dbg_hcs_params (ehci, "reset");
|
||||
dbg_hcc_params (ehci, "reset");
|
||||
|
||||
/* cache this readonly data; minimize chip reads */
|
||||
ehci->hcs_params = readl (&ehci->caps->hcs_params);
|
||||
|
||||
if (hcd->self.controller->bus == &pci_bus_type) {
|
||||
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
|
||||
|
||||
switch (pdev->vendor) {
|
||||
case PCI_VENDOR_ID_TDI:
|
||||
if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
|
||||
ehci->is_tdi_rh_tt = 1;
|
||||
tdi_reset (ehci);
|
||||
}
|
||||
break;
|
||||
case PCI_VENDOR_ID_AMD:
|
||||
/* AMD8111 EHCI doesn't work, according to AMD errata */
|
||||
if (pdev->device == 0x7463) {
|
||||
ehci_info (ehci, "ignoring AMD8111 (errata)\n");
|
||||
return -EIO;
|
||||
}
|
||||
break;
|
||||
case PCI_VENDOR_ID_NVIDIA:
|
||||
/* NVidia reports that certain chips don't handle
|
||||
* QH, ITD, or SITD addresses above 2GB. (But TD,
|
||||
* data buffer, and periodic schedule are normal.)
|
||||
*/
|
||||
switch (pdev->device) {
|
||||
case 0x003c: /* MCP04 */
|
||||
case 0x005b: /* CK804 */
|
||||
case 0x00d8: /* CK8 */
|
||||
case 0x00e8: /* CK8S */
|
||||
if (pci_set_consistent_dma_mask(pdev,
|
||||
DMA_31BIT_MASK) < 0)
|
||||
ehci_warn (ehci, "can't enable NVidia "
|
||||
"workaround for >2GB RAM\n");
|
||||
break;
|
||||
}
|
||||
break;
|
||||
/* optional debug port, normally in the first BAR */
|
||||
temp = pci_find_capability(pdev, 0x0a);
|
||||
if (temp) {
|
||||
pci_read_config_dword(pdev, temp, &temp);
|
||||
temp >>= 16;
|
||||
if ((temp & (3 << 13)) == (1 << 13)) {
|
||||
temp &= 0x1fff;
|
||||
ehci->debug = ehci_to_hcd(ehci)->regs + temp;
|
||||
temp = readl(&ehci->debug->control);
|
||||
ehci_info(ehci, "debug port %d%s\n",
|
||||
HCS_DEBUG_PORT(ehci->hcs_params),
|
||||
(temp & DBGP_ENABLED)
|
||||
? " IN USE"
|
||||
: "");
|
||||
if (!(temp & DBGP_ENABLED))
|
||||
ehci->debug = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* optional debug port, normally in the first BAR */
|
||||
temp = pci_find_capability (pdev, 0x0a);
|
||||
if (temp) {
|
||||
pci_read_config_dword(pdev, temp, &temp);
|
||||
temp >>= 16;
|
||||
if ((temp & (3 << 13)) == (1 << 13)) {
|
||||
temp &= 0x1fff;
|
||||
ehci->debug = hcd->regs + temp;
|
||||
temp = readl (&ehci->debug->control);
|
||||
ehci_info (ehci, "debug port %d%s\n",
|
||||
HCS_DEBUG_PORT(ehci->hcs_params),
|
||||
(temp & DBGP_ENABLED)
|
||||
? " IN USE"
|
||||
: "");
|
||||
if (!(temp & DBGP_ENABLED))
|
||||
ehci->debug = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
temp = HCC_EXT_CAPS (readl (&ehci->caps->hcc_params));
|
||||
} else
|
||||
temp = 0;
|
||||
temp = HCC_EXT_CAPS(readl(&ehci->caps->hcc_params));
|
||||
|
||||
/* EHCI 0.96 and later may have "extended capabilities" */
|
||||
while (temp && count--) {
|
||||
u32 cap;
|
||||
|
||||
pci_read_config_dword (to_pci_dev(hcd->self.controller),
|
||||
temp, &cap);
|
||||
ehci_dbg (ehci, "capability %04x at %02x\n", cap, temp);
|
||||
pci_read_config_dword(pdev, temp, &cap);
|
||||
ehci_dbg(ehci, "capability %04x at %02x\n", cap, temp);
|
||||
switch (cap & 0xff) {
|
||||
case 1: /* BIOS/SMM/... handoff */
|
||||
if (bios_handoff (ehci, temp, cap) != 0)
|
||||
if (bios_handoff(ehci, temp, cap) != 0)
|
||||
return -EOPNOTSUPP;
|
||||
break;
|
||||
case 0: /* illegal reserved capability */
|
||||
ehci_warn (ehci, "illegal capability!\n");
|
||||
ehci_dbg(ehci, "illegal capability!\n");
|
||||
cap = 0;
|
||||
/* FALLTHROUGH */
|
||||
default: /* unknown */
|
||||
|
@ -156,77 +107,109 @@ static int ehci_pci_reset (struct usb_hcd *hcd)
|
|||
temp = (cap >> 8) & 0xff;
|
||||
}
|
||||
if (!count) {
|
||||
ehci_err (ehci, "bogus capabilities ... PCI problems!\n");
|
||||
ehci_err(ehci, "bogus capabilities ... PCI problems!\n");
|
||||
return -EIO;
|
||||
}
|
||||
if (ehci_is_TDI(ehci))
|
||||
ehci_reset (ehci);
|
||||
|
||||
ehci_port_power (ehci, 0);
|
||||
/* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
|
||||
retval = pci_set_mwi(pdev);
|
||||
if (!retval)
|
||||
ehci_dbg(ehci, "MWI active\n");
|
||||
|
||||
ehci_port_power(ehci, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* called by khubd or root hub (re)init threads; leaves HC in halt state */
|
||||
static int ehci_pci_reset(struct usb_hcd *hcd)
|
||||
{
|
||||
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
|
||||
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
|
||||
u32 temp;
|
||||
int retval;
|
||||
|
||||
ehci->caps = hcd->regs;
|
||||
ehci->regs = hcd->regs + HC_LENGTH(readl(&ehci->caps->hc_capbase));
|
||||
dbg_hcs_params(ehci, "reset");
|
||||
dbg_hcc_params(ehci, "reset");
|
||||
|
||||
/* cache this readonly data; minimize chip reads */
|
||||
ehci->hcs_params = readl(&ehci->caps->hcs_params);
|
||||
|
||||
retval = ehci_halt(ehci);
|
||||
if (retval)
|
||||
return retval;
|
||||
|
||||
/* NOTE: only the parts below this line are PCI-specific */
|
||||
|
||||
switch (pdev->vendor) {
|
||||
case PCI_VENDOR_ID_TDI:
|
||||
if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
|
||||
ehci->is_tdi_rh_tt = 1;
|
||||
tdi_reset(ehci);
|
||||
}
|
||||
break;
|
||||
case PCI_VENDOR_ID_AMD:
|
||||
/* AMD8111 EHCI doesn't work, according to AMD errata */
|
||||
if (pdev->device == 0x7463) {
|
||||
ehci_info(ehci, "ignoring AMD8111 (errata)\n");
|
||||
return -EIO;
|
||||
}
|
||||
break;
|
||||
case PCI_VENDOR_ID_NVIDIA:
|
||||
/* NVidia reports that certain chips don't handle
|
||||
* QH, ITD, or SITD addresses above 2GB. (But TD,
|
||||
* data buffer, and periodic schedule are normal.)
|
||||
*/
|
||||
switch (pdev->device) {
|
||||
case 0x003c: /* MCP04 */
|
||||
case 0x005b: /* CK804 */
|
||||
case 0x00d8: /* CK8 */
|
||||
case 0x00e8: /* CK8S */
|
||||
if (pci_set_consistent_dma_mask(pdev,
|
||||
DMA_31BIT_MASK) < 0)
|
||||
ehci_warn(ehci, "can't enable NVidia "
|
||||
"workaround for >2GB RAM\n");
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if (ehci_is_TDI(ehci))
|
||||
ehci_reset(ehci);
|
||||
|
||||
/* at least the Genesys GL880S needs fixup here */
|
||||
temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params);
|
||||
temp &= 0x0f;
|
||||
if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) {
|
||||
ehci_dbg (ehci, "bogus port configuration: "
|
||||
ehci_dbg(ehci, "bogus port configuration: "
|
||||
"cc=%d x pcc=%d < ports=%d\n",
|
||||
HCS_N_CC(ehci->hcs_params),
|
||||
HCS_N_PCC(ehci->hcs_params),
|
||||
HCS_N_PORTS(ehci->hcs_params));
|
||||
|
||||
if (hcd->self.controller->bus == &pci_bus_type) {
|
||||
struct pci_dev *pdev;
|
||||
|
||||
pdev = to_pci_dev(hcd->self.controller);
|
||||
switch (pdev->vendor) {
|
||||
case 0x17a0: /* GENESYS */
|
||||
/* GL880S: should be PORTS=2 */
|
||||
temp |= (ehci->hcs_params & ~0xf);
|
||||
ehci->hcs_params = temp;
|
||||
break;
|
||||
case PCI_VENDOR_ID_NVIDIA:
|
||||
/* NF4: should be PCC=10 */
|
||||
break;
|
||||
}
|
||||
switch (pdev->vendor) {
|
||||
case 0x17a0: /* GENESYS */
|
||||
/* GL880S: should be PORTS=2 */
|
||||
temp |= (ehci->hcs_params & ~0xf);
|
||||
ehci->hcs_params = temp;
|
||||
break;
|
||||
case PCI_VENDOR_ID_NVIDIA:
|
||||
/* NF4: should be PCC=10 */
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* force HC to halt state */
|
||||
return ehci_halt (ehci);
|
||||
}
|
||||
/* Serial Bus Release Number is at PCI 0x60 offset */
|
||||
pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
|
||||
|
||||
static int ehci_pci_start (struct usb_hcd *hcd)
|
||||
{
|
||||
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
|
||||
int result = 0;
|
||||
/* REVISIT: per-port wake capability (PCI 0x62) currently unused */
|
||||
|
||||
if (hcd->self.controller->bus == &pci_bus_type) {
|
||||
struct pci_dev *pdev;
|
||||
u16 port_wake;
|
||||
retval = ehci_pci_reinit(ehci, pdev);
|
||||
|
||||
pdev = to_pci_dev(hcd->self.controller);
|
||||
|
||||
/* Serial Bus Release Number is at PCI 0x60 offset */
|
||||
pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
|
||||
|
||||
/* port wake capability, reported by boot firmware */
|
||||
pci_read_config_word(pdev, 0x62, &port_wake);
|
||||
hcd->can_wakeup = (port_wake & 1) != 0;
|
||||
|
||||
/* help hc dma work well with cachelines */
|
||||
result = pci_set_mwi(pdev);
|
||||
if (result)
|
||||
ehci_dbg(ehci, "unable to enable MWI - not fatal.\n");
|
||||
}
|
||||
|
||||
return ehci_run (hcd);
|
||||
}
|
||||
|
||||
/* always called by thread; normally rmmod */
|
||||
|
||||
static void ehci_pci_stop (struct usb_hcd *hcd)
|
||||
{
|
||||
ehci_stop (hcd);
|
||||
/* finish init */
|
||||
return ehci_init(hcd);
|
||||
}
|
||||
|
||||
/*-------------------------------------------------------------------------*/
|
||||
|
@ -235,90 +218,88 @@ static void ehci_pci_stop (struct usb_hcd *hcd)
|
|||
|
||||
/* suspend/resume, section 4.3 */
|
||||
|
||||
/* These routines rely on the bus (pci, platform, etc)
|
||||
/* These routines rely on the PCI bus glue
|
||||
* to handle powerdown and wakeup, and currently also on
|
||||
* transceivers that don't need any software attention to set up
|
||||
* the right sort of wakeup.
|
||||
* Also they depend on separate root hub suspend/resume.
|
||||
*/
|
||||
|
||||
static int ehci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
|
||||
static int ehci_pci_suspend(struct usb_hcd *hcd, pm_message_t message)
|
||||
{
|
||||
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
|
||||
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
|
||||
|
||||
if (time_before (jiffies, ehci->next_statechange))
|
||||
msleep (100);
|
||||
if (time_before(jiffies, ehci->next_statechange))
|
||||
msleep(10);
|
||||
|
||||
#ifdef CONFIG_USB_SUSPEND
|
||||
(void) usb_suspend_device (hcd->self.root_hub);
|
||||
#else
|
||||
usb_lock_device (hcd->self.root_hub);
|
||||
(void) ehci_bus_suspend (hcd);
|
||||
usb_unlock_device (hcd->self.root_hub);
|
||||
#endif
|
||||
|
||||
// save (PCI) FLADJ in case of Vaux power loss
|
||||
// could save FLADJ in case of Vaux power loss
|
||||
// ... we'd only use it to handle clock skew
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ehci_pci_resume (struct usb_hcd *hcd)
|
||||
static int ehci_pci_resume(struct usb_hcd *hcd)
|
||||
{
|
||||
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
|
||||
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
|
||||
unsigned port;
|
||||
struct usb_device *root = hcd->self.root_hub;
|
||||
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
|
||||
int retval = -EINVAL;
|
||||
|
||||
// maybe restore (PCI) FLADJ
|
||||
// maybe restore FLADJ
|
||||
|
||||
if (time_before (jiffies, ehci->next_statechange))
|
||||
msleep (100);
|
||||
if (time_before(jiffies, ehci->next_statechange))
|
||||
msleep(100);
|
||||
|
||||
/* If CF is clear, we lost PCI Vaux power and need to restart. */
|
||||
if (readl(&ehci->regs->configured_flag) != FLAG_CF)
|
||||
goto restart;
|
||||
|
||||
/* If any port is suspended (or owned by the companion),
|
||||
* we know we can/must resume the HC (and mustn't reset it).
|
||||
* We just defer that to the root hub code.
|
||||
*/
|
||||
for (port = HCS_N_PORTS (ehci->hcs_params); port > 0; ) {
|
||||
for (port = HCS_N_PORTS(ehci->hcs_params); port > 0; ) {
|
||||
u32 status;
|
||||
port--;
|
||||
status = readl (&ehci->regs->port_status [port]);
|
||||
status = readl(&ehci->regs->port_status [port]);
|
||||
if (!(status & PORT_POWER))
|
||||
continue;
|
||||
if (status & (PORT_SUSPEND | PORT_OWNER)) {
|
||||
down (&hcd->self.root_hub->serialize);
|
||||
retval = ehci_bus_resume (hcd);
|
||||
up (&hcd->self.root_hub->serialize);
|
||||
break;
|
||||
if (status & (PORT_SUSPEND | PORT_RESUME | PORT_OWNER)) {
|
||||
usb_hcd_resume_root_hub(hcd);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
restart:
|
||||
ehci_dbg(ehci, "lost power, restarting\n");
|
||||
for (port = HCS_N_PORTS(ehci->hcs_params); port > 0; ) {
|
||||
port--;
|
||||
if (!root->children [port])
|
||||
continue;
|
||||
dbg_port (ehci, __FUNCTION__, port + 1, status);
|
||||
usb_set_device_state (root->children[port],
|
||||
usb_set_device_state(root->children[port],
|
||||
USB_STATE_NOTATTACHED);
|
||||
}
|
||||
|
||||
/* Else reset, to cope with power loss or flush-to-storage
|
||||
* style "resume" having activated BIOS during reboot.
|
||||
* style "resume" having let BIOS kick in during reboot.
|
||||
*/
|
||||
if (port == 0) {
|
||||
(void) ehci_halt (ehci);
|
||||
(void) ehci_reset (ehci);
|
||||
(void) ehci_pci_reset (hcd);
|
||||
(void) ehci_halt(ehci);
|
||||
(void) ehci_reset(ehci);
|
||||
(void) ehci_pci_reinit(ehci, pdev);
|
||||
|
||||
/* emptying the schedule aborts any urbs */
|
||||
spin_lock_irq (&ehci->lock);
|
||||
if (ehci->reclaim)
|
||||
ehci->reclaim_ready = 1;
|
||||
ehci_work (ehci, NULL);
|
||||
spin_unlock_irq (&ehci->lock);
|
||||
/* emptying the schedule aborts any urbs */
|
||||
spin_lock_irq(&ehci->lock);
|
||||
if (ehci->reclaim)
|
||||
ehci->reclaim_ready = 1;
|
||||
ehci_work(ehci, NULL);
|
||||
spin_unlock_irq(&ehci->lock);
|
||||
|
||||
/* restart; khubd will disconnect devices */
|
||||
retval = ehci_run (hcd);
|
||||
/* restart; khubd will disconnect devices */
|
||||
retval = ehci_run(hcd);
|
||||
|
||||
/* here we "know" root ports should always stay powered;
|
||||
* but some controllers may lose all power.
|
||||
*/
|
||||
ehci_port_power (ehci, 1);
|
||||
}
|
||||
/* here we "know" root ports should always stay powered */
|
||||
ehci_port_power(ehci, 1);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
@ -339,12 +320,12 @@ static const struct hc_driver ehci_pci_hc_driver = {
|
|||
* basic lifecycle operations
|
||||
*/
|
||||
.reset = ehci_pci_reset,
|
||||
.start = ehci_pci_start,
|
||||
.start = ehci_run,
|
||||
#ifdef CONFIG_PM
|
||||
.suspend = ehci_pci_suspend,
|
||||
.resume = ehci_pci_resume,
|
||||
#endif
|
||||
.stop = ehci_pci_stop,
|
||||
.stop = ehci_stop,
|
||||
|
||||
/*
|
||||
* managing i/o requests and associated device resources
|
||||
|
@ -377,7 +358,7 @@ static const struct pci_device_id pci_ids [] = { {
|
|||
},
|
||||
{ /* end: all zeroes */ }
|
||||
};
|
||||
MODULE_DEVICE_TABLE (pci, pci_ids);
|
||||
MODULE_DEVICE_TABLE(pci, pci_ids);
|
||||
|
||||
/* pci driver glue; this is a "new style" PCI driver module */
|
||||
static struct pci_driver ehci_pci_driver = {
|
||||
|
@ -393,22 +374,22 @@ static struct pci_driver ehci_pci_driver = {
|
|||
#endif
|
||||
};
|
||||
|
||||
static int __init ehci_hcd_pci_init (void)
|
||||
static int __init ehci_hcd_pci_init(void)
|
||||
{
|
||||
if (usb_disabled())
|
||||
return -ENODEV;
|
||||
|
||||
pr_debug ("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n",
|
||||
pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n",
|
||||
hcd_name,
|
||||
sizeof (struct ehci_qh), sizeof (struct ehci_qtd),
|
||||
sizeof (struct ehci_itd), sizeof (struct ehci_sitd));
|
||||
sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
|
||||
sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
|
||||
|
||||
return pci_register_driver (&ehci_pci_driver);
|
||||
return pci_register_driver(&ehci_pci_driver);
|
||||
}
|
||||
module_init (ehci_hcd_pci_init);
|
||||
module_init(ehci_hcd_pci_init);
|
||||
|
||||
static void __exit ehci_hcd_pci_cleanup (void)
|
||||
static void __exit ehci_hcd_pci_cleanup(void)
|
||||
{
|
||||
pci_unregister_driver (&ehci_pci_driver);
|
||||
pci_unregister_driver(&ehci_pci_driver);
|
||||
}
|
||||
module_exit (ehci_hcd_pci_cleanup);
|
||||
module_exit(ehci_hcd_pci_cleanup);
|
||||
|
|
|
@ -14,15 +14,6 @@
|
|||
* This file is licenced under the GPL.
|
||||
*/
|
||||
|
||||
#include <linux/jiffies.h>
|
||||
|
||||
#ifdef CONFIG_PPC_PMAC
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/pmac_feature.h>
|
||||
#include <asm/pci-bridge.h>
|
||||
#include <asm/prom.h>
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_PCI
|
||||
#error "This file is PCI bus glue. CONFIG_PCI must be defined."
|
||||
#endif
|
||||
|
@ -115,39 +106,12 @@ ohci_pci_start (struct usb_hcd *hcd)
|
|||
static int ohci_pci_suspend (struct usb_hcd *hcd, pm_message_t message)
|
||||
{
|
||||
/* root hub was already suspended */
|
||||
|
||||
/* FIXME these PMAC things get called in the wrong places. ASIC
|
||||
* clocks should be turned off AFTER entering D3, and on BEFORE
|
||||
* trying to enter D0. Evidently the PCI layer doesn't currently
|
||||
* provide the right sort of platform hooks for this ...
|
||||
*/
|
||||
#ifdef CONFIG_PPC_PMAC
|
||||
if (_machine == _MACH_Pmac) {
|
||||
struct device_node *of_node;
|
||||
|
||||
/* Disable USB PAD & cell clock */
|
||||
of_node = pci_device_to_OF_node (to_pci_dev(hcd->self.controller));
|
||||
if (of_node)
|
||||
pmac_call_feature(PMAC_FTR_USB_ENABLE, of_node, 0, 0);
|
||||
}
|
||||
#endif /* CONFIG_PPC_PMAC */
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int ohci_pci_resume (struct usb_hcd *hcd)
|
||||
{
|
||||
#ifdef CONFIG_PPC_PMAC
|
||||
if (_machine == _MACH_Pmac) {
|
||||
struct device_node *of_node;
|
||||
|
||||
/* Re-enable USB PAD & cell clock */
|
||||
of_node = pci_device_to_OF_node (to_pci_dev(hcd->self.controller));
|
||||
if (of_node)
|
||||
pmac_call_feature (PMAC_FTR_USB_ENABLE, of_node, 0, 1);
|
||||
}
|
||||
#endif /* CONFIG_PPC_PMAC */
|
||||
|
||||
usb_hcd_resume_root_hub(hcd);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -199,7 +199,7 @@ static void sn9c102_release_buffers(struct sn9c102_device* cam)
|
|||
{
|
||||
if (cam->nbuffers) {
|
||||
rvfree(cam->frame[0].bufmem,
|
||||
cam->nbuffers * cam->frame[0].buf.length);
|
||||
cam->nbuffers * PAGE_ALIGN(cam->frame[0].buf.length));
|
||||
cam->nbuffers = 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -475,6 +475,8 @@ static struct usb_device_id id_table_combined [] = {
|
|||
{ USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) },
|
||||
{ USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) },
|
||||
{ USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16HR_PID) },
|
||||
{ USB_DEVICE(KOBIL_VID, KOBIL_CONV_B1_PID) },
|
||||
{ USB_DEVICE(KOBIL_VID, KOBIL_CONV_KAAN_PID) },
|
||||
{ }, /* Optional parameter entry */
|
||||
{ } /* Terminating entry */
|
||||
};
|
||||
|
|
|
@ -127,6 +127,13 @@
|
|||
#define SEALEVEL_2803_7_PID 0X2873 /* SeaLINK+8 (2803) Port 7 */
|
||||
#define SEALEVEL_2803_8_PID 0X2883 /* SeaLINK+8 (2803) Port 8 */
|
||||
|
||||
/*
|
||||
* The following are the values for two KOBIL chipcard terminals.
|
||||
*/
|
||||
#define KOBIL_VID 0x0d46 /* KOBIL Vendor ID */
|
||||
#define KOBIL_CONV_B1_PID 0x2020 /* KOBIL Konverter for B1 */
|
||||
#define KOBIL_CONV_KAAN_PID 0x2021 /* KOBIL_Konverter for KAAN */
|
||||
|
||||
/*
|
||||
* DSS-20 Sync Station for Sony Ericsson P800
|
||||
*/
|
||||
|
|
|
@ -46,7 +46,6 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/usb.h>
|
||||
#include <linux/usb.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include "usb-serial.h"
|
||||
|
||||
|
|
|
@ -1118,6 +1118,15 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
|
|||
US_SC_DEVICE, US_PR_DEVICE, NULL,
|
||||
US_FL_GO_SLOW ),
|
||||
|
||||
/*
|
||||
* David Härdeman <david@2gen.com>
|
||||
* The key makes the SCSI stack print confusing (but harmless) messages
|
||||
*/
|
||||
UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100,
|
||||
"Iomega",
|
||||
"Micro Mini 1GB",
|
||||
US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
|
||||
|
||||
#ifdef CONFIG_USB_STORAGE_SDDR55
|
||||
UNUSUAL_DEV( 0x55aa, 0xa103, 0x0000, 0x9999,
|
||||
"Sandisk",
|
||||
|
|
|
@@ -452,13 +452,17 @@ int fb_prepare_logo(struct fb_info *info, int rotate)

    /* Return if no suitable logo was found */
    fb_logo.logo = fb_find_logo(depth);

    if (!fb_logo.logo) {
        return 0;
    }

    if (rotate == FB_ROTATE_UR || rotate == FB_ROTATE_UD)
        yres = info->var.yres;
    else
        yres = info->var.xres;

    if (fb_logo.logo && fb_logo.logo->height > yres) {
    if (fb_logo.logo->height > yres) {
        fb_logo.logo = NULL;
        return 0;
    }

@@ -668,7 +668,7 @@ static inline int de_thread(struct task_struct *tsk)
    if (!thread_group_leader(current)) {
        struct task_struct *parent;
        struct dentry *proc_dentry1, *proc_dentry2;
        unsigned long exit_state, ptrace;
        unsigned long ptrace;

        /*
         * Wait for the thread group leader to be a zombie.

@@ -726,15 +726,15 @@ static inline int de_thread(struct task_struct *tsk)
        list_del(&current->tasks);
        list_add_tail(&current->tasks, &init_task.tasks);
        current->exit_signal = SIGCHLD;
        exit_state = leader->exit_state;

        BUG_ON(leader->exit_state != EXIT_ZOMBIE);
        leader->exit_state = EXIT_DEAD;

        write_unlock_irq(&tasklist_lock);
        spin_unlock(&leader->proc_lock);
        spin_unlock(&current->proc_lock);
        proc_pid_flush(proc_dentry1);
        proc_pid_flush(proc_dentry2);

        BUG_ON(exit_state != EXIT_ZOMBIE);
    }

    /*

@@ -82,28 +82,28 @@
do { \
    printk(JFFS2_ERR_MSG_PREFIX \
        " (%d) %s: " fmt, current->pid, \
        __FUNCTION__, ##__VA_ARGS__); \
        __FUNCTION__ , ##__VA_ARGS__); \
} while(0)

#define JFFS2_WARNING(fmt, ...) \
do { \
    printk(JFFS2_WARN_MSG_PREFIX \
        " (%d) %s: " fmt, current->pid, \
        __FUNCTION__, ##__VA_ARGS__); \
        __FUNCTION__ , ##__VA_ARGS__); \
} while(0)

#define JFFS2_NOTICE(fmt, ...) \
do { \
    printk(JFFS2_NOTICE_MSG_PREFIX \
        " (%d) %s: " fmt, current->pid, \
        __FUNCTION__, ##__VA_ARGS__); \
        __FUNCTION__ , ##__VA_ARGS__); \
} while(0)

#define JFFS2_DEBUG(fmt, ...) \
do { \
    printk(JFFS2_DBG_MSG_PREFIX \
        " (%d) %s: " fmt, current->pid, \
        __FUNCTION__, ##__VA_ARGS__); \
        __FUNCTION__ , ##__VA_ARGS__); \
} while(0)

/*

@@ -118,8 +118,6 @@ static __inline__ long atomic_add_return(int i, atomic_t * v)
    return result;
}

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
    long temp, result;

@@ -189,6 +187,9 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

@@ -199,6 +200,8 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

@@ -4,7 +4,7 @@
 * IOP3xx architecture timex specifications
 */
#include <linux/config.h>

#include <asm/hardware.h>

#if defined(CONFIG_ARCH_IQ80321) || defined(CONFIG_ARCH_IQ31244)

@@ -135,9 +135,9 @@ extern unsigned int HPAGE_SHIFT;

#define in_hugepage_area(context, addr) \
    (cpu_has_feature(CPU_FTR_16M_PAGE) && \
     ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
       ( ((addr) < 0x100000000L) && \
         ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )
     ( ( (addr) >= 0x100000000UL) \
       ? ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) \
       : ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) )

#else /* !CONFIG_HUGETLB_PAGE */

@@ -54,6 +54,7 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
#define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0)

@@ -160,8 +160,8 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when

@@ -178,6 +178,31 @@ static __inline__ int atomic_add_negative(int i, atomic_t *v)
    return c;
}

/**
 * atomic_add_return - add and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
    int __i = i;
    __asm__ __volatile__(
        LOCK "xaddl %0, %1;"
        :"=r"(i)
        :"m"(v->counter), "0"(i));
    return i + __i;
}

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
    return atomic_add_return(-i,v);
}

#define atomic_inc_return(v) (atomic_add_return(1,v))
#define atomic_dec_return(v) (atomic_sub_return(1,v))

/* An 64bit atomic type */

typedef struct { volatile long counter; } atomic64_t;

@@ -320,14 +345,14 @@ static __inline__ int atomic64_inc_and_test(atomic64_t *v)

/**
 * atomic64_add_negative - add and test if negative
 * @v: pointer to atomic64_t
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ long atomic64_add_negative(long i, atomic64_t *v)
static __inline__ int atomic64_add_negative(long i, atomic64_t *v)
{
    unsigned char c;

@@ -339,27 +364,30 @@ static __inline__ long atomic64_add_negative(long i, atomic64_t *v)
}

/**
 * atomic_add_return - add and return
 * @v: pointer of type atomic_t
 * atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
static __inline__ long atomic64_add_return(long i, atomic64_t *v)
{
    int __i = i;
    long __i = i;
    __asm__ __volatile__(
        LOCK "xaddl %0, %1;"
        LOCK "xaddq %0, %1;"
        :"=r"(i)
        :"m"(v->counter), "0"(i));
    return i + __i;
}

static __inline__ int atomic_sub_return(int i, atomic_t *v)
static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
{
    return atomic_add_return(-i,v);
    return atomic64_add_return(-i,v);
}

#define atomic64_inc_return(v) (atomic64_add_return(1,v))
#define atomic64_dec_return(v) (atomic64_sub_return(1,v))

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))

/**

@@ -381,9 +409,6 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc_return(v) (atomic_add_return(1,v))
#define atomic_dec_return(v) (atomic_sub_return(1,v))

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
    __asm__ __volatile__(LOCK "andl %0,%1" \

@@ -94,13 +94,7 @@ void smp_prepare_boot_cpu(void);
 */
#define raw_smp_processor_id() 0
#define hard_smp_processor_id() 0

static inline int smp_call_function(void (*func) (void *info), void *info,
                                    int retry, int wait)
{
    return 0;
}

#define smp_call_function(func,info,retry,wait) ({ 0; })
#define on_each_cpu(func,info,retry,wait) ({ func(info); 0; })
static inline void smp_send_reschedule(int cpu) { }
#define num_booting_cpus() 1

@@ -47,6 +47,7 @@ struct usb_driver;
 * @urb_list: urbs queued to this endpoint; maintained by usbcore
 * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
 *      with one or more transfer descriptors (TDs) per urb
 * @kobj: kobject for sysfs info
 * @extra: descriptors following this endpoint in the configuration
 * @extralen: how many bytes of "extra" are valid
 *

@@ -201,21 +201,6 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
     * from swap. But that's a lot of code to duplicate here
     * for a rare case, so we simply fetch the page.
     */

    /*
     * Do a quick atomic lookup first - this is the fastpath.
     */
    page = follow_page(mm, uaddr, FOLL_TOUCH|FOLL_GET);
    if (likely(page != NULL)) {
        key->shared.pgoff =
            page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        put_page(page);
        return 0;
    }

    /*
     * Do it the general way.
     */
    err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL);
    if (err >= 0) {
        key->shared.pgoff =

@@ -36,6 +36,9 @@ void synchronize_irq(unsigned int irq)
{
    struct irq_desc *desc = irq_desc + irq;

    if (irq >= NR_IRQS)
        return;

    while (desc->status & IRQ_INPROGRESS)
        cpu_relax();
}

@@ -60,6 +63,9 @@ void disable_irq_nosync(unsigned int irq)
    irq_desc_t *desc = irq_desc + irq;
    unsigned long flags;

    if (irq >= NR_IRQS)
        return;

    spin_lock_irqsave(&desc->lock, flags);
    if (!desc->depth++) {
        desc->status |= IRQ_DISABLED;

@@ -86,6 +92,9 @@ void disable_irq(unsigned int irq)
{
    irq_desc_t *desc = irq_desc + irq;

    if (irq >= NR_IRQS)
        return;

    disable_irq_nosync(irq);
    if (desc->action)
        synchronize_irq(irq);

@@ -108,6 +117,9 @@ void enable_irq(unsigned int irq)
    irq_desc_t *desc = irq_desc + irq;
    unsigned long flags;

    if (irq >= NR_IRQS)
        return;

    spin_lock_irqsave(&desc->lock, flags);
    switch (desc->depth) {
    case 0:

@@ -163,6 +175,9 @@ int setup_irq(unsigned int irq, struct irqaction * new)
    unsigned long flags;
    int shared = 0;

    if (irq >= NR_IRQS)
        return -EINVAL;

    if (desc->handler == &no_irq_type)
        return -ENOSYS;
    /*

@@ -956,7 +956,7 @@ int unregister_console(struct console *console)
    if (console_drivers == console) {
        console_drivers=console->next;
        res = 0;
    } else {
    } else if (console_drivers) {
        for (a=console_drivers->next, b=console_drivers ;
             a; b=a, a=b->next) {
            if (a == console) {

@@ -125,12 +125,10 @@ comment "Memory hotplug is currently incompatible with Software Suspend"
# space can be handled with less contention: split it at this NR_CPUS.
# Default to 4 for wider testing, though 8 might be more appropriate.
# ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
# PA-RISC's debug spinlock_t is too large for the 32-bit struct page.
# ARM26 and SPARC32 and PPC64 may use one page for multiple page tables.
# PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
#
config SPLIT_PTLOCK_CPUS
    int
    default "4096" if ARM && !CPU_CACHE_VIPT
    default "4096" if PARISC && DEBUG_SPINLOCK && !64BIT
    default "4096" if ARM26 || SPARC32 || PPC64
    default "4096" if PARISC && !PA20
    default "4"

@@ -282,8 +282,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
     * Zap the rest of the file in one hit.
     */
    unmap_mapping_range(mapping,
        page_index << PAGE_CACHE_SHIFT,
        (end - page_index + 1)
        (loff_t)page_index<<PAGE_CACHE_SHIFT,
        (loff_t)(end - page_index + 1)
            << PAGE_CACHE_SHIFT,
        0);
    did_range_unmap = 1;

@@ -292,7 +292,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
     * Just zap this page
     */
    unmap_mapping_range(mapping,
        page_index << PAGE_CACHE_SHIFT,
        (loff_t)page_index<<PAGE_CACHE_SHIFT,
        PAGE_CACHE_SIZE, 0);
    }
}

@@ -366,6 +366,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)

    spin_lock_bh(&br->lock);
    br_stp_recalculate_bridge_id(br);
    br_features_recompute(br);
    if ((br->dev->flags & IFF_UP)
        && (dev->flags & IFF_UP) && netif_carrier_ok(dev))
        br_stp_enable_port(p);

@@ -27,6 +27,7 @@
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>

#include <linux/netfilter.h>