Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net
The BTF conflicts were simple overlapping changes. The virtio_net conflict was an overlap between a fix to a statistics counter and a move to a bona fide statistics structure rather than counting values on the stack. Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in: commit 89b1698c93

@@ -16,7 +16,8 @@ A child node must exist to represent the core DWC3 IP block. The name of
the node is not important. The content of the node is defined in dwc3.txt.

Phy documentation is provided in the following places:
Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt
Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt - USB2.0 PHY
Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt - Type-C PHY

Example device nodes:

@@ -1,5 +1,6 @@
.. include:: <isonum.txt>

=========================================================
DPAA2 (Data Path Acceleration Architecture Gen2) Overview
=========================================================

@@ -7095,6 +7095,7 @@ F: include/uapi/linux/input.h
F: include/uapi/linux/input-event-codes.h
F: include/linux/input/
F: Documentation/devicetree/bindings/input/
+F: Documentation/devicetree/bindings/serio/
F: Documentation/input/

INPUT MULTITOUCH (MT) PROTOCOL

@@ -7984,7 +7985,7 @@ F: lib/test_kmod.c
F: tools/testing/selftests/kmod/

KPROBES
M: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
M: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
M: "David S. Miller" <davem@davemloft.net>
M: Masami Hiramatsu <mhiramat@kernel.org>

Makefile: 2 lines changed

@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 18
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Merciless Moray

# *DOCUMENTATION*

@@ -48,6 +48,7 @@ saved_pc .req lr
* from those features make this path too inefficient.
*/
ret_fast_syscall:
+__ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
disable_irq_notrace @ disable interrupts

@@ -78,6 +79,7 @@ fast_work_pending:
* call.
*/
ret_fast_syscall:
+__ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0

@@ -255,7 +257,7 @@ local_restart:
tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
bne __sys_trace

-invoke_syscall tbl, scno, r10, ret_fast_syscall
+invoke_syscall tbl, scno, r10, __ret_fast_syscall

add r1, sp, #S_OFF
2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)

@@ -338,6 +338,7 @@ static struct vm_area_struct gate_vma = {

static int __init gate_vma_init(void)
{
+vma_init(&gate_vma, NULL);
gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
return 0;
}

@@ -212,7 +212,7 @@ static DEFINE_MUTEX(ecard_mutex);
*/
static void ecard_init_pgtables(struct mm_struct *mm)
{
-struct vm_area_struct vma;
+struct vm_area_struct vma = TLB_FLUSH_VMA(mm, VM_EXEC);

/* We want to set up the page tables for the following mapping:
* Virtual Physical

@@ -237,9 +237,6 @@ static void ecard_init_pgtables(struct mm_struct *mm)

memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));

-vma.vm_flags = VM_EXEC;
-vma.vm_mm = mm;

flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
}

@@ -37,7 +37,7 @@ static inline void __tlb_remove_table(void *_table)

static inline void tlb_flush(struct mmu_gather *tlb)
{
-struct vm_area_struct vma = { .vm_mm = tlb->mm, };
+struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);

/*
* The ASID allocator will either invalidate the ASID or mark

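Several hunks in this merge replace open-coded on-stack VMAs with a TLB_FLUSH_VMA() initializer. The macro itself is introduced elsewhere in the series; the sketch below is an assumption about its shape, inferred from the call sites above, to illustrate the pattern:

/*
 * Hedged sketch, not the kernel's actual definition: a stand-in for
 * TLB_FLUSH_VMA() that builds a minimal on-stack VMA carrying only the
 * fields flush_tlb_range() implementations are known to look at.
 */
#define TLB_FLUSH_VMA(mm, flags) { .vm_mm = (mm), .vm_flags = (flags) }

static inline void example_range_flush(struct mmu_gather *tlb,
                                       unsigned long start,
                                       unsigned long end)
{
        struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);

        flush_tlb_range(&vma, start, end);
}

The point of the helper is that architectures reading vm_flags in their flush implementation see a well-defined value instead of uninitialized stack contents.
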
@@ -1351,9 +1351,9 @@ static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,

static void update_cpu_capabilities(u16 scope_mask)
{
-__update_cpu_capabilities(arm64_features, scope_mask, "detected:");
__update_cpu_capabilities(arm64_errata, scope_mask,
"enabling workaround for");
+__update_cpu_capabilities(arm64_features, scope_mask, "detected:");
}

static int __enable_cpu_capability(void *arg)

@@ -1408,8 +1408,8 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,

static void __init enable_cpu_capabilities(u16 scope_mask)
{
-__enable_cpu_capabilities(arm64_features, scope_mask);
__enable_cpu_capabilities(arm64_errata, scope_mask);
+__enable_cpu_capabilities(arm64_features, scope_mask);
}

/*

@@ -108,7 +108,6 @@ static pte_t get_clear_flush(struct mm_struct *mm,
unsigned long pgsize,
unsigned long ncontig)
{
-struct vm_area_struct vma = { .vm_mm = mm };
pte_t orig_pte = huge_ptep_get(ptep);
bool valid = pte_valid(orig_pte);
unsigned long i, saddr = addr;

@@ -125,8 +124,10 @@ static pte_t get_clear_flush(struct mm_struct *mm,
orig_pte = pte_mkdirty(orig_pte);
}

-if (valid)
+if (valid) {
+struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
flush_tlb_range(&vma, saddr, addr);
+}
return orig_pte;
}

@@ -145,7 +146,7 @@ static void clear_flush(struct mm_struct *mm,
unsigned long pgsize,
unsigned long ncontig)
{
-struct vm_area_struct vma = { .vm_mm = mm };
+struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
unsigned long i, saddr = addr;

for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)

@@ -611,11 +611,13 @@ void __init mem_init(void)
BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif

+#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
* Make sure we chose the upper bound of sizeof(struct page)
-* correctly.
+* correctly when sizing the VMEMMAP array.
*/
BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
+#endif

if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
extern int sysctl_overcommit_memory;

@@ -115,12 +115,11 @@ ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned
flush_tlb_all();
} else {
/*
-* XXX fix me: flush_tlb_range() should take an mm pointer instead of a
-* vma pointer.
+* flush_tlb_range() takes a vma instead of a mm pointer because
+* some architectures want the vm_flags for ITLB/DTLB flush.
*/
-struct vm_area_struct vma;
+struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);

-vma.vm_mm = tlb->mm;
/* flush the address range from the tlb: */
flush_tlb_range(&vma, start, end);
/* now flush the virt. page-table area mapping the address range: */

@@ -116,6 +116,7 @@ ia64_init_addr_space (void)
*/
vma = vm_area_alloc(current->mm);
if (vma) {
+vma_set_anonymous(vma);
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;

@@ -133,6 +134,7 @@ ia64_init_addr_space (void)
if (!(current->personality & MMAP_PAGE_ZERO)) {
vma = vm_area_alloc(current->mm);
if (vma) {
+vma_set_anonymous(vma);
vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |

@@ -273,7 +275,7 @@ static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
-gate_vma.vm_mm = NULL;
+vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;

@@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void)
*/
if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
cpu_wait = NULL;

-/*
-* BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
-* Enable ExternalSync for sync instruction to take effect
-*/
-set_c0_config7(MIPS_CONF7_ES);
break;
#endif
}

@@ -681,8 +681,6 @@
#define MIPS_CONF7_WII (_ULCAST_(1) << 31)

#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
-/* ExternalSync */
-#define MIPS_CONF7_ES (_ULCAST_(1) << 8)

#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
#define MIPS_CONF7_AR (_ULCAST_(1) << 16)

@@ -2767,7 +2765,6 @@ __BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
__BUILD_SET_C0(config5)
-__BUILD_SET_C0(config7)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)

@@ -13,6 +13,7 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += module.h
+generic-y += msi.h
generic-y += preempt.h
generic-y += rwsem.h
generic-y += serial.h

@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
-* msi.h: Defines specific to the MBus - Sbus - Interface.
-*
-* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
-* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
-*/
-
-#ifndef _SPARC_MSI_H
-#define _SPARC_MSI_H
-
-/*
-* Locations of MSI Registers.
-*/
-#define MSI_MBUS_ARBEN 0xe0001008 /* MBus Arbiter Enable register */
-
-/*
-* Useful bits in the MSI Registers.
-*/
-#define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */
-
-
-static inline void msi_set_sync(void)
-{
-__asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
-"andn %%g3, %2, %%g3\n\t"
-"sta %%g3, [%0] %1\n\t" : :
-"r" (MSI_MBUS_ARBEN),
-"i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
-}
-
-#endif /* !(_SPARC_MSI_H) */

@@ -814,7 +814,7 @@ static void __init get_tick_patch(void)
}
}

-static void init_tick_ops(struct sparc64_tick_ops *ops)
+static void __init init_tick_ops(struct sparc64_tick_ops *ops)
{
unsigned long freq, quotient, tick;

@@ -37,7 +37,6 @@
#include <asm/mbus.h>
#include <asm/page.h>
#include <asm/asi.h>
-#include <asm/msi.h>
#include <asm/smp.h>
#include <asm/io.h>

@@ -116,6 +115,25 @@ static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
set_pte((pte_t *)ctxp, pte);
}

+/*
+* Locations of MSI Registers.
+*/
+#define MSI_MBUS_ARBEN 0xe0001008 /* MBus Arbiter Enable register */
+
+/*
+* Useful bits in the MSI Registers.
+*/
+#define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */
+
+static void msi_set_sync(void)
+{
+__asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
+"andn %%g3, %2, %%g3\n\t"
+"sta %%g3, [%0] %1\n\t" : :
+"r" (MSI_MBUS_ARBEN),
+"i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
+}

void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
unsigned long ptp; /* Physical address, shifted right by 4 */

@@ -106,9 +106,13 @@ define cmd_check_data_rel
done
endef

+# We need to run two commands under "if_changed", so merge them into a
+# single invocation.
+quiet_cmd_check-and-link-vmlinux = LD $@
+cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
+
$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
-$(call if_changed,check_data_rel)
-$(call if_changed,ld)
+$(call if_changed,check-and-link-vmlinux)

OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux FORCE

@@ -981,7 +981,7 @@ ENTRY(\sym)

call \do_sym

-jmp error_exit /* %ebx: no swapgs flag */
+jmp error_exit
.endif
END(\sym)
.endm

@@ -1222,7 +1222,6 @@ END(paranoid_exit)

/*
* Save all registers in pt_regs, and switch GS if needed.
-* Return: EBX=0: came from user mode; EBX=1: otherwise
*/
ENTRY(error_entry)
UNWIND_HINT_FUNC

@@ -1269,7 +1268,6 @@ ENTRY(error_entry)
* for these here too.
*/
.Lerror_kernelspace:
-incl %ebx
leaq native_irq_return_iret(%rip), %rcx
cmpq %rcx, RIP+8(%rsp)
je .Lerror_bad_iret

@@ -1303,28 +1301,20 @@ ENTRY(error_entry)

/*
* Pretend that the exception came from user mode: set up pt_regs
-* as if we faulted immediately after IRET and clear EBX so that
-* error_exit knows that we will be returning to user mode.
+* as if we faulted immediately after IRET.
*/
mov %rsp, %rdi
call fixup_bad_iret
mov %rax, %rsp
-decl %ebx
jmp .Lerror_entry_from_usermode_after_swapgs
END(error_entry)

-/*
-* On entry, EBX is a "return to kernel mode" flag:
-* 1: already in kernel mode, don't need SWAPGS
-* 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
-*/
ENTRY(error_exit)
UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
-testl %ebx, %ebx
-jnz retint_kernel
+testb $3, CS(%rsp)
+jz retint_kernel
jmp retint_user
END(error_exit)

@@ -579,7 +579,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
struct perf_event *event = pcpu->event;
-struct hw_perf_event *hwc = &event->hw;
+struct hw_perf_event *hwc;
struct perf_sample_data data;
struct perf_raw_record raw;
struct pt_regs regs;

@@ -602,6 +602,10 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
return 0;
}

+if (WARN_ON_ONCE(!event))
+goto fail;
+
+hwc = &event->hw;
msr = hwc->config_base;
buf = ibs_data.regs;
rdmsrl(msr, *buf);

@@ -2997,6 +2997,9 @@ static int intel_pmu_hw_config(struct perf_event *event)
}
if (x86_pmu.pebs_aliases)
x86_pmu.pebs_aliases(event);

+if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
}

if (needs_branch_stack(event)) {

@@ -1185,17 +1185,21 @@ static void setup_pebs_sample_data(struct perf_event *event,
data->data_src.val = val;
}

+/*
+* We must however always use iregs for the unwinder to stay sane; the
+* record BP,SP,IP can point into thin air when the record is from a
+* previous PMI context or an (I)RET happend between the record and
+* PMI.
+*/
+if (sample_type & PERF_SAMPLE_CALLCHAIN)
+data->callchain = perf_callchain(event, iregs);
+
/*
* We use the interrupt regs as a base because the PEBS record does not
* contain a full regs set, specifically it seems to lack segment
* descriptors, which get used by things like user_mode().
*
* In the simple case fix up only the IP for PERF_SAMPLE_IP.
-*
-* We must however always use BP,SP from iregs for the unwinder to stay
-* sane; the record BP,SP can point into thin air when the record is
-* from a previous PMI context or an (I)RET happend between the record
-* and PMI.
*/
*regs = *iregs;

@@ -1214,15 +1218,8 @@ static void setup_pebs_sample_data(struct perf_event *event,
regs->si = pebs->si;
regs->di = pebs->di;

-/*
-* Per the above; only set BP,SP if we don't need callchains.
-*
-* XXX: does this make sense?
-*/
-if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
-regs->bp = pebs->bp;
-regs->sp = pebs->sp;
-}
+regs->bp = pebs->bp;
+regs->sp = pebs->sp;

#ifndef CONFIG_X86_32
regs->r8 = pebs->r8;

@@ -43,7 +43,7 @@ asm (".pushsection .text;"
"push %rdx;"
"mov $0x1,%eax;"
"xor %edx,%edx;"
-"lock cmpxchg %dl,(%rdi);"
+LOCK_PREFIX "cmpxchg %dl,(%rdi);"
"cmp $0x1,%al;"
"jne .slowpath;"
"pop %rdx;"

@@ -573,6 +573,9 @@ static u32 skx_deadline_rev(void)
case 0x04: return 0x02000014;
}

+if (boot_cpu_data.x86_stepping > 4)
+return 0;
+
return ~0U;
}

@@ -890,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
if (cache->nobjs >= min)
return 0;
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-page = (void *)__get_free_page(GFP_KERNEL);
+page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
if (!page)
return -ENOMEM;
cache->objects[cache->nobjs++] = page;

@@ -1441,8 +1441,8 @@ static void emit_prologue(u8 **pprog, u32 stack_depth)

/* sub esp,STACK_SIZE */
EMIT2_off32(0x81, 0xEC, STACK_SIZE);
-/* sub ebp,SCRATCH_SIZE+4+12*/
-EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 16);
+/* sub ebp,SCRATCH_SIZE+12*/
+EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 12);
/* xor ebx,ebx */
EMIT2(0x31, add_2reg(0xC0, IA32_EBX, IA32_EBX));

@@ -1475,8 +1475,8 @@ static void emit_epilogue(u8 **pprog, u32 stack_depth)
/* mov edx,dword ptr [ebp+off]*/
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r0[1]));

-/* add ebp,SCRATCH_SIZE+4+12*/
-EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 16);
+/* add ebp,SCRATCH_SIZE+12*/
+EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 12);

/* mov ebx,dword ptr [ebp-12]*/
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), -12);

@@ -417,7 +417,7 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va)
if (!(md->attribute & EFI_MEMORY_WB))
flags |= _PAGE_PCD;

-if (sev_active())
+if (sev_active() && md->type != EFI_MEMORY_MAPPED_IO)
flags |= _PAGE_ENC;

pfn = md->phys_addr >> PAGE_SHIFT;

@@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
if (!FIXADDR_USER_START)
return 0;

-gate_vma.vm_mm = NULL;
+vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;

block/bio.c: 54 lines changed

@@ -903,25 +903,27 @@ int bio_add_page(struct bio *bio, struct page *page,
EXPORT_SYMBOL(bio_add_page);

/**
-* bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+* __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
* @bio: bio to add pages to
* @iter: iov iterator describing the region to be mapped
*
-* Pins as many pages from *iter and appends them to @bio's bvec array. The
+* Pins pages from *iter and appends them to @bio's bvec array. The
* pages will have to be released using put_page() when done.
+* For multi-segment *iter, this function only adds pages from the
+* the next non-empty segment of the iov iterator.
*/
-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
-unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
+unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
-size_t offset, diff;
+size_t offset;
ssize_t size;

size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
if (unlikely(size <= 0))
return size ? size : -EFAULT;
-nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
+idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;

/*
* Deep magic below: We need to walk the pinned pages backwards

@@ -934,21 +936,46 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
bio->bi_iter.bi_size += size;
bio->bi_vcnt += nr_pages;

-diff = (nr_pages * PAGE_SIZE - offset) - size;
-while (nr_pages--) {
-bv[nr_pages].bv_page = pages[nr_pages];
-bv[nr_pages].bv_len = PAGE_SIZE;
-bv[nr_pages].bv_offset = 0;
+while (idx--) {
+bv[idx].bv_page = pages[idx];
+bv[idx].bv_len = PAGE_SIZE;
+bv[idx].bv_offset = 0;
}

bv[0].bv_offset += offset;
bv[0].bv_len -= offset;
-if (diff)
-bv[bio->bi_vcnt - 1].bv_len -= diff;
+bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;

iov_iter_advance(iter, size);
return 0;
}

+/**
+* bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+* @bio: bio to add pages to
+* @iter: iov iterator describing the region to be mapped
+*
+* Pins pages from *iter and appends them to @bio's bvec array. The
+* pages will have to be released using put_page() when done.
+* The function tries, but does not guarantee, to pin as many pages as
+* fit into the bio, or are requested in *iter, whatever is smaller.
+* If MM encounters an error pinning the requested pages, it stops.
+* Error is returned only if 0 pages could be pinned.
+*/
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+{
+unsigned short orig_vcnt = bio->bi_vcnt;
+
+do {
+int ret = __bio_iov_iter_get_pages(bio, iter);
+
+if (unlikely(ret))
+return bio->bi_vcnt > orig_vcnt ? 0 : ret;
+
+} while (iov_iter_count(iter) && !bio_full(bio));
+
+return 0;
+}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);

static void submit_bio_wait_endio(struct bio *bio)

@@ -1866,6 +1893,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
bio_integrity_trim(split);

bio_advance(bio, split->bi_iter.bi_size);
+bio->bi_iter.bi_done = 0;

if (bio_flagged(bio, BIO_TRACE_COMPLETION))
bio_set_flag(split, BIO_TRACE_COMPLETION);

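The replacement of the diff bookkeeping with a single tail-trim expression is easy to check in isolation. Below is a small, self-contained model of the same arithmetic (names are illustrative, not from bio.c):

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

/*
 * Given a pinned region starting at 'offset' within the first page and
 * spanning 'size' bytes, compute per-page segment lengths the way
 * __bio_iov_iter_get_pages() does above.
 */
static void split_into_pages(size_t offset, size_t size,
                             size_t *lens, size_t nr_pages)
{
        size_t i;

        for (i = 0; i < nr_pages; i++)
                lens[i] = PAGE_SIZE;

        lens[0] -= offset;
        /* trim the tail page down to what was actually pinned */
        lens[nr_pages - 1] -= nr_pages * PAGE_SIZE - offset - size;
}

int main(void)
{
        size_t lens[3];

        /* 10000 bytes starting 100 bytes into the first page span 3 pages */
        split_into_pages(100, 10000, lens, 3);
        assert(lens[0] == 4096 - 100);
        assert(lens[1] == 4096);
        assert(lens[0] + lens[1] + lens[2] == 10000);
        return 0;
}
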
@@ -558,10 +558,8 @@ static void __blk_mq_complete_request(struct request *rq)
bool shared = false;
int cpu;

-if (cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) !=
-MQ_RQ_IN_FLIGHT)
+if (!blk_mq_mark_complete(rq))
return;

if (rq->internal_tag != -1)
blk_mq_sched_completed_request(rq);

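The open-coded cmpxchg() above moves into a blk_mq_mark_complete() helper so that drivers (nbd, later in this same merge) can claim the in-flight-to-complete transition too. Assuming it simply wraps the removed cmpxchg, the helper would look like this sketch:

/*
 * Hedged sketch of the helper: returns true if the caller won the race
 * to move the request from in-flight to complete, false if someone
 * else (e.g. a timeout handler) already did.
 */
static inline bool blk_mq_mark_complete(struct request *rq)
{
        return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
                        MQ_RQ_IN_FLIGHT;
}
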
@@ -879,6 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
#define LPSS_GPIODEF0_DMA_LLP BIT(13)

static DEFINE_MUTEX(lpss_iosf_mutex);
+static bool lpss_iosf_d3_entered;

static void lpss_iosf_enter_d3_state(void)
{

@@ -921,6 +922,9 @@ static void lpss_iosf_enter_d3_state(void)

iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
LPSS_IOSF_GPIODEF0, value1, mask1);

+lpss_iosf_d3_entered = true;
+
exit:
mutex_unlock(&lpss_iosf_mutex);
}

@@ -935,6 +939,11 @@ static void lpss_iosf_exit_d3_state(void)

mutex_lock(&lpss_iosf_mutex);

+if (!lpss_iosf_d3_entered)
+goto exit;
+
+lpss_iosf_d3_entered = false;
+
iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
LPSS_IOSF_GPIODEF0, value1, mask1);

@@ -944,13 +953,13 @@ static void lpss_iosf_exit_d3_state(void)
iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
LPSS_IOSF_PMCSR, value2, mask2);

+exit:
mutex_unlock(&lpss_iosf_mutex);
}

-static int acpi_lpss_suspend(struct device *dev, bool runtime)
+static int acpi_lpss_suspend(struct device *dev, bool wakeup)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
-bool wakeup = runtime || device_may_wakeup(dev);
int ret;

if (pdata->dev_desc->flags & LPSS_SAVE_CTX)

@@ -963,14 +972,14 @@ static int acpi_lpss_suspend(struct device *dev, bool runtime)
* wrong status for devices being about to be powered off. See
* lpss_iosf_enter_d3_state() for further information.
*/
-if ((runtime || !pm_suspend_via_firmware()) &&
+if (acpi_target_system_state() == ACPI_STATE_S0 &&
lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
lpss_iosf_enter_d3_state();

return ret;
}

-static int acpi_lpss_resume(struct device *dev, bool runtime)
+static int acpi_lpss_resume(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;

@@ -979,8 +988,7 @@ static int acpi_lpss_resume(struct device *dev, bool runtime)
* This call is kept first to be in symmetry with
* acpi_lpss_runtime_suspend() one.
*/
-if ((runtime || !pm_resume_via_firmware()) &&
-lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
lpss_iosf_exit_d3_state();

ret = acpi_dev_resume(dev);

@@ -1004,12 +1012,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
return 0;

ret = pm_generic_suspend_late(dev);
-return ret ? ret : acpi_lpss_suspend(dev, false);
+return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}

static int acpi_lpss_resume_early(struct device *dev)
{
-int ret = acpi_lpss_resume(dev, false);
+int ret = acpi_lpss_resume(dev);

return ret ? ret : pm_generic_resume_early(dev);
}

@@ -1024,7 +1032,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)

static int acpi_lpss_runtime_resume(struct device *dev)
{
-int ret = acpi_lpss_resume(dev, true);
+int ret = acpi_lpss_resume(dev);

return ret ? ret : pm_generic_runtime_resume(dev);
}

@@ -497,6 +497,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
status =
acpi_ps_create_op(walk_state, aml_op_start, &op);
if (ACPI_FAILURE(status)) {
+/*
+* ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
+* executing it as a control method. However, if we encounter
+* an error while loading the table, we need to keep trying to
+* load the table rather than aborting the table load. Set the
+* status to AE_OK to proceed with the table load.
+*/
+if ((walk_state->
+parse_flags & ACPI_PARSE_MODULE_LEVEL)
+&& status == AE_ALREADY_EXISTS) {
+status = AE_OK;
+}
if (status == AE_CTRL_PARSE_CONTINUE) {
continue;
}

@@ -694,6 +706,25 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
acpi_ps_next_parse_state(walk_state, op, status);
if (status == AE_CTRL_PENDING) {
status = AE_OK;
+} else
+if ((walk_state->
+parse_flags & ACPI_PARSE_MODULE_LEVEL)
+&& status != AE_CTRL_TRANSFER
+&& ACPI_FAILURE(status)) {
+/*
+* ACPI_PARSE_MODULE_LEVEL flag means that we are currently
+* loading a table by executing it as a control method.
+* However, if we encounter an error while loading the table,
+* we need to keep trying to load the table rather than
+* aborting the table load (setting the status to AE_OK
+* continues the table load). If we get a failure at this
+* point, it means that the dispatcher got an error while
+* processing Op (most likely an AML operand error) or a
+* control method was called from module level and the
+* dispatcher returned AE_CTRL_TRANSFER. In the latter case,
+* leave the status alone, there's nothing wrong with it.
+*/
+status = AE_OK;
+}
}

@@ -434,14 +434,6 @@ static int really_probe(struct device *dev, struct device_driver *drv)
goto probe_failed;
}

-/*
-* Ensure devices are listed in devices_kset in correct order
-* It's important to move Dev to the end of devices_kset before
-* calling .probe, because it could be recursive and parent Dev
-* should always go first
-*/
-devices_kset_move_last(dev);
-
if (dev->bus->probe) {
ret = dev->bus->probe(dev);
if (ret)

@@ -112,12 +112,16 @@ struct nbd_device {
struct task_struct *task_setup;
};

+#define NBD_CMD_REQUEUED 1
+
struct nbd_cmd {
struct nbd_device *nbd;
+struct mutex lock;
int index;
int cookie;
-struct completion send_complete;
blk_status_t status;
+unsigned long flags;
+u32 cmd_cookie;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

@@ -146,6 +150,35 @@ static inline struct device *nbd_to_dev(struct nbd_device *nbd)
return disk_to_dev(nbd->disk);
}

+static void nbd_requeue_cmd(struct nbd_cmd *cmd)
+{
+struct request *req = blk_mq_rq_from_pdu(cmd);
+
+if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
+blk_mq_requeue_request(req, true);
+}
+
+#define NBD_COOKIE_BITS 32
+
+static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
+{
+struct request *req = blk_mq_rq_from_pdu(cmd);
+u32 tag = blk_mq_unique_tag(req);
+u64 cookie = cmd->cmd_cookie;
+
+return (cookie << NBD_COOKIE_BITS) | tag;
+}
+
+static u32 nbd_handle_to_tag(u64 handle)
+{
+return (u32)handle;
+}
+
+static u32 nbd_handle_to_cookie(u64 handle)
+{
+return (u32)(handle >> NBD_COOKIE_BITS);
+}
+
static const char *nbdcmd_to_ascii(int cmd)
{
switch (cmd) {

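The wire handle is 64 bits, so the driver packs a 32-bit reissue cookie above the 32-bit block-layer tag. A standalone round-trip check of that packing, using the same shift constant:

#include <assert.h>
#include <stdint.h>

#define NBD_COOKIE_BITS 32

static uint64_t pack_handle(uint32_t cookie, uint32_t tag)
{
        return ((uint64_t)cookie << NBD_COOKIE_BITS) | tag;
}

int main(void)
{
        uint64_t handle = pack_handle(7, 0xabcd);

        /* unpacking mirrors nbd_handle_to_tag()/nbd_handle_to_cookie() */
        assert((uint32_t)handle == 0xabcd);
        assert((uint32_t)(handle >> NBD_COOKIE_BITS) == 7);
        return 0;
}

A reply carrying a stale cookie then fails the comparison in nbd_read_stat() instead of completing a request that has since been reissued.
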
@@ -319,6 +352,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
}
config = nbd->config;

+if (!mutex_trylock(&cmd->lock))
+return BLK_EH_RESET_TIMER;
+
if (config->num_connections > 1) {
dev_err_ratelimited(nbd_to_dev(nbd),
"Connection timed out, retrying (%d/%d alive)\n",

@@ -343,7 +379,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
}
-blk_mq_requeue_request(req, true);
+mutex_unlock(&cmd->lock);
+nbd_requeue_cmd(cmd);
nbd_config_put(nbd);
return BLK_EH_DONE;
}

@@ -353,6 +390,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
}
set_bit(NBD_TIMEDOUT, &config->runtime_flags);
cmd->status = BLK_STS_IOERR;
+mutex_unlock(&cmd->lock);
sock_shutdown(nbd);
nbd_config_put(nbd);
done:

@@ -430,9 +468,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
struct iov_iter from;
unsigned long size = blk_rq_bytes(req);
struct bio *bio;
+u64 handle;
u32 type;
u32 nbd_cmd_flags = 0;
-u32 tag = blk_mq_unique_tag(req);
int sent = nsock->sent, skip = 0;

iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));

@@ -474,6 +512,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
goto send_pages;
}
iov_iter_advance(&from, sent);
+} else {
+cmd->cmd_cookie++;
}
cmd->index = index;
cmd->cookie = nsock->cookie;

@@ -482,7 +522,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
}
-memcpy(request.handle, &tag, sizeof(tag));
+handle = nbd_cmd_handle(cmd);
+memcpy(request.handle, &handle, sizeof(handle));

dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
req, nbdcmd_to_ascii(type),

@@ -500,6 +541,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
nsock->pending = req;
nsock->sent = sent;
}
+set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
}
dev_err_ratelimited(disk_to_dev(nbd->disk),

@@ -541,6 +583,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
*/
nsock->pending = req;
nsock->sent = sent;
+set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
}
dev_err(disk_to_dev(nbd->disk),

@@ -573,10 +616,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
struct nbd_reply reply;
struct nbd_cmd *cmd;
struct request *req = NULL;
+u64 handle;
u16 hwq;
u32 tag;
struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
struct iov_iter to;
+int ret = 0;

reply.magic = 0;
iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));

@@ -594,8 +639,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
return ERR_PTR(-EPROTO);
}

-memcpy(&tag, reply.handle, sizeof(u32));
-
+memcpy(&handle, reply.handle, sizeof(handle));
+tag = nbd_handle_to_tag(handle);
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],

@@ -606,11 +651,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
return ERR_PTR(-ENOENT);
}
cmd = blk_mq_rq_to_pdu(req);
+
+mutex_lock(&cmd->lock);
+if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
+dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
+req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
+ret = -ENOENT;
+goto out;
+}
+if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
+dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
+req);
+ret = -ENOENT;
+goto out;
+}
if (ntohl(reply.error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply.error));
cmd->status = BLK_STS_IOERR;
-return cmd;
+goto out;
}

dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);

@@ -635,18 +694,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
if (nbd_disconnected(config) ||
config->num_connections <= 1) {
cmd->status = BLK_STS_IOERR;
-return cmd;
+goto out;
}
-return ERR_PTR(-EIO);
+ret = -EIO;
+goto out;
}
dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
req, bvec.bv_len);
}
-} else {
-/* See the comment in nbd_queue_rq. */
-wait_for_completion(&cmd->send_complete);
}
-return cmd;
+out:
+mutex_unlock(&cmd->lock);
+return ret ? ERR_PTR(ret) : cmd;
}

static void recv_work(struct work_struct *work)

@@ -805,7 +864,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
*/
blk_mq_start_request(req);
if (unlikely(nsock->pending && nsock->pending != req)) {
-blk_mq_requeue_request(req, true);
+nbd_requeue_cmd(cmd);
ret = 0;
goto out;
}

@@ -818,7 +877,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed, requeueing\n");
nbd_mark_nsock_dead(nbd, nsock, 1);
-blk_mq_requeue_request(req, true);
+nbd_requeue_cmd(cmd);
ret = 0;
}
out:

@@ -842,7 +901,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
* that the server is misbehaving (or there was an error) before we're
* done sending everything over the wire.
*/
-init_completion(&cmd->send_complete);
+mutex_lock(&cmd->lock);
+clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

/* We can be called directly from the user space process, which means we
* could possibly have signals pending so our sendmsg will fail. In

@@ -854,7 +914,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
ret = BLK_STS_IOERR;
else if (!ret)
ret = BLK_STS_OK;
-complete(&cmd->send_complete);
+mutex_unlock(&cmd->lock);

return ret;
}

@@ -1460,6 +1520,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
cmd->nbd = set->driver_data;
+cmd->flags = 0;
+mutex_init(&cmd->lock);
return 0;
}

@@ -708,6 +708,7 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
#endif
if (vma->vm_flags & VM_SHARED)
return shmem_zero_setup(vma);
+vma_set_anonymous(vma);
return 0;
}

@@ -1895,14 +1895,22 @@ static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
size_t bytes;
-__u32 buf[16];
+__u32 t, buf[16];
const char __user *p = buffer;

while (count > 0) {
+int b, i = 0;
+
bytes = min(count, sizeof(buf));
if (copy_from_user(&buf, p, bytes))
return -EFAULT;

+for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
+if (!arch_get_random_int(&t))
+break;
+buf[i] ^= t;
+}
+
count -= bytes;
p += bytes;

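The new loop folds the CPU's hardware random number generator into each 32-bit word written from user space, so a hostile writer no longer fully controls the bytes mixed into the pool. A minimal user-space model of that mixing step (arch_get_random_int() stubbed out, since it is a kernel/CPU facility):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Stand-in for the kernel's arch_get_random_int(); real hardware can
 * fail (no RDRAND, exhausted entropy), which is why the loop breaks
 * on failure rather than erroring out.
 */
static bool stub_arch_get_random_int(uint32_t *v)
{
        *v = 0x9e3779b9; /* illustrative constant, not real entropy */
        return true;
}

static void mix_buffer(uint32_t *buf, size_t words)
{
        uint32_t t;
        size_t i;

        for (i = 0; i < words; i++) {
                if (!stub_arch_get_random_int(&t))
                        break;
                buf[i] ^= t; /* XOR preserves any entropy already present */
        }
}
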
@@ -24,7 +24,7 @@
#define ASPEED_MPLL_PARAM 0x20
#define ASPEED_HPLL_PARAM 0x24
#define AST2500_HPLL_BYPASS_EN BIT(20)
-#define AST2400_HPLL_STRAPPED BIT(18)
+#define AST2400_HPLL_PROGRAMMED BIT(18)
#define AST2400_HPLL_BYPASS_EN BIT(17)
#define ASPEED_MISC_CTRL 0x2c
#define UART_DIV13_EN BIT(12)

@@ -91,8 +91,8 @@ static const struct aspeed_gate_data aspeed_gates[] = {
[ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
[ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
[ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
-[ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
-[ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */
+[ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
+[ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
[ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
[ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
[ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */

@@ -212,9 +212,22 @@ static int aspeed_clk_is_enabled(struct clk_hw *hw)
{
struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
u32 clk = BIT(gate->clock_idx);
+u32 rst = BIT(gate->reset_idx);
u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
u32 reg;

+/*
+* If the IP is in reset, treat the clock as not enabled,
+* this happens with some clocks such as the USB one when
+* coming from cold reset. Without this, aspeed_clk_enable()
+* will fail to lift the reset.
+*/
+if (gate->reset_idx >= 0) {
+regmap_read(gate->map, ASPEED_RESET_CTRL, &reg);
+if (reg & rst)
+return 0;
+}
+
regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);

return ((reg & clk) == enval) ? 1 : 0;

@@ -565,29 +578,45 @@ builtin_platform_driver(aspeed_clk_driver);
static void __init aspeed_ast2400_cc(struct regmap *map)
{
struct clk_hw *hw;
-u32 val, freq, div;
+u32 val, div, clkin, hpll;
+const u16 hpll_rates[][4] = {
+{384, 360, 336, 408},
+{400, 375, 350, 425},
+};
+int rate;

/*
* CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by
* strapping
*/
regmap_read(map, ASPEED_STRAP, &val);
-if (val & CLKIN_25MHZ_EN)
-freq = 25000000;
-else if (val & AST2400_CLK_SOURCE_SEL)
-freq = 48000000;
-else
-freq = 24000000;
-hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq);
-pr_debug("clkin @%u MHz\n", freq / 1000000);
+rate = (val >> 8) & 3;
+if (val & CLKIN_25MHZ_EN) {
+clkin = 25000000;
+hpll = hpll_rates[1][rate];
+} else if (val & AST2400_CLK_SOURCE_SEL) {
+clkin = 48000000;
+hpll = hpll_rates[0][rate];
+} else {
+clkin = 24000000;
+hpll = hpll_rates[0][rate];
+}
+hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin);
+pr_debug("clkin @%u MHz\n", clkin / 1000000);

/*
* High-speed PLL clock derived from the crystal. This the CPU clock,
-* and we assume that it is enabled
+* and we assume that it is enabled. It can be configured through the
+* HPLL_PARAM register, or set to a specified frequency by strapping.
*/
regmap_read(map, ASPEED_HPLL_PARAM, &val);
-WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured");
-aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val);
+if (val & AST2400_HPLL_PROGRAMMED)
+hw = aspeed_ast2400_calc_pll("hpll", val);
+else
+hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0,
+hpll * 1000000);
+
+aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw;

/*
* Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)

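On the AST2400 the strap register selects both the crystal frequency and, when the HPLL is not programmed via HPLL_PARAM, one of four fixed HPLL rates per crystal family. A compact model of the lookup performed above (the two strap bit positions are assumptions for the sake of a runnable example):

#include <stdint.h>
#include <stdio.h>

#define CLKIN_25MHZ_EN          (1u << 23) /* assumed bit position */
#define AST2400_CLK_SOURCE_SEL  (1u << 18) /* assumed bit position */

static const uint16_t hpll_rates[][4] = {
        {384, 360, 336, 408}, /* 24/48 MHz crystal */
        {400, 375, 350, 425}, /* 25 MHz crystal */
};

static void decode_strap(uint32_t strap, uint32_t *clkin_hz,
                         uint32_t *hpll_mhz)
{
        int rate = (strap >> 8) & 3; /* strap bits 9:8 pick the rate */

        if (strap & CLKIN_25MHZ_EN) {
                *clkin_hz = 25000000;
                *hpll_mhz = hpll_rates[1][rate];
        } else if (strap & AST2400_CLK_SOURCE_SEL) {
                *clkin_hz = 48000000;
                *hpll_mhz = hpll_rates[0][rate];
        } else {
                *clkin_hz = 24000000;
                *hpll_mhz = hpll_rates[0][rate];
        }
}

int main(void)
{
        uint32_t clkin, hpll;

        decode_strap(CLKIN_25MHZ_EN | (2 << 8), &clkin, &hpll);
        printf("clkin %u Hz, hpll %u MHz\n", clkin, hpll); /* 25 MHz, 350 */
        return 0;
}
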
@@ -24,7 +24,6 @@
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>
-#include <linux/stringify.h>

#include "clk.h"

@@ -2559,7 +2558,7 @@ static const struct {
unsigned long flag;
const char *name;
} clk_flags[] = {
-#define ENTRY(f) { f, __stringify(f) }
+#define ENTRY(f) { f, #f }
ENTRY(CLK_SET_RATE_GATE),
ENTRY(CLK_SET_PARENT_GATE),
ENTRY(CLK_SET_RATE_PARENT),

@@ -51,7 +51,7 @@ static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
unsigned long divider;

-divider = meson_parm_read(clk->map, &adiv->div);
+divider = meson_parm_read(clk->map, &adiv->div) + 1;

return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
}

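The fix is a classic divider off-by-one: the register field stores divider minus one, so a field value of 0 must mean divide-by-1, not divide-by-0. A tiny illustration of the corrected behaviour:

#include <assert.h>
#include <stdint.h>

static uint64_t recalc_rate_fixed(uint64_t parent_rate, uint32_t reg_field)
{
        /* hardware divides by (field + 1), matching the corrected code */
        uint32_t divider = reg_field + 1;

        return (parent_rate + divider - 1) / divider; /* DIV_ROUND_UP */
}

int main(void)
{
        /* the old code would have divided by the raw field, 0 here */
        assert(recalc_rate_fixed(48000000, 0) == 48000000);
        assert(recalc_rate_fixed(48000000, 3) == 12000000);
        return 0;
}
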
@@ -498,6 +498,7 @@ static struct clk_regmap gxbb_fclk_div2 = {
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div2_div" },
.num_parents = 1,
+.flags = CLK_IS_CRITICAL,
},
};

@@ -35,6 +35,7 @@
#define CLK_SEL 0x10
#define CLK_DIS 0x14

+#define ARMADA_37XX_DVFS_LOAD_1 1
#define LOAD_LEVEL_NR 4

#define ARMADA_37XX_NB_L0L1 0x18

@@ -507,6 +508,40 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}

+/*
+* Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz
+* respectively) to L0 frequency (1.2 Ghz) requires a significant
+* amount of time to let VDD stabilize to the appropriate
+* voltage. This amount of time is large enough that it cannot be
+* covered by the hardware countdown register. Due to this, the CPU
+* might start operating at L0 before the voltage is stabilized,
+* leading to CPU stalls.
+*
+* To work around this problem, we prevent switching directly from the
+* L2/L3 frequencies to the L0 frequency, and instead switch to the L1
+* frequency in-between. The sequence therefore becomes:
+* 1. First switch from L2/L3(200/300MHz) to L1(600MHZ)
+* 2. Sleep 20ms for stabling VDD voltage
+* 3. Then switch from L1(600MHZ) to L0(1200Mhz).
+*/
+static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
+{
+unsigned int cur_level;
+
+if (rate != 1200 * 1000 * 1000)
+return;
+
+regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
+cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
+if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
+return;
+
+regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
+ARMADA_37XX_NB_CPU_LOAD_MASK,
+ARMADA_37XX_DVFS_LOAD_1);
+msleep(20);
+}
+
static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{

@@ -537,6 +572,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
*/
reg = ARMADA_37XX_NB_CPU_LOAD;
mask = ARMADA_37XX_NB_CPU_LOAD_MASK;

+clk_pm_cpu_set_rate_wa(rate, base);
+
regmap_update_bits(base, reg, mask, load_level);

return rate;

@@ -2781,6 +2781,7 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {

static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
.halt_reg = 0x75018,
+.halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x75018,
.enable_mask = BIT(0),

@@ -2910,6 +2910,7 @@ static struct gdsc mmagic_bimc_gdsc = {
.name = "mmagic_bimc",
},
.pwrsts = PWRSTS_OFF_ON,
+.flags = ALWAYS_ON,
};

static struct gdsc mmagic_video_gdsc = {

@@ -311,12 +311,20 @@ static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

-static bool intel_pstate_get_ppc_enable_status(void)
+static bool intel_pstate_acpi_pm_profile_server(void)
{
if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
return true;

+return false;
+}
+
+static bool intel_pstate_get_ppc_enable_status(void)
+{
+if (intel_pstate_acpi_pm_profile_server())
+return true;
+
return acpi_ppc;
}

@@ -459,6 +467,11 @@ static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *pol
static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}

+static inline bool intel_pstate_acpi_pm_profile_server(void)
+{
+return false;
+}
+
#endif

static inline void update_turbo_state(void)

@@ -1841,7 +1854,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
intel_pstate_hwp_enable(cpu);

id = x86_match_cpu(intel_pstate_hwp_boost_ids);
-if (id)
+if (id && intel_pstate_acpi_pm_profile_server())
hwp_boost = true;
}

@@ -183,6 +183,7 @@ static struct platform_driver qcom_cpufreq_kryo_driver = {
static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
+{}
};

/*

@@ -181,7 +181,11 @@ static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
fwspec.param_count = 2;
fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
-fwspec.param[1] = IRQ_TYPE_NONE;
+/*
+* IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH
+* temporarily. Anyway, ->irq_set_type() will override it later.
+*/
+fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;

return irq_create_fwspec_mapping(&fwspec);
}

@@ -64,7 +64,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
* Note that active low is the default.
*/
if (IS_ENABLED(CONFIG_REGULATOR) &&
-(of_device_is_compatible(np, "reg-fixed-voltage") ||
+(of_device_is_compatible(np, "regulator-fixed") ||
+of_device_is_compatible(np, "reg-fixed-voltage") ||
of_device_is_compatible(np, "regulator-gpio"))) {
/*
* The regulator GPIO handles are specified such that the

@@ -652,6 +652,7 @@ enum intel_sbi_destination {
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
+#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)

struct intel_fbdev;
struct intel_fbc_work;

@@ -1782,15 +1782,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}

-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-enum transcoder cpu_transcoder)
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
+struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg);

val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
val |= TRANS_DDI_PORT_NONE;
I915_WRITE(reg, val);

+if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
+intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
+/* Quirk time at 100ms for reliable operation */
+msleep(100);
+}
}

int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,

@@ -5809,7 +5809,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);

if (!transcoder_is_dsi(cpu_transcoder))
-intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+intel_ddi_disable_transcoder_func(old_crtc_state);

if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);

@@ -14646,6 +14646,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev)
DRM_INFO("Applying T12 delay quirk\n");
}

+/*
+* GeminiLake NUC HDMI outputs require additional off time
+* this allows the onboard retimer to correctly sync to signal
+*/
+static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
+{
+struct drm_i915_private *dev_priv = to_i915(dev);
+
+dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;

@@ -14732,6 +14744,13 @@ static struct intel_quirk intel_quirks[] = {

/* Toshiba Satellite P50-C-18C */
{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },

+/* GeminiLake NUC */
+{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+/* ASRock ITX*/
+{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
};

static void intel_init_quirks(struct drm_device *dev)

@@ -1388,8 +1388,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-enum transcoder cpu_transcoder);
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
struct intel_encoder *

@@ -612,6 +612,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(imx_ldb->regmap);
}

+/* disable LDB by resetting the control register to POR default */
+regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
+
imx_ldb->dev = dev;

if (of_id)

@@ -652,14 +655,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
if (ret || i < 0 || i > 1)
return -EINVAL;

+if (!of_device_is_available(child))
+continue;
+
if (dual && i > 0) {
dev_warn(dev, "dual-channel mode, ignoring second output\n");
continue;
}

-if (!of_device_is_available(child))
-continue;
-
channel = &imx_ldb->channel[i];
channel->ldb = imx_ldb;
channel->chno = i;

@@ -339,7 +339,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
break;
case V4L2_MBUS_BT656:
csicfg->ext_vsync = 0;
-if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field))
+if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
+mbus_fmt->field == V4L2_FIELD_ALTERNATE)
csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
else
csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;

@@ -237,12 +237,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
/*
* It's not always possible to have 1 to 2 ratio when d=7, so fall back
* to minimal possible clkh in this case.
+*
+* Note:
+* CLKH is not allowed to be 0, in this case I2C clock is not generated
+* at all
*/
-if (clk >= clkl + d) {
+if (clk > clkl + d) {
clkh = clk - clkl - d;
clkl -= d;
} else {
-clkh = 0;
+clkh = 1;
clkl = clk - (d << 1);
}

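The divider change guarantees CLKH is at least 1 in the fallback branch, and the boundary case clk == clkl + d now takes that branch too. A worked model of the corrected split (pure arithmetic, not driver code):

#include <assert.h>

/*
 * Model of the corrected clkh/clkl split: 'clk' is the total divider
 * budget and 'd' the fixed module delay, as in the driver above.
 */
static void split_clk(int clk, int clkl, int d, int *clkh_out, int *clkl_out)
{
        int clkh;

        if (clk > clkl + d) {
                clkh = clk - clkl - d;
                clkl -= d;
        } else {
                clkh = 1; /* CLKH of 0 would stop the I2C clock entirely */
                clkl = clk - (d << 1);
        }
        *clkh_out = clkh;
        *clkl_out = clkl;
}

int main(void)
{
        int clkh, clkl;

        /* boundary case: clk == clkl + d falls back instead of clkh = 0 */
        split_clk(20, 13, 7, &clkh, &clkl);
        assert(clkh == 1 && clkl == 6);
        return 0;
}
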
@@ -368,6 +368,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
goto err_desc;
}

+reinit_completion(&dma->cmd_complete);
txdesc->callback = i2c_imx_dma_callback;
txdesc->callback_param = i2c_imx;
if (dma_submit_error(dmaengine_submit(txdesc))) {

@@ -622,7 +623,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
* The first byte must be transmitted by the CPU.
*/
imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
-reinit_completion(&i2c_imx->dma->cmd_complete);
time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));

@@ -681,7 +681,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
if (result)
return result;

-reinit_completion(&i2c_imx->dma->cmd_complete);
time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));

@@ -1010,7 +1009,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
"gpio");
rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
-rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH);
+rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);

if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) {

@@ -32,6 +32,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

/* register offsets */

@@ -111,8 +112,9 @@
#define ID_ARBLOST (1 << 3)
#define ID_NACK (1 << 4)
/* persistent flags */
#define ID_P_NO_RXDMA (1 << 30) /* HW forbids RXDMA sometimes */
#define ID_P_PM_BLOCKED (1 << 31)
#define ID_P_MASK ID_P_PM_BLOCKED
#define ID_P_MASK (ID_P_PM_BLOCKED | ID_P_NO_RXDMA)

enum rcar_i2c_type {
I2C_RCAR_GEN1,

@@ -141,6 +143,8 @@ struct rcar_i2c_priv {
struct dma_chan *dma_rx;
struct scatterlist sg;
enum dma_data_direction dma_direction;

struct reset_control *rstc;
};

#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)

@@ -370,6 +374,11 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
sg_dma_len(&priv->sg), priv->dma_direction);

/* Gen3 can only do one RXDMA per transfer and we just completed it */
if (priv->devtype == I2C_RCAR_GEN3 &&
priv->dma_direction == DMA_FROM_DEVICE)
priv->flags |= ID_P_NO_RXDMA;

priv->dma_direction = DMA_NONE;
}

@@ -407,8 +416,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
unsigned char *buf;
int len;

/* Do not use DMA if it's not available or for messages < 8 bytes */
if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE))
/* Do various checks to see if DMA is feasible at all */
if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) ||
(read && priv->flags & ID_P_NO_RXDMA))
return;

if (read) {

@@ -739,6 +749,25 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
}
}

/* I2C is a special case, we need to poll the status of a reset */
static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
{
int i, ret;

ret = reset_control_reset(priv->rstc);
if (ret)
return ret;

for (i = 0; i < LOOP_TIMEOUT; i++) {
ret = reset_control_status(priv->rstc);
if (ret == 0)
return 0;
udelay(1);
}

return -ETIMEDOUT;
}

static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs,
int num)

@@ -750,6 +779,16 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,

pm_runtime_get_sync(dev);

/* Gen3 needs a reset before allowing RXDMA once */
if (priv->devtype == I2C_RCAR_GEN3) {
priv->flags |= ID_P_NO_RXDMA;
if (!IS_ERR(priv->rstc)) {
ret = rcar_i2c_do_reset(priv);
if (ret == 0)
priv->flags &= ~ID_P_NO_RXDMA;
}
}

rcar_i2c_init(priv);

ret = rcar_i2c_bus_barrier(priv);

@@ -920,6 +959,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
goto out_pm_put;

if (priv->devtype == I2C_RCAR_GEN3) {
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (!IS_ERR(priv->rstc)) {
ret = reset_control_status(priv->rstc);
if (ret < 0)
priv->rstc = ERR_PTR(-ENOTSUPP);
}
}

/* Stay always active when multi-master to keep arbitration working */
if (of_property_read_bool(dev->of_node, "multi-master"))
priv->flags |= ID_P_PM_BLOCKED;
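rcar_i2c_do_reset() above is the poll-until-deasserted idiom: trigger the reset, then spin on reset_control_status() until it reports deasserted (0) or the loop times out. A compressed sketch of the same pattern, assuming the kernel's documented reset_control_status() semantics (negative errno on failure, positive while asserted); poll_reset_done and the explicit error check are illustrative additions, not driver code:

	/* Sketch only: mirrors rcar_i2c_do_reset(), with an extra
	 * early exit when the status read itself fails. */
	static int poll_reset_done(struct reset_control *rstc, unsigned int loops)
	{
		unsigned int i;
		int ret;

		for (i = 0; i < loops; i++) {
			ret = reset_control_status(rstc);
			if (ret <= 0)		/* 0: deasserted, <0: error */
				return ret;
			udelay(1);		/* still asserted: wait and retry */
		}
		return -ETIMEDOUT;
	}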
@@ -624,7 +624,7 @@ static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
static void i2c_adapter_lock_bus(struct i2c_adapter *adapter,
unsigned int flags)
{
rt_mutex_lock(&adapter->bus_lock);
rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
}

/**

@@ -144,7 +144,7 @@ static void i2c_mux_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
struct i2c_mux_priv *priv = adapter->algo_data;
struct i2c_adapter *parent = priv->muxc->parent;

rt_mutex_lock(&parent->mux_lock);
rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
if (!(flags & I2C_LOCK_ROOT_ADAPTER))
return;
i2c_lock_bus(parent, flags);

@@ -181,7 +181,7 @@ static void i2c_parent_lock_bus(struct i2c_adapter *adapter,
struct i2c_mux_priv *priv = adapter->algo_data;
struct i2c_adapter *parent = priv->muxc->parent;

rt_mutex_lock(&parent->mux_lock);
rt_mutex_lock_nested(&parent->mux_lock, i2c_adapter_depth(adapter));
i2c_lock_bus(parent, flags);
}
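All three hunks replace a plain rt_mutex_lock() with rt_mutex_lock_nested(), passing the adapter's depth in the mux tree as the lockdep subclass; without that, locking a mux'ed adapter and then its parent looks to lockdep like recursive locking of a single class. A sketch of how such a depth can be computed by walking up the parents (the function shape is an illustration of the idea, not the kernel's i2c_adapter_depth(); i2c_parent_is_i2c_adapter() is the in-tree helper assumed here):

	/* Sketch: count mux levels above an adapter so each level can
	 * use a distinct lockdep subclass in rt_mutex_lock_nested(). */
	static unsigned int adapter_depth(struct i2c_adapter *adap)
	{
		unsigned int depth = 0;

		while ((adap = i2c_parent_is_i2c_adapter(adap)))
			depth++;
		return depth;
	}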
@@ -1346,6 +1346,8 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
{ "ELAN0618", 0 },
{ "ELAN061D", 0 },
{ "ELAN0622", 0 },
{ "ELAN1000", 0 },
{ }
};
@@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
},
},
{
/* Lenovo LaVie Z */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
},
},
{ }
};
@@ -293,9 +293,10 @@ static void mxcmci_swap_buffers(struct mmc_data *data)
int i;

for_each_sg(data->sg, sg, data->sg_len, i) {
void *buf = kmap_atomic(sg_page(sg) + sg->offset;
void *buf = kmap_atomic(sg_page(sg) + sg->offset);
buffer_swap32(buf, sg->length);
kunmap_atomic(buf);
}
}
#else
static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
@@ -1717,6 +1717,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
goto err_upper_unlink;
}

bond->nest_level = dev_get_nest_level(bond_dev) + 1;

/* If the mode uses primary, then the following is handled by
* bond_change_active_slave().
*/

@@ -1764,7 +1766,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);

bond->nest_level = dev_get_nest_level(bond_dev);

netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
slave_dev->name,

@@ -3415,6 +3416,13 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
}
}

static int bond_get_nest_level(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);

return bond->nest_level;
}

static void bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats)
{

@@ -3423,7 +3431,7 @@ static void bond_get_stats(struct net_device *bond_dev,
struct list_head *iter;
struct slave *slave;

spin_lock(&bond->stats_lock);
spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
memcpy(stats, &bond->bond_stats, sizeof(*stats));

rcu_read_lock();

@@ -4228,6 +4236,7 @@ static const struct net_device_ops bond_netdev_ops = {
.ndo_neigh_setup = bond_neigh_setup,
.ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
.ndo_get_lock_subclass = bond_get_nest_level,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_netpoll_setup = bond_netpoll_setup,
.ndo_netpoll_cleanup = bond_netpoll_cleanup,

@@ -4726,6 +4735,7 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;

bond->nest_level = SINGLE_DEPTH_NESTING;
netdev_lockdep_set_classes(bond_dev);

list_add_tail(&bond->bond_list, &bn->dev_list);
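The bonding hunks apply the same lockdep recipe at the netdev layer: a bond stacked on top of other devices records its nesting depth once at enslave time (dev_get_nest_level() + 1), exposes it through .ndo_get_lock_subclass, and then takes stats_lock with spin_lock_nested() so that bond-over-bond stats folding does not trip lockdep. A minimal sketch of the pattern, with an illustrative struct and names (not bonding code):

	/* Sketch: per-level subclass for a lock class that is taken
	 * recursively across stacked devices. */
	struct stacked_dev {
		spinlock_t stats_lock;
		int nest_level;	/* 1 for a flat device, +1 per stacking level */
	};

	static void fold_stats_locked(struct stacked_dev *d)
	{
		/* same lock class at every level; the subclass tells them apart */
		spin_lock_nested(&d->stats_lock, d->nest_level);
		/* ... fold lower-level stats into d ... */
		spin_unlock(&d->stats_lock);
	}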
@@ -1072,6 +1072,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
usb_free_urb(dev->intr_urb);

kfree(dev->intr_in_buffer);
kfree(dev->tx_msg_buffer);
}
}
@@ -2617,7 +2617,6 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6341_serdes_power,
};

static const struct mv88e6xxx_ops mv88e6095_ops = {

@@ -2783,6 +2782,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6341_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
};

@@ -2964,7 +2964,6 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6341_serdes_power,
};

static const struct mv88e6xxx_ops mv88e6176_ops = {

@@ -3346,6 +3345,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6341_serdes_power,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
@@ -333,6 +333,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,

memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
io_sq->desc_entry_size =
(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
sizeof(struct ena_eth_io_tx_desc) :
@@ -1111,14 +1111,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)

if (pdata->tx_pause != pdata->phy.tx_pause) {
new_state = 1;
pdata->hw_if.config_tx_flow_control(pdata);
pdata->tx_pause = pdata->phy.tx_pause;
pdata->hw_if.config_tx_flow_control(pdata);
}

if (pdata->rx_pause != pdata->phy.rx_pause) {
new_state = 1;
pdata->hw_if.config_rx_flow_control(pdata);
pdata->rx_pause = pdata->phy.rx_pause;
pdata->hw_if.config_rx_flow_control(pdata);
}

/* Speed support */
@@ -3073,6 +3073,7 @@ static void cxgb_del_udp_tunnel(struct net_device *netdev,

adapter->geneve_port = 0;
t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
break;
default:
return;
}

@@ -3158,6 +3159,7 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev,

t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
break;
default:
return;
}
@@ -2047,28 +2047,42 @@ static int enic_stop(struct net_device *netdev)
return 0;
}

static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
{
bool running = netif_running(netdev);
int err = 0;

ASSERT_RTNL();
if (running) {
err = enic_stop(netdev);
if (err)
return err;
}

netdev->mtu = new_mtu;

if (running) {
err = enic_open(netdev);
if (err)
return err;
}

return 0;
}

static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct enic *enic = netdev_priv(netdev);
int running = netif_running(netdev);

if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
return -EOPNOTSUPP;

if (running)
enic_stop(netdev);

netdev->mtu = new_mtu;

if (netdev->mtu > enic->port_mtu)
netdev_warn(netdev,
"interface MTU (%d) set higher than port MTU (%d)\n",
netdev->mtu, enic->port_mtu);
"interface MTU (%d) set higher than port MTU (%d)\n",
netdev->mtu, enic->port_mtu);

if (running)
enic_open(netdev);

return 0;
return _enic_change_mtu(netdev, new_mtu);
}

static void enic_change_mtu_work(struct work_struct *work)

@@ -2076,47 +2090,9 @@ static void enic_change_mtu_work(struct work_struct *work)
struct enic *enic = container_of(work, struct enic, change_mtu_work);
struct net_device *netdev = enic->netdev;
int new_mtu = vnic_dev_mtu(enic->vdev);
int err;
unsigned int i;

new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));

rtnl_lock();

/* Stop RQ */
del_timer_sync(&enic->notify_timer);

for (i = 0; i < enic->rq_count; i++)
napi_disable(&enic->napi[i]);

vnic_intr_mask(&enic->intr[0]);
enic_synchronize_irqs(enic);
err = vnic_rq_disable(&enic->rq[0]);
if (err) {
rtnl_unlock();
netdev_err(netdev, "Unable to disable RQ.\n");
return;
}
vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
vnic_cq_clean(&enic->cq[0]);
vnic_intr_clean(&enic->intr[0]);

/* Fill RQ with new_mtu-sized buffers */
netdev->mtu = new_mtu;
vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
/* Need at least one buffer on ring to get going */
if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
rtnl_unlock();
netdev_err(netdev, "Unable to alloc receive buffers.\n");
return;
}

/* Start RQ */
vnic_rq_enable(&enic->rq[0]);
napi_enable(&enic->napi[0]);
vnic_intr_unmask(&enic->intr[0]);
enic_notify_timer_start(enic);

(void)_enic_change_mtu(netdev, new_mtu);
rtnl_unlock();

netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);

@@ -2916,7 +2892,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/

enic->port_mtu = enic->config.mtu;
(void)enic_change_mtu(netdev, enic->port_mtu);

err = enic_set_mac_addr(netdev, enic->mac_addr);
if (err) {

@@ -3006,6 +2981,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* MTU range: 68 - 9000 */
netdev->min_mtu = ENIC_MIN_MTU;
netdev->max_mtu = ENIC_MAX_MTU;
netdev->mtu = enic->port_mtu;

err = register_netdev(netdev);
if (err) {
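The enic rework collapses two divergent MTU paths into one _enic_change_mtu() helper that must run under RTNL: stop the interface if it is running, update netdev->mtu, restart. The deferred worker then becomes a thin wrapper around it. A sketch of that wrapper shape (demo_mtu_work is illustrative; the real worker also clamps new_mtu and logs the result):

	/* Sketch: deferred path reusing the same RTNL-protected helper. */
	static void demo_mtu_work(struct net_device *netdev, int new_mtu)
	{
		rtnl_lock();
		(void)_enic_change_mtu(netdev, new_mtu);
		rtnl_unlock();
	}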
@@ -983,6 +983,7 @@ static int nic_dev_init(struct pci_dev *pdev)
hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
nic_dev, link_status_event_handler);

SET_NETDEV_DEV(netdev, &pdev->dev);
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "Failed to register netdev\n");
@@ -1172,6 +1172,8 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;

priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;

if (!MLX5_DSCP_SUPPORTED(mdev))
return 0;

@@ -3754,7 +3754,8 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,

if (!reset) {
params->sw_mtu = new_mtu;
set_mtu_cb(priv);
if (set_mtu_cb)
set_mtu_cb(priv);
netdev->mtu = params->sw_mtu;
goto out;
}

@@ -1696,7 +1696,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
int vport_num;
int err;

if (!MLX5_VPORT_MANAGER(dev))
if (!MLX5_ESWITCH_MANAGER(dev))
return 0;

esw_info(dev,

@@ -1765,7 +1765,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)

void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev))
return;

esw_info(esw->dev, "cleanup\n");

@@ -76,6 +76,7 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
void *ppriv)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
u16 max_mtu;

/* priv init */
priv->mdev = mdev;

@@ -84,6 +85,9 @@ void mlx5i_init(struct mlx5_core_dev *mdev,
priv->ppriv = ppriv;
mutex_init(&priv->state_lock);

mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
netdev->mtu = max_mtu;

mlx5e_build_nic_params(mdev, &priv->channels.params,
profile->max_nch(mdev), netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
@@ -80,7 +80,7 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
return NFP_REPR_TYPE_VF;
}

return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC;
return __NFP_REPR_TYPE_MAX;
}

static struct net_device *

@@ -91,6 +91,8 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
u8 port = 0;

repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
if (repr_type > NFP_REPR_TYPE_MAX)
return NULL;

reprs = rcu_dereference(app->reprs[repr_type]);
if (!reprs)
@@ -53,7 +53,7 @@
#include "dwmac1000.h"
#include "hwif.h"

#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)

/* Module parameters */

@@ -257,7 +257,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
return -ENOMEM;

/* Enable pci device */
ret = pcim_enable_device(pdev);
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
__func__);

@@ -300,9 +300,45 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
static void stmmac_pci_remove(struct pci_dev *pdev)
{
stmmac_dvr_remove(&pdev->dev);
pci_disable_device(pdev);
}

static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
static int stmmac_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int ret;

ret = stmmac_suspend(dev);
if (ret)
return ret;

ret = pci_save_state(pdev);
if (ret)
return ret;

pci_disable_device(pdev);
pci_wake_from_d3(pdev, true);
return 0;
}

static int stmmac_pci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int ret;

pci_restore_state(pdev);
pci_set_power_state(pdev, PCI_D0);

ret = pci_enable_device(pdev);
if (ret)
return ret;

pci_set_master(pdev);

return stmmac_resume(dev);
}

static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);

/* synthetic ID, no official vendor */
#define PCI_VENDOR_ID_STMMAC 0x700
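The stmmac change moves from managed pcim_enable_device() to explicit enable/disable and wraps the MAC-level stmmac_suspend()/stmmac_resume() in PCI-aware callbacks. The ordering is the point: save config space before powering the device down, restore it and re-enable the device before touching the MAC on resume. Exposing the callbacks is then one macro plus one field; a sketch with illustrative names (the real driver sets .pm on its own pci_driver and also fills .id_table):

	/* Sketch: wiring the new PCI-aware callbacks as dev_pm_ops. */
	static SIMPLE_DEV_PM_OPS(demo_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);

	static struct pci_driver demo_pci_driver = {
		.name	= "demo-stmmac",
		.probe	= stmmac_pci_probe,
		.remove	= stmmac_pci_remove,
		.driver	= {
			.pm = &demo_pm_ops,	/* suspend saves state, resume restores it */
		},
	};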
@@ -207,6 +207,7 @@ void nsim_devlink_teardown(struct netdevsim *ns)
struct net *net = nsim_to_net(ns);
bool *reg_devlink = net_generic(net, nsim_devlink_id);

devlink_resources_unregister(ns->devlink, NULL);
devlink_unregister(ns->devlink);
devlink_free(ns->devlink);
ns->devlink = NULL;
@@ -218,7 +218,7 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)

static int mdio_mux_iproc_remove(struct platform_device *pdev)
{
struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev);
struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);

mdio_mux_uninit(md->mux_handle);
mdiobus_unregister(md->mii_bus);
@@ -1242,6 +1242,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
mod_timer(&dev->stat_monitor,
jiffies + STAT_UPDATE_TIMER);
}

tasklet_schedule(&dev->bh);
}

return ret;
@@ -1785,7 +1785,8 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus);
/* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
fwreq->bus_nr = devinfo->pdev->bus->number;

return fwreq;
@@ -178,6 +178,17 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};

const struct iwl_cfg iwl9260_killer_2ac_cfg = {
.name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)",
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};

const struct iwl_cfg iwl9270_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9270",
.fw_name_pre = IWL9260A_FW_PRE,

@@ -267,6 +278,34 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = {
.soc_latency = 5000,
};

const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
};

const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
};

const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
.name = "Intel(R) Dual Band Wireless AC 9460",
.fw_name_pre = IWL9000A_FW_PRE,

@@ -327,6 +366,36 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};

const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};

const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = {
.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
.fw_name_pre = IWL9000A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};

MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));

@@ -551,6 +551,7 @@ extern const struct iwl_cfg iwl8275_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;

@@ -558,10 +559,14 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;

@@ -545,6 +545,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},

@@ -554,6 +557,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},

@@ -578,6 +582,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},

@@ -604,6 +610,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)},

@@ -630,6 +638,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)},
{IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)},

@@ -656,6 +666,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},

@@ -682,6 +694,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)},

@@ -708,6 +722,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)},

@@ -743,6 +759,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)},

@@ -771,6 +789,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)},

@@ -797,6 +817,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
@@ -87,6 +87,7 @@ struct netfront_cb {
/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);

struct netfront_stats {

@@ -1331,6 +1332,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_carrier_off(netdev);

xenbus_switch_state(dev, XenbusStateInitialising);
wait_event(module_load_q,
xenbus_read_driver_state(dev->otherend) !=
XenbusStateClosed &&
xenbus_read_driver_state(dev->otherend) !=
XenbusStateUnknown);
return netdev;

exit:
@@ -539,14 +539,18 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
/*
* For something we're not in a state to send to the device the default action
* is to busy it and retry it after the controller state is recovered. However,
* anything marked for failfast or nvme multipath is immediately failed.
* if the controller is deleting or if anything is marked for failfast or
* nvme multipath it is immediately failed.
*
* Note: commands used to initialize the controller will be marked for failfast.
* Note: nvme cli/ioctl commands are marked for failfast.
*/
blk_status_t nvmf_fail_nonready_command(struct request *rq)
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq)
{
if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
if (ctrl->state != NVME_CTRL_DELETING &&
ctrl->state != NVME_CTRL_DEAD &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
nvme_req(rq)->status = NVME_SC_ABORT_REQ;
return BLK_STS_IOERR;

@@ -162,7 +162,8 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
blk_status_t nvmf_fail_nonready_command(struct request *rq);
blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq);
bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live);
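With the extra ctrl argument, the fabrics helper can distinguish "controller is going away" from "controller is temporarily not ready": requests against a deleting or dead controller are failed outright instead of being parked as BLK_STS_RESOURCE forever. Every transport's queue_rq then shares the same prologue, as the fc, rdma and loop hunks below show. A sketch of that prologue (demo_queue_rq is illustrative; nvmf_check_ready and nvmf_fail_nonready_command are the names used in the hunks):

	/* Sketch of the common queue_rq prologue after this change. */
	static blk_status_t demo_queue_rq(struct nvme_ctrl *ctrl, struct request *rq,
					  bool queue_ready)
	{
		if (!nvmf_check_ready(ctrl, rq, queue_ready))
			return nvmf_fail_nonready_command(ctrl, rq);

		/* ... transport-specific submission ... */
		return BLK_STS_OK;
	}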
@@ -2272,7 +2272,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,

if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
return nvmf_fail_nonready_command(rq);
return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

ret = nvme_setup_cmd(ns, rq, sqe);
if (ret)

@@ -1639,7 +1639,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
WARN_ON_ONCE(rq->tag < 0);

if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
return nvmf_fail_nonready_command(rq);
return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

dev = queue->device->dev;
ib_dma_sync_single_for_cpu(dev, sqe->dma,
@@ -282,6 +282,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
{
struct nvmet_ns *ns = to_nvmet_ns(item);
struct nvmet_subsys *subsys = ns->subsys;
size_t len;
int ret;

mutex_lock(&subsys->lock);

@@ -289,10 +290,14 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
if (ns->enabled)
goto out_unlock;

kfree(ns->device_path);
ret = -EINVAL;
len = strcspn(page, "\n");
if (!len)
goto out_unlock;

kfree(ns->device_path);
ret = -ENOMEM;
ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL);
ns->device_path = kstrndup(page, len, GFP_KERNEL);
if (!ns->device_path)
goto out_unlock;
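The configfs fix reorders the store path into validate-then-commit: compute the token length first and reject an empty value with -EINVAL before the old string is freed, so bad input no longer destroys the existing setting. A condensed sketch of the same idea (demo_set_path is illustrative, and it duplicates before freeing, a slightly more conservative order than the hunk):

	/* Sketch: reject bad input before freeing the old value, so an
	 * error leaves the previous setting intact. */
	static int demo_set_path(char **slot, const char *page)
	{
		size_t len = strcspn(page, "\n");
		char *new;

		if (!len)
			return -EINVAL;		/* empty: old value untouched */

		new = kstrndup(page, len, GFP_KERNEL);
		if (!new)
			return -ENOMEM;

		kfree(*slot);
		*slot = new;
		return 0;
	}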
@@ -339,7 +339,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
goto out_unlock;

ret = nvmet_bdev_ns_enable(ns);
if (ret)
if (ret == -ENOTBLK)
ret = nvmet_file_ns_enable(ns);
if (ret)
goto out_unlock;
@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
struct work_struct work;
} __aligned(sizeof(unsigned long long));

/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
#define NVMET_FC_MAX_XFR_SGENTS (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)

enum nvmet_fcp_datadir {
NVMET_FCP_NODATA,

@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
struct nvme_fc_cmd_iu cmdiubuf;
struct nvme_fc_ersp_iu rspiubuf;
dma_addr_t rspdma;
struct scatterlist *next_sg;
struct scatterlist *data_sg;
int data_sg_cnt;
u32 offset;

@@ -1025,8 +1026,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
INIT_LIST_HEAD(&newrec->assoc_list);
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
template->max_sgl_segments);
newrec->max_sg_cnt = template->max_sgl_segments;

ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {

@@ -1722,6 +1722,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
((fod->io_dir == NVMET_FCP_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE));
/* note: write from initiator perspective */
fod->next_sg = fod->data_sg;

return 0;

@@ -1866,24 +1867,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod, u8 op)
{
struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
struct scatterlist *sg = fod->next_sg;
unsigned long flags;
u32 tlen;
u32 remaininglen = fod->req.transfer_len - fod->offset;
u32 tlen = 0;
int ret;

fcpreq->op = op;
fcpreq->offset = fod->offset;
fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;

tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
(fod->req.transfer_len - fod->offset));
/*
* for next sequence:
* break at a sg element boundary
* attempt to keep sequence length capped at
* NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
* be longer if a single sg element is larger
* than that amount. This is done to avoid creating
* a new sg list to use for the tgtport api.
*/
fcpreq->sg = sg;
fcpreq->sg_cnt = 0;
while (tlen < remaininglen &&
fcpreq->sg_cnt < tgtport->max_sg_cnt &&
tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
fcpreq->sg_cnt++;
tlen += sg_dma_len(sg);
sg = sg_next(sg);
}
if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
fcpreq->sg_cnt++;
tlen += min_t(u32, sg_dma_len(sg), remaininglen);
sg = sg_next(sg);
}
if (tlen < remaininglen)
fod->next_sg = sg;
else
fod->next_sg = NULL;

fcpreq->transfer_length = tlen;
fcpreq->transferred_length = 0;
fcpreq->fcp_error = 0;
fcpreq->rsplen = 0;

fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);

/*
* If the last READDATA request: check if LLDD supports
* combined xfr with response.
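The new loop replaces the old offset arithmetic: instead of assuming PAGE_SIZE-sized elements, it walks the scatterlist from fod->next_sg, accumulating whole elements until the remaining length, the LLDD's sg count limit, or the 256 KB sequence cap is hit, and it takes a single oversized element as-is rather than splitting it, so no new sg list has to be built for the tgtport API. The core selection logic as a standalone helper (pick_sg_run is an illustrative refactoring, not driver code):

	/* Sketch: choose how many whole sg elements fit under the caps.
	 * Returns the element count and the byte length via *out_len. */
	static int pick_sg_run(struct scatterlist *sg, u32 remaining, u32 seq_cap,
			       u32 max_ents, u32 *out_len)
	{
		int cnt = 0;
		u32 len = 0;

		while (len < remaining && cnt < max_ents &&
		       len + sg_dma_len(sg) < seq_cap) {
			len += sg_dma_len(sg);	/* whole elements only */
			cnt++;
			sg = sg_next(sg);
		}
		if (!cnt && len < remaining) {
			/* single element bigger than the cap: take it anyway */
			len = min_t(u32, sg_dma_len(sg), remaining);
			cnt = 1;
		}
		*out_len = len;
		return cnt;
	}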
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_status_t ret;

if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
return nvmf_fail_nonready_command(req);
return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);

ret = nvme_setup_cmd(ns, req, &iod->cmd);
if (ret)
@@ -295,6 +295,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)

parent = udev->subordinate;
pci_lock_rescan_remove();
pci_dev_get(dev);
list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
bus_list) {
pci_dev_get(pdev);

@@ -328,6 +329,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
pci_info(dev, "Device recovery from fatal error failed\n");
}

pci_dev_put(dev);
pci_unlock_rescan_remove();
}
@@ -962,6 +962,10 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
{
void __iomem *ctrl = params->ctrl_regs;

USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
/* 1 millisecond - for USB clocks to settle down */
usleep_range(1000, 2000);

if (BRCM_ID(params->family_id) == 0x7366) {
/*
* The PHY3_SOFT_RESETB bits default to the wrong state.
@@ -182,13 +182,13 @@ static void phy_mdm6600_status(struct work_struct *work)
ddata = container_of(work, struct phy_mdm6600, status_work.work);
dev = ddata->dev;

error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES,
error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_STATUS_LINES,
ddata->status_gpios->desc,
values);
if (error)
return;

for (i = 0; i < PHY_MDM6600_NR_CMD_LINES; i++) {
for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
val |= values[i] << i;
dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
__func__, i, values[i], val);
@@ -284,11 +284,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
*/
if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
iscsi_conn_printk(KERN_INFO, conn,
"task [op %x/%x itt "
"task [op %x itt "
"0x%x/0x%x] "
"rejected.\n",
task->hdr->opcode, opcode,
task->itt, task->hdr_itt);
opcode, task->itt,
task->hdr_itt);
return -EACCES;
}
/*

@@ -297,10 +297,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
*/
if (conn->session->fast_abort) {
iscsi_conn_printk(KERN_INFO, conn,
"task [op %x/%x itt "
"task [op %x itt "
"0x%x/0x%x] fast abort.\n",
task->hdr->opcode, opcode,
task->itt, task->hdr_itt);
opcode, task->itt,
task->hdr_itt);
return -EACCES;
}
break;
@@ -2141,6 +2141,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
msleep(1000);

qla24xx_disable_vp(vha);
qla2x00_wait_for_sess_deletion(vha);

vha->flags.delete_progress = 1;

@@ -214,6 +214,7 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_abort_cmd(srb_t *);
int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);

/*
* Global Functions in qla_mid.c source file.