Diffstat (limited to 'arch/mips')
 arch/mips/Kconfig                        |   2
 arch/mips/include/asm/sigcontext.h       |   2
 arch/mips/include/asm/uasm.h             |   4
 arch/mips/include/uapi/asm/inst.h        |   1
 arch/mips/include/uapi/asm/sigcontext.h  |   8
 arch/mips/kernel/asm-offsets.c           |   3
 arch/mips/kernel/ftrace.c                |   3
 arch/mips/kernel/irq-msc01.c             |   2
 arch/mips/kernel/mcount.S                |   7
 arch/mips/kernel/pm-cps.c                |   4
 arch/mips/kernel/r4k_fpu.S               | 213
 arch/mips/kernel/signal.c                |  79
 arch/mips/kernel/signal32.c              |  74
 arch/mips/kernel/smp-cps.c               |   2
 arch/mips/kvm/mips.c                     |   1
 arch/mips/math-emu/ieee754.c             |  23
 arch/mips/mm/uasm-micromips.c            |   1
 arch/mips/mm/uasm-mips.c                 |   3
 arch/mips/mm/uasm.c                      |  10
 arch/mips/net/bpf_jit.c                  | 266
 20 files changed, 205 insertions(+), 503 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 7a469acee33c..10f270bd3e25 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -15,7 +15,6 @@ config MIPS
 	select HAVE_BPF_JIT if !CPU_MICROMIPS
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_C_RECORDMCOUNT
@@ -269,6 +268,7 @@ config LANTIQ
 config LASAT
 	bool "LASAT Networks platforms"
 	select CEVT_R4K
+	select CRC32
 	select CSRC_R4K
 	select DMA_NONCOHERENT
 	select SYS_HAS_EARLY_PRINTK
diff --git a/arch/mips/include/asm/sigcontext.h b/arch/mips/include/asm/sigcontext.h
index f54bdbe85c0d..eeeb0f48c767 100644
--- a/arch/mips/include/asm/sigcontext.h
+++ b/arch/mips/include/asm/sigcontext.h
@@ -32,8 +32,6 @@ struct sigcontext32 {
 	__u32		sc_lo2;
 	__u32		sc_hi3;
 	__u32		sc_lo3;
-	__u64		sc_msaregs[32];	/* Most significant 64 bits */
-	__u32		sc_msa_csr;
 };
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
 #endif /* _ASM_SIGCONTEXT_H */
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index f8d63b3b40b4..708c5d414905 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -67,6 +67,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 #define Ip_u2s3u1(op)							\
 void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
 
+#define Ip_s3s1s2(op)							\
+void ISAOPC(op)(u32 **buf, int a, int b, int c)
+
 #define Ip_u2u1s3(op)							\
 void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
@@ -147,6 +150,7 @@ Ip_u2s3u1(_scd);
 Ip_u2s3u1(_sd);
 Ip_u2u1u3(_sll);
 Ip_u3u2u1(_sllv);
+Ip_s3s1s2(_slt);
 Ip_u2u1s3(_sltiu);
 Ip_u3u1u2(_sltu);
 Ip_u2u1u3(_sra);
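
(Illustrative sketch, not part of the patch: emitting the new signed compare
through the uasm API, assuming a preallocated instruction buffer; the register
numbers are arbitrary.)

	u32 buf[1], *p = buf;
	/* uasm_i_slt(&p, rd, rs, rt) encodes "slt rd, rs, rt", i.e.
	 * rd = ((s32)rs < (s32)rt) ? 1 : 0; the call advances p. */
	uasm_i_slt(&p, 2, 4, 5);	/* $v0 = (s32)$a0 < (s32)$a1 */
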
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 4b7160259292..4bfdb9d4c186 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -273,6 +273,7 @@ enum mm_32a_minor_op {
 	mm_and_op = 0x250,
 	mm_or32_op = 0x290,
 	mm_xor32_op = 0x310,
+	mm_slt_op = 0x350,
 	mm_sltu_op = 0x390,
 };
 
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
index 681c17603a48..6c9906f59c6e 100644
--- a/arch/mips/include/uapi/asm/sigcontext.h
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -12,10 +12,6 @@
 #include <linux/types.h>
 #include <asm/sgidefs.h>
 
-/* Bits which may be set in sc_used_math */
-#define USEDMATH_FP	(1 << 0)
-#define USEDMATH_MSA	(1 << 1)
-
 #if _MIPS_SIM == _MIPS_SIM_ABI32
 
 /*
@@ -41,8 +37,6 @@ struct sigcontext {
 	unsigned long		sc_lo2;
 	unsigned long		sc_hi3;
 	unsigned long		sc_lo3;
-	unsigned long long	sc_msaregs[32];	/* Most significant 64 bits */
-	unsigned long		sc_msa_csr;
 };
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
@@ -76,8 +70,6 @@ struct sigcontext {
 	__u32	sc_used_math;
 	__u32	sc_dsp;
 	__u32	sc_reserved;
-	__u64	sc_msaregs[32];
-	__u32	sc_msa_csr;
 };
 
 
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 02f075df8f2e..4bb5107511e2 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -293,7 +293,6 @@ void output_sc_defines(void)
 	OFFSET(SC_LO2, sigcontext, sc_lo2);
 	OFFSET(SC_HI3, sigcontext, sc_hi3);
 	OFFSET(SC_LO3, sigcontext, sc_lo3);
-	OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
 	BLANK();
 }
 #endif
@@ -308,7 +307,6 @@ void output_sc_defines(void)
 	OFFSET(SC_MDLO, sigcontext, sc_mdlo);
 	OFFSET(SC_PC, sigcontext, sc_pc);
 	OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
-	OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
 	BLANK();
 }
 #endif
@@ -320,7 +318,6 @@ void output_sc32_defines(void)
 	OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
 	OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
 	OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
-	OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs);
 	BLANK();
 }
 #endif
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 60e7e5e45af1..8b6538750fe1 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -302,6 +302,9 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 	    &return_to_handler;
 	int faulted, insns;
 
+	if (unlikely(ftrace_graph_is_dead()))
+		return;
+
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 4858642d543d..a734b2c2f9ea 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -126,7 +126,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
 
 	board_bind_eic_interrupt = &msc_bind_eic_interrupt;
 
-	for (; nirq >= 0; nirq--, imp++) {
+	for (; nirq > 0; nirq--, imp++) {
 		int n = imp->im_irq;
 
 		switch (imp->im_type) {
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 539b6294b613..00940d1d5c4f 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -74,10 +74,6 @@ _mcount:
 #endif
 
 	/* When tracing is activated, it calls ftrace_caller+8 (aka here) */
-	lw	t1, function_trace_stop
-	bnez	t1, ftrace_stub
-	 nop
-
 	MCOUNT_SAVE_REGS
 #ifdef KBUILD_MCOUNT_RA_ADDRESS
 	PTR_S	MCOUNT_RA_ADDRESS_REG, PT_R12(sp)
@@ -105,9 +101,6 @@ ftrace_stub:
 #else	/* ! CONFIG_DYNAMIC_FTRACE */
 
 NESTED(_mcount, PT_SIZE, ra)
-	lw	t1, function_trace_stop
-	bnez	t1, ftrace_stub
-	 nop
 	PTR_LA	t1, ftrace_stub
 	PTR_L	t2, ftrace_trace_function /* Prepare t2 for (1) */
 	bne	t1, t2, static_trace
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 5aa4c6f8cf83..c4c2069d3a20 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -101,7 +101,7 @@ static void coupled_barrier(atomic_t *a, unsigned online)
 	if (!coupled_coherence)
 		return;
 
-	smp_mb__before_atomic_inc();
+	smp_mb__before_atomic();
 	atomic_inc(a);
 
 	while (atomic_read(a) < online)
@@ -158,7 +158,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
 
 	/* Indicate that this CPU might not be coherent */
 	cpumask_clear_cpu(cpu, &cpu_coherent_mask);
-	smp_mb__after_clear_bit();
+	smp_mb__after_atomic();
 
 	/* Create a non-coherent mapping of the core ready_count */
 	core_ready_count = per_cpu(ready_count, core);
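
(For reference, a minimal sketch of the renamed barrier API used above; the
atomic_t/bitmap variables are hypothetical.)

	static atomic_t counter;
	static unsigned long flags;

	smp_mb__before_atomic();	/* full barrier before a
					 * non-value-returning atomic op */
	atomic_inc(&counter);

	clear_bit(0, &flags);
	smp_mb__after_atomic();		/* full barrier after it */
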
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 71814272d148..8352523568e6 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -13,7 +13,6 @@
  * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
  */
 #include <asm/asm.h>
-#include <asm/asmmacro.h>
 #include <asm/errno.h>
 #include <asm/fpregdef.h>
 #include <asm/mipsregs.h>
@@ -246,218 +245,6 @@ LEAF(_restore_fp_context32)
 	END(_restore_fp_context32)
 #endif
 
-#ifdef CONFIG_CPU_HAS_MSA
-
-	.macro	save_sc_msareg	wr, off, sc, tmp
-#ifdef CONFIG_64BIT
-	copy_u_d \tmp, \wr, 1
-	EX sd	\tmp, (\off+(\wr*8))(\sc)
-#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
-	copy_u_w \tmp, \wr, 2
-	EX sw	\tmp, (\off+(\wr*8)+0)(\sc)
-	copy_u_w \tmp, \wr, 3
-	EX sw	\tmp, (\off+(\wr*8)+4)(\sc)
-#else /* CONFIG_CPU_BIG_ENDIAN */
-	copy_u_w \tmp, \wr, 2
-	EX sw	\tmp, (\off+(\wr*8)+4)(\sc)
-	copy_u_w \tmp, \wr, 3
-	EX sw	\tmp, (\off+(\wr*8)+0)(\sc)
-#endif
-	.endm
-
-/*
- * int _save_msa_context(struct sigcontext *sc)
- *
- * Save the upper 64 bits of each vector register along with the MSA_CSR
- * register into sc. Returns zero on success, else non-zero.
- */
-LEAF(_save_msa_context)
-	save_sc_msareg	0, SC_MSAREGS, a0, t0
-	save_sc_msareg	1, SC_MSAREGS, a0, t0
-	save_sc_msareg	2, SC_MSAREGS, a0, t0
-	save_sc_msareg	3, SC_MSAREGS, a0, t0
-	save_sc_msareg	4, SC_MSAREGS, a0, t0
-	save_sc_msareg	5, SC_MSAREGS, a0, t0
-	save_sc_msareg	6, SC_MSAREGS, a0, t0
-	save_sc_msareg	7, SC_MSAREGS, a0, t0
-	save_sc_msareg	8, SC_MSAREGS, a0, t0
-	save_sc_msareg	9, SC_MSAREGS, a0, t0
-	save_sc_msareg	10, SC_MSAREGS, a0, t0
-	save_sc_msareg	11, SC_MSAREGS, a0, t0
-	save_sc_msareg	12, SC_MSAREGS, a0, t0
-	save_sc_msareg	13, SC_MSAREGS, a0, t0
-	save_sc_msareg	14, SC_MSAREGS, a0, t0
-	save_sc_msareg	15, SC_MSAREGS, a0, t0
-	save_sc_msareg	16, SC_MSAREGS, a0, t0
-	save_sc_msareg	17, SC_MSAREGS, a0, t0
-	save_sc_msareg	18, SC_MSAREGS, a0, t0
-	save_sc_msareg	19, SC_MSAREGS, a0, t0
-	save_sc_msareg	20, SC_MSAREGS, a0, t0
-	save_sc_msareg	21, SC_MSAREGS, a0, t0
-	save_sc_msareg	22, SC_MSAREGS, a0, t0
-	save_sc_msareg	23, SC_MSAREGS, a0, t0
-	save_sc_msareg	24, SC_MSAREGS, a0, t0
-	save_sc_msareg	25, SC_MSAREGS, a0, t0
-	save_sc_msareg	26, SC_MSAREGS, a0, t0
-	save_sc_msareg	27, SC_MSAREGS, a0, t0
-	save_sc_msareg	28, SC_MSAREGS, a0, t0
-	save_sc_msareg	29, SC_MSAREGS, a0, t0
-	save_sc_msareg	30, SC_MSAREGS, a0, t0
-	save_sc_msareg	31, SC_MSAREGS, a0, t0
-	jr	ra
-	 li	v0, 0
-	END(_save_msa_context)
-
-#ifdef CONFIG_MIPS32_COMPAT
-
-/*
- * int _save_msa_context32(struct sigcontext32 *sc)
- *
- * Save the upper 64 bits of each vector register along with the MSA_CSR
- * register into sc. Returns zero on success, else non-zero.
- */
-LEAF(_save_msa_context32)
-	save_sc_msareg	0, SC32_MSAREGS, a0, t0
-	save_sc_msareg	1, SC32_MSAREGS, a0, t0
-	save_sc_msareg	2, SC32_MSAREGS, a0, t0
-	save_sc_msareg	3, SC32_MSAREGS, a0, t0
-	save_sc_msareg	4, SC32_MSAREGS, a0, t0
-	save_sc_msareg	5, SC32_MSAREGS, a0, t0
-	save_sc_msareg	6, SC32_MSAREGS, a0, t0
-	save_sc_msareg	7, SC32_MSAREGS, a0, t0
-	save_sc_msareg	8, SC32_MSAREGS, a0, t0
-	save_sc_msareg	9, SC32_MSAREGS, a0, t0
-	save_sc_msareg	10, SC32_MSAREGS, a0, t0
-	save_sc_msareg	11, SC32_MSAREGS, a0, t0
-	save_sc_msareg	12, SC32_MSAREGS, a0, t0
-	save_sc_msareg	13, SC32_MSAREGS, a0, t0
-	save_sc_msareg	14, SC32_MSAREGS, a0, t0
-	save_sc_msareg	15, SC32_MSAREGS, a0, t0
-	save_sc_msareg	16, SC32_MSAREGS, a0, t0
-	save_sc_msareg	17, SC32_MSAREGS, a0, t0
-	save_sc_msareg	18, SC32_MSAREGS, a0, t0
-	save_sc_msareg	19, SC32_MSAREGS, a0, t0
-	save_sc_msareg	20, SC32_MSAREGS, a0, t0
-	save_sc_msareg	21, SC32_MSAREGS, a0, t0
-	save_sc_msareg	22, SC32_MSAREGS, a0, t0
-	save_sc_msareg	23, SC32_MSAREGS, a0, t0
-	save_sc_msareg	24, SC32_MSAREGS, a0, t0
-	save_sc_msareg	25, SC32_MSAREGS, a0, t0
-	save_sc_msareg	26, SC32_MSAREGS, a0, t0
-	save_sc_msareg	27, SC32_MSAREGS, a0, t0
-	save_sc_msareg	28, SC32_MSAREGS, a0, t0
-	save_sc_msareg	29, SC32_MSAREGS, a0, t0
-	save_sc_msareg	30, SC32_MSAREGS, a0, t0
-	save_sc_msareg	31, SC32_MSAREGS, a0, t0
-	jr	ra
-	 li	v0, 0
-	END(_save_msa_context32)
-
-#endif /* CONFIG_MIPS32_COMPAT */
-
-	.macro restore_sc_msareg	wr, off, sc, tmp
-#ifdef CONFIG_64BIT
-	EX ld	\tmp, (\off+(\wr*8))(\sc)
-	insert_d \wr, 1, \tmp
-#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
-	EX lw	\tmp, (\off+(\wr*8)+0)(\sc)
-	insert_w \wr, 2, \tmp
-	EX lw	\tmp, (\off+(\wr*8)+4)(\sc)
-	insert_w \wr, 3, \tmp
-#else /* CONFIG_CPU_BIG_ENDIAN */
-	EX lw	\tmp, (\off+(\wr*8)+4)(\sc)
-	insert_w \wr, 2, \tmp
-	EX lw	\tmp, (\off+(\wr*8)+0)(\sc)
-	insert_w \wr, 3, \tmp
-#endif
-	.endm
-
-/*
- * int _restore_msa_context(struct sigcontext *sc)
- */
-LEAF(_restore_msa_context)
-	restore_sc_msareg	0, SC_MSAREGS, a0, t0
-	restore_sc_msareg	1, SC_MSAREGS, a0, t0
-	restore_sc_msareg	2, SC_MSAREGS, a0, t0
-	restore_sc_msareg	3, SC_MSAREGS, a0, t0
-	restore_sc_msareg	4, SC_MSAREGS, a0, t0
-	restore_sc_msareg	5, SC_MSAREGS, a0, t0
-	restore_sc_msareg	6, SC_MSAREGS, a0, t0
-	restore_sc_msareg	7, SC_MSAREGS, a0, t0
-	restore_sc_msareg	8, SC_MSAREGS, a0, t0
-	restore_sc_msareg	9, SC_MSAREGS, a0, t0
-	restore_sc_msareg	10, SC_MSAREGS, a0, t0
-	restore_sc_msareg	11, SC_MSAREGS, a0, t0
-	restore_sc_msareg	12, SC_MSAREGS, a0, t0
-	restore_sc_msareg	13, SC_MSAREGS, a0, t0
-	restore_sc_msareg	14, SC_MSAREGS, a0, t0
-	restore_sc_msareg	15, SC_MSAREGS, a0, t0
-	restore_sc_msareg	16, SC_MSAREGS, a0, t0
-	restore_sc_msareg	17, SC_MSAREGS, a0, t0
-	restore_sc_msareg	18, SC_MSAREGS, a0, t0
-	restore_sc_msareg	19, SC_MSAREGS, a0, t0
-	restore_sc_msareg	20, SC_MSAREGS, a0, t0
-	restore_sc_msareg	21, SC_MSAREGS, a0, t0
-	restore_sc_msareg	22, SC_MSAREGS, a0, t0
-	restore_sc_msareg	23, SC_MSAREGS, a0, t0
-	restore_sc_msareg	24, SC_MSAREGS, a0, t0
-	restore_sc_msareg	25, SC_MSAREGS, a0, t0
-	restore_sc_msareg	26, SC_MSAREGS, a0, t0
-	restore_sc_msareg	27, SC_MSAREGS, a0, t0
-	restore_sc_msareg	28, SC_MSAREGS, a0, t0
-	restore_sc_msareg	29, SC_MSAREGS, a0, t0
-	restore_sc_msareg	30, SC_MSAREGS, a0, t0
-	restore_sc_msareg	31, SC_MSAREGS, a0, t0
-	jr	ra
-	 li	v0, 0
-	END(_restore_msa_context)
-
-#ifdef CONFIG_MIPS32_COMPAT
-
-/*
- * int _restore_msa_context32(struct sigcontext32 *sc)
- */
-LEAF(_restore_msa_context32)
-	restore_sc_msareg	0, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	1, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	2, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	3, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	4, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	5, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	6, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	7, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	8, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	9, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	10, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	11, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	12, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	13, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	14, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	15, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	16, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	17, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	18, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	19, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	20, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	21, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	22, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	23, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	24, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	25, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	26, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	27, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	28, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	29, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	30, SC32_MSAREGS, a0, t0
-	restore_sc_msareg	31, SC32_MSAREGS, a0, t0
-	jr	ra
-	 li	v0, 0
-	END(_restore_msa_context32)
-
-#endif /* CONFIG_MIPS32_COMPAT */
-
-#endif /* CONFIG_CPU_HAS_MSA */
-
 	.set	reorder
 
 	.type	fault@function
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 33133d3df3e5..9e60d117e41e 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -31,7 +31,6 @@
 #include <linux/bitops.h>
 #include <asm/cacheflush.h>
 #include <asm/fpu.h>
-#include <asm/msa.h>
 #include <asm/sim.h>
 #include <asm/ucontext.h>
 #include <asm/cpu-features.h>
@@ -48,9 +47,6 @@ static int (*restore_fp_context)(struct sigcontext __user *sc);
 extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
 extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
 
-extern asmlinkage int _save_msa_context(struct sigcontext __user *sc);
-extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc);
-
 struct sigframe {
 	u32 sf_ass[4];		/* argument save space for o32 */
 	u32 sf_pad[2];		/* Was: signal trampoline */
@@ -100,60 +96,20 @@ static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
 }
 
 /*
- * These functions will save only the upper 64 bits of the vector registers,
- * since the lower 64 bits have already been saved as the scalar FP context.
- */
-static int copy_msa_to_sigcontext(struct sigcontext __user *sc)
-{
-	int i;
-	int err = 0;
-
-	for (i = 0; i < NUM_FPU_REGS; i++) {
-		err |=
-		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
-			       &sc->sc_msaregs[i]);
-	}
-	err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-	return err;
-}
-
-static int copy_msa_from_sigcontext(struct sigcontext __user *sc)
-{
-	int i;
-	int err = 0;
-	u64 val;
-
-	for (i = 0; i < NUM_FPU_REGS; i++) {
-		err |= __get_user(val, &sc->sc_msaregs[i]);
-		set_fpr64(&current->thread.fpu.fpr[i], 1, val);
-	}
-	err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-	return err;
-}
-
-/*
  * Helper routines
  */
-static int protected_save_fp_context(struct sigcontext __user *sc,
-				     unsigned used_math)
+static int protected_save_fp_context(struct sigcontext __user *sc)
 {
 	int err;
-	bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 #ifndef CONFIG_EVA
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
 			err = save_fp_context(sc);
-			if (save_msa && !err)
-				err = _save_msa_context(sc);
 			unlock_fpu_owner();
 		} else {
 			unlock_fpu_owner();
 			err = copy_fp_to_sigcontext(sc);
-			if (save_msa && !err)
-				err = copy_msa_to_sigcontext(sc);
 		}
 		if (likely(!err))
 			break;
@@ -169,38 +125,24 @@ static int protected_save_fp_context(struct sigcontext __user *sc,
 	 * EVA does not have FPU EVA instructions so saving fpu context directly
 	 * does not work.
 	 */
-	disable_msa();
 	lose_fpu(1);
 	err = save_fp_context(sc); /* this might fail */
-	if (save_msa && !err)
-		err = copy_msa_to_sigcontext(sc);
 #endif
 	return err;
 }
 
-static int protected_restore_fp_context(struct sigcontext __user *sc,
-					unsigned used_math)
+static int protected_restore_fp_context(struct sigcontext __user *sc)
 {
 	int err, tmp __maybe_unused;
-	bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 #ifndef CONFIG_EVA
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
 			err = restore_fp_context(sc);
-			if (restore_msa && !err) {
-				enable_msa();
-				err = _restore_msa_context(sc);
-			} else {
-				/* signal handler may have used MSA */
-				disable_msa();
-			}
 			unlock_fpu_owner();
 		} else {
 			unlock_fpu_owner();
 			err = copy_fp_from_sigcontext(sc);
-			if (!err && (used_math & USEDMATH_MSA))
-				err = copy_msa_from_sigcontext(sc);
 		}
 		if (likely(!err))
 			break;
@@ -216,11 +158,8 @@ static int protected_restore_fp_context(struct sigcontext __user *sc,
 	 * EVA does not have FPU EVA instructions so restoring fpu context
 	 * directly does not work.
 	 */
-	enable_msa();
 	lose_fpu(0);
 	err = restore_fp_context(sc); /* this might fail */
-	if (restore_msa && !err)
-		err = copy_msa_from_sigcontext(sc);
 #endif
 	return err;
 }
@@ -252,8 +191,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
 	}
 
-	used_math = used_math() ? USEDMATH_FP : 0;
-	used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
+	used_math = !!used_math();
 	err |= __put_user(used_math, &sc->sc_used_math);
 
 	if (used_math) {
@@ -261,7 +199,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 		 * Save FPU state to signal context. Signal handler
 		 * will "inherit" current FPU state.
 		 */
-		err |= protected_save_fp_context(sc, used_math);
+		err |= protected_save_fp_context(sc);
 	}
 	return err;
 }
@@ -286,14 +224,14 @@ int fpcsr_pending(unsigned int __user *fpcsr)
 }
 
 static int
-check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math)
+check_and_restore_fp_context(struct sigcontext __user *sc)
 {
 	int err, sig;
 
 	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
 	if (err > 0)
 		err = 0;
-	err |= protected_restore_fp_context(sc, used_math);
+	err |= protected_restore_fp_context(sc);
 	return err ?: sig;
 }
 
@@ -333,10 +271,9 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 	if (used_math) {
 		/* restore fpu context if we have used it before */
 		if (!err)
-			err = check_and_restore_fp_context(sc, used_math);
+			err = check_and_restore_fp_context(sc);
 	} else {
-		/* signal handler may have used FPU or MSA. Disable them. */
-		disable_msa();
+		/* signal handler may have used FPU.  Give it up. */
 		lose_fpu(0);
 	}
 
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 299f956e4db3..bae2e6ee2109 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -30,7 +30,6 @@
 #include <asm/sim.h>
 #include <asm/ucontext.h>
 #include <asm/fpu.h>
-#include <asm/msa.h>
 #include <asm/war.h>
 #include <asm/vdso.h>
 #include <asm/dsp.h>
@@ -43,9 +42,6 @@ static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
 extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
 extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
 
-extern asmlinkage int _save_msa_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int _restore_msa_context32(struct sigcontext32 __user *sc);
-
 /*
  * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
  */
@@ -115,59 +111,19 @@ static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
 }
 
 /*
- * These functions will save only the upper 64 bits of the vector registers,
- * since the lower 64 bits have already been saved as the scalar FP context.
- */
-static int copy_msa_to_sigcontext32(struct sigcontext32 __user *sc)
-{
-	int i;
-	int err = 0;
-
-	for (i = 0; i < NUM_FPU_REGS; i++) {
-		err |=
-		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
-			       &sc->sc_msaregs[i]);
-	}
-	err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-	return err;
-}
-
-static int copy_msa_from_sigcontext32(struct sigcontext32 __user *sc)
-{
-	int i;
-	int err = 0;
-	u64 val;
-
-	for (i = 0; i < NUM_FPU_REGS; i++) {
-		err |= __get_user(val, &sc->sc_msaregs[i]);
-		set_fpr64(&current->thread.fpu.fpr[i], 1, val);
-	}
-	err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-	return err;
-}
-
-/*
  * sigcontext handlers
  */
-static int protected_save_fp_context32(struct sigcontext32 __user *sc,
-				       unsigned used_math)
+static int protected_save_fp_context32(struct sigcontext32 __user *sc)
 {
 	int err;
-	bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
 			err = save_fp_context32(sc);
-			if (save_msa && !err)
-				err = _save_msa_context32(sc);
 			unlock_fpu_owner();
 		} else {
 			unlock_fpu_owner();
 			err = copy_fp_to_sigcontext32(sc);
-			if (save_msa && !err)
-				err = copy_msa_to_sigcontext32(sc);
 		}
 		if (likely(!err))
 			break;
@@ -181,28 +137,17 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc,
 	return err;
 }
 
-static int protected_restore_fp_context32(struct sigcontext32 __user *sc,
-					  unsigned used_math)
+static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
 {
 	int err, tmp __maybe_unused;
-	bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
 			err = restore_fp_context32(sc);
-			if (restore_msa && !err) {
-				enable_msa();
-				err = _restore_msa_context32(sc);
-			} else {
-				/* signal handler may have used MSA */
-				disable_msa();
-			}
 			unlock_fpu_owner();
 		} else {
 			unlock_fpu_owner();
 			err = copy_fp_from_sigcontext32(sc);
-			if (restore_msa && !err)
-				err = copy_msa_from_sigcontext32(sc);
 		}
 		if (likely(!err))
 			break;
@@ -241,8 +186,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
 		err |= __put_user(mflo3(), &sc->sc_lo3);
 	}
 
-	used_math = used_math() ? USEDMATH_FP : 0;
-	used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
+	used_math = !!used_math();
 	err |= __put_user(used_math, &sc->sc_used_math);
 
 	if (used_math) {
@@ -250,21 +194,20 @@ static int setup_sigcontext32(struct pt_regs *regs,
 		 * Save FPU state to signal context.  Signal handler
 		 * will "inherit" current FPU state.
 		 */
-		err |= protected_save_fp_context32(sc, used_math);
+		err |= protected_save_fp_context32(sc);
 	}
 	return err;
 }
 
 static int
-check_and_restore_fp_context32(struct sigcontext32 __user *sc,
-			       unsigned used_math)
+check_and_restore_fp_context32(struct sigcontext32 __user *sc)
 {
 	int err, sig;
 
 	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
 	if (err > 0)
 		err = 0;
-	err |= protected_restore_fp_context32(sc, used_math);
+	err |= protected_restore_fp_context32(sc);
 	return err ?: sig;
 }
 
@@ -301,10 +244,9 @@ static int restore_sigcontext32(struct pt_regs *regs,
 	if (used_math) {
 		/* restore fpu context if we have used it before */
 		if (!err)
-			err = check_and_restore_fp_context32(sc, used_math);
+			err = check_and_restore_fp_context32(sc);
 	} else {
-		/* signal handler may have used FPU or MSA. Disable them. */
-		disable_msa();
+		/* signal handler may have used FPU.  Give it up. */
 		lose_fpu(0);
 	}
 
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index df0598d9bfdd..949f2c6827a0 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -301,7 +301,7 @@ static int cps_cpu_disable(void)
 
 	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
 	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
-	smp_mb__after_atomic_dec();
+	smp_mb__after_atomic();
 	set_cpu_online(cpu, false);
 	cpu_clear(cpu, cpu_callin_map);
 
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index d687c6e3258d..4fda672cb58e 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -384,6 +384,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 
 	kfree(vcpu->arch.guest_ebase);
 	kfree(vcpu->arch.kseg0_commpage);
+	kfree(vcpu);
 }
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
diff --git a/arch/mips/math-emu/ieee754.c b/arch/mips/math-emu/ieee754.c
index 53f1d2287084..8e97acbbe22c 100644
--- a/arch/mips/math-emu/ieee754.c
+++ b/arch/mips/math-emu/ieee754.c
@@ -34,13 +34,22 @@
  * Special constants
  */
 
-#define DPCNST(s, b, m)							\
+/*
+ * Older GCC requires the inner braces for initialization of union ieee754dp's
+ * anonymous struct member.  Without them, an error will result.
+ */
+#define xPCNST(s, b, m, ebias)						\
 {									\
-	.sign	= (s),							\
-	.bexp	= (b) + DP_EBIAS,					\
-	.mant	= (m)							\
+	{								\
+		.sign	= (s),						\
+		.bexp	= (b) + ebias,					\
+		.mant	= (m)						\
+	}								\
 }
 
+#define DPCNST(s, b, m)							\
+	xPCNST(s, b, m, DP_EBIAS)
+
 const union ieee754dp __ieee754dp_spcvals[] = {
 	DPCNST(0, DP_EMIN - 1, 0x0000000000000ULL),	/* + zero   */
 	DPCNST(1, DP_EMIN - 1, 0x0000000000000ULL),	/* - zero   */
@@ -62,11 +71,7 @@ const union ieee754dp __ieee754dp_spcvals[] = {
 };
 
 #define SPCNST(s, b, m)							\
-{									\
-	.sign	= (s),							\
-	.bexp	= (b) + SP_EBIAS,					\
-	.mant	= (m)							\
-}
+	xPCNST(s, b, m, SP_EBIAS)
 
 const union ieee754sp __ieee754sp_spcvals[] = {
 	SPCNST(0, SP_EMIN - 1, 0x000000),	/* + zero   */
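
(A standalone sketch of the portability issue the xPCNST wrapper works
around; the field widths below are illustrative, not the exact ieee754dp
layout.)

	union dp {
		struct {
			unsigned long long sign : 1;
			unsigned long long bexp : 11;
			unsigned long long mant : 52;
		};				/* anonymous struct member */
		unsigned long long bits;
	};
	/* Older GCC only accepts designated initializers for the anonymous
	 * struct when they are wrapped in an inner pair of braces: */
	static const union dp one = { { .sign = 0, .bexp = 1023, .mant = 0 } };
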
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 775c2800cba2..8399ddf03a02 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -102,6 +102,7 @@ static struct insn insn_table_MM[] = {
 	{ insn_sd, 0, 0 },
 	{ insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
 	{ insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD },
+	{ insn_slt, M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD },
 	{ insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
 	{ insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD },
 	{ insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 38792c2364f5..6708a2dbf934 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -89,7 +89,7 @@ static struct insn insn_table[] = {
 	{ insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
-	{ insn_lh,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+	{ insn_lh,  M(lh_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_lld,  M(lld_op, 0, 0, 0, 0, 0),	RS | RT | SIMM },
 	{ insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_lui,  M(lui_op, 0, 0, 0, 0, 0),	RT | SIMM },
@@ -110,6 +110,7 @@ static struct insn insn_table[] = {
 	{ insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
 	{ insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
 	{ insn_sllv,  M(spec_op, 0, 0, 0, 0, sllv_op),  RS | RT | RD },
+	{ insn_slt,  M(spec_op, 0, 0, 0, 0, slt_op),  RS | RT | RD },
 	{ insn_sltiu, M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
 	{ insn_sltu, M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD },
 	{ insn_sra,  M(spec_op, 0, 0, 0, 0, sra_op),  RT | RD | RE },
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 00515805fe41..a01b0d6cedd2 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -53,7 +53,7 @@ enum opcode {
 	insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw,
 	insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul,
 	insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd,
-	insn_sd, insn_sll, insn_sllv, insn_sltiu, insn_sltu, insn_sra,
+	insn_sd, insn_sll, insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra,
 	insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall,
 	insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh,
 	insn_xor, insn_xori, insn_yield,
@@ -139,6 +139,13 @@ Ip_u1u2u3(op)						\
 }							\
 UASM_EXPORT_SYMBOL(uasm_i##op);
 
+#define I_s3s1s2(op)					\
+Ip_s3s1s2(op)						\
+{							\
+	build_insn(buf, insn##op, b, c, a);		\
+}							\
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
 #define I_u2u1u3(op)					\
 Ip_u2u1u3(op)						\
 {							\
@@ -289,6 +296,7 @@ I_u2s3u1(_scd)
 I_u2s3u1(_sd)
 I_u2u1u3(_sll)
 I_u3u2u1(_sllv)
+I_s3s1s2(_slt)
 I_u2u1s3(_sltiu)
 I_u3u1u2(_sltu)
 I_u2u1u3(_sra)
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index a67b9753330b..b87390a56a2f 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -119,8 +119,6 @@
 /* Arguments used by JIT */
 #define ARGS_USED_BY_JIT	2 /* only applicable to 64-bit */
 
-#define FLAG_NEED_X_RESET	(1 << 0)
-
 #define SBIT(x)			(1 << (x)) /* Signed version of BIT() */
 
 /**
@@ -153,6 +151,8 @@ static inline int optimize_div(u32 *k)
 	return 0;
 }
 
+static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
+
 /* Simply emit the instruction if the JIT memory space has been allocated */
 #define emit_instr(ctx, func, ...)			\
 do {							\
@@ -166,9 +166,7 @@ do {							\
 /* Determine if immediate is within the 16-bit signed range */
 static inline bool is_range16(s32 imm)
 {
-	if (imm >= SBIT(15) || imm < -SBIT(15))
-		return true;
-	return false;
+	return !(imm >= SBIT(15) || imm < -SBIT(15));
 }
 
 static inline void emit_addu(unsigned int dst, unsigned int src1,
@@ -187,7 +185,7 @@ static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
 {
 	if (ctx->target != NULL) {
 		/* addiu can only handle s16 */
-		if (is_range16(imm)) {
+		if (!is_range16(imm)) {
 			u32 *p = &ctx->target[ctx->idx];
 			uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
 			p = &ctx->target[ctx->idx + 1];
@@ -199,7 +197,7 @@ static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
 	}
 	ctx->idx++;
 
-	if (is_range16(imm))
+	if (!is_range16(imm))
 		ctx->idx++;
 }
 
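(With its polarity fixed, is_range16() now answers "does imm fit addiu's
signed 16-bit immediate?"; a sketch with example values.)

	bool b1 = is_range16(0x7fff);	/* true:  a single addiu/li suffices */
	bool b2 = is_range16(0x8000);	/* false: needs the lui + ori pair,
					 * hence the extra ctx->idx++ above */
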
@@ -240,7 +238,7 @@ static inline void emit_daddiu(unsigned int dst, unsigned int src,
 static inline void emit_addiu(unsigned int dst, unsigned int src,
 			      u32 imm, struct jit_ctx *ctx)
 {
-	if (is_range16(imm)) {
+	if (!is_range16(imm)) {
 		emit_load_imm(r_tmp, imm, ctx);
 		emit_addu(dst, r_tmp, src, ctx);
 	} else {
@@ -313,8 +311,11 @@ static inline void emit_sll(unsigned int dst, unsigned int src,
 			    unsigned int sa, struct jit_ctx *ctx)
 {
 	/* sa is 5-bits long */
-	BUG_ON(sa >= BIT(5));
-	emit_instr(ctx, sll, dst, src, sa);
+	if (sa >= BIT(5))
+		/* Shifting >= 32 results in zero */
+		emit_jit_reg_move(dst, r_zero, ctx);
+	else
+		emit_instr(ctx, sll, dst, src, sa);
 }
 
 static inline void emit_srlv(unsigned int dst, unsigned int src,
@@ -327,8 +328,17 @@ static inline void emit_srl(unsigned int dst, unsigned int src,
 			    unsigned int sa, struct jit_ctx *ctx)
 {
 	/* sa is 5-bits long */
-	BUG_ON(sa >= BIT(5));
-	emit_instr(ctx, srl, dst, src, sa);
+	if (sa >= BIT(5))
+		/* Shifting >= 32 results in zero */
+		emit_jit_reg_move(dst, r_zero, ctx);
+	else
+		emit_instr(ctx, srl, dst, src, sa);
+}
+
+static inline void emit_slt(unsigned int dst, unsigned int src1,
+			    unsigned int src2, struct jit_ctx *ctx)
+{
+	emit_instr(ctx, slt, dst, src1, src2);
 }
 
 static inline void emit_sltu(unsigned int dst, unsigned int src1,
@@ -341,7 +351,7 @@ static inline void emit_sltiu(unsigned dst, unsigned int src,
 			      unsigned int imm, struct jit_ctx *ctx)
 {
 	/* 16 bit immediate */
-	if (is_range16((s32)imm)) {
+	if (!is_range16((s32)imm)) {
 		emit_load_imm(r_tmp, imm, ctx);
 		emit_sltu(dst, src, r_tmp, ctx);
 	} else {
@@ -408,7 +418,7 @@ static inline void emit_div(unsigned int dst, unsigned int src,
 		u32 *p = &ctx->target[ctx->idx];
 		uasm_i_divu(&p, dst, src);
 		p = &ctx->target[ctx->idx + 1];
-		uasm_i_mfhi(&p, dst);
+		uasm_i_mflo(&p, dst);
 	}
 	ctx->idx += 2; /* 2 insts */
 }
@@ -443,6 +453,17 @@ static inline void emit_wsbh(unsigned int dst, unsigned int src,
 	emit_instr(ctx, wsbh, dst, src);
 }
 
+/* load pointer to register */
+static inline void emit_load_ptr(unsigned int dst, unsigned int src,
+				     int imm, struct jit_ctx *ctx)
+{
+	/* src contains the base addr of the 32/64-bit pointer */
+	if (config_enabled(CONFIG_64BIT))
+		emit_instr(ctx, ld, dst, imm, src);
+	else
+		emit_instr(ctx, lw, dst, imm, src);
+}
+
 /* load a function pointer to register */
 static inline void emit_load_func(unsigned int reg, ptr imm,
 				  struct jit_ctx *ctx)
@@ -545,29 +566,13 @@ static inline u16 align_sp(unsigned int num)
 	return num;
 }
 
-static inline void update_on_xread(struct jit_ctx *ctx)
-{
-	if (!(ctx->flags & SEEN_X))
-		ctx->flags |= FLAG_NEED_X_RESET;
-
-	ctx->flags |= SEEN_X;
-}
-
 static bool is_load_to_a(u16 inst)
 {
 	switch (inst) {
-	case BPF_S_LD_W_LEN:
-	case BPF_S_LD_W_ABS:
-	case BPF_S_LD_H_ABS:
-	case BPF_S_LD_B_ABS:
-	case BPF_S_ANC_CPU:
-	case BPF_S_ANC_IFINDEX:
-	case BPF_S_ANC_MARK:
-	case BPF_S_ANC_PROTOCOL:
-	case BPF_S_ANC_RXHASH:
-	case BPF_S_ANC_VLAN_TAG:
-	case BPF_S_ANC_VLAN_TAG_PRESENT:
-	case BPF_S_ANC_QUEUE:
+	case BPF_LD | BPF_W | BPF_LEN:
+	case BPF_LD | BPF_W | BPF_ABS:
+	case BPF_LD | BPF_H | BPF_ABS:
+	case BPF_LD | BPF_B | BPF_ABS:
 		return true;
 	default:
 		return false;
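
(For reference, the rewritten cases compose the classic-BPF opcode fields
directly; with the uapi values BPF_LD = 0x00, BPF_W = 0x00, BPF_H = 0x08,
BPF_B = 0x10 and BPF_ABS = 0x20, a sketch of the arithmetic:)

	BUILD_BUG_ON((BPF_LD | BPF_W | BPF_ABS) != 0x20);
	BUILD_BUG_ON((BPF_LD | BPF_H | BPF_ABS) != 0x28);
	BUILD_BUG_ON((BPF_LD | BPF_B | BPF_ABS) != 0x30);
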
@@ -618,7 +623,10 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
 	if (ctx->flags & SEEN_MEM) {
 		if (real_off % (RSIZE * 2))
 			real_off += RSIZE;
-		emit_addiu(r_M, r_sp, real_off, ctx);
+		if (config_enabled(CONFIG_64BIT))
+			emit_daddiu(r_M, r_sp, real_off, ctx);
+		else
+			emit_addiu(r_M, r_sp, real_off, ctx);
 	}
 }
 
@@ -705,11 +713,11 @@ static void build_prologue(struct jit_ctx *ctx)
 	if (ctx->flags & SEEN_SKB)
 		emit_reg_move(r_skb, MIPS_R_A0, ctx);
 
-	if (ctx->flags & FLAG_NEED_X_RESET)
+	if (ctx->flags & SEEN_X)
 		emit_jit_reg_move(r_X, r_zero, ctx);
 
 	/* Do not leak kernel data to userspace */
-	if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
+	if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
 		emit_jit_reg_move(r_A, r_zero, ctx);
 }
 
@@ -757,13 +765,17 @@ static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
 	return (u64)err << 32 | ntohl(ret);
 }
 
-#define PKT_TYPE_MAX 7
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_TYPE_MAX	(7 << 5)
+#else
+#define PKT_TYPE_MAX	7
+#endif
 static int pkt_type_offset(void)
 {
 	struct sk_buff skb_probe = {
 		.pkt_type = ~0,
 	};
-	char *ct = (char *)&skb_probe;
+	u8 *ct = (u8 *)&skb_probe;
 	unsigned int off;
 
 	for (off = 0; off < sizeof(struct sk_buff); off++) {
@@ -783,46 +795,62 @@ static int build_body(struct jit_ctx *ctx)
 	u32 k, b_off __maybe_unused;
 
 	for (i = 0; i < prog->len; i++) {
+		u16 code;
+
 		inst = &(prog->insns[i]);
 		pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
 			 __func__, inst->code, inst->jt, inst->jf, inst->k);
 		k = inst->k;
+		code = bpf_anc_helper(inst);
 
 		if (ctx->target == NULL)
 			ctx->offsets[i] = ctx->idx * 4;
 
-		switch (inst->code) {
-		case BPF_S_LD_IMM:
+		switch (code) {
+		case BPF_LD | BPF_IMM:
 			/* A <- k ==> li r_A, k */
 			ctx->flags |= SEEN_A;
 			emit_load_imm(r_A, k, ctx);
 			break;
-		case BPF_S_LD_W_LEN:
+		case BPF_LD | BPF_W | BPF_LEN:
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
 			/* A <- len ==> lw r_A, offset(skb) */
 			ctx->flags |= SEEN_SKB | SEEN_A;
 			off = offsetof(struct sk_buff, len);
 			emit_load(r_A, r_skb, off, ctx);
 			break;
-		case BPF_S_LD_MEM:
+		case BPF_LD | BPF_MEM:
 			/* A <- M[k] ==> lw r_A, offset(M) */
 			ctx->flags |= SEEN_MEM | SEEN_A;
 			emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
 			break;
-		case BPF_S_LD_W_ABS:
+		case BPF_LD | BPF_W | BPF_ABS:
 			/* A <- P[k:4] */
 			load_order = 2;
 			goto load;
-		case BPF_S_LD_H_ABS:
+		case BPF_LD | BPF_H | BPF_ABS:
 			/* A <- P[k:2] */
 			load_order = 1;
 			goto load;
-		case BPF_S_LD_B_ABS:
+		case BPF_LD | BPF_B | BPF_ABS:
 			/* A <- P[k:1] */
 			load_order = 0;
 load:
+			/* the interpreter will deal with the negative K */
+			if ((int)k < 0)
+				return -ENOTSUPP;
+
 			emit_load_imm(r_off, k, ctx);
 load_common:
+			/*
+			 * We may have gotten here from the indirect loads, so
+			 * return if the offset is negative.
+			 */
+			emit_slt(r_s0, r_off, r_zero, ctx);
+			emit_bcond(MIPS_COND_NE, r_s0, r_zero,
+				   b_imm(prog->len, ctx), ctx);
+			emit_reg_move(r_ret, r_zero, ctx);
+
 			ctx->flags |= SEEN_CALL | SEEN_OFF | SEEN_S0 |
 				SEEN_SKB | SEEN_A;
 
@@ -852,39 +880,42 @@ load_common:
 			emit_b(b_imm(prog->len, ctx), ctx);
 			emit_reg_move(r_ret, r_zero, ctx);
 			break;
-		case BPF_S_LD_W_IND:
+		case BPF_LD | BPF_W | BPF_IND:
 			/* A <- P[X + k:4] */
 			load_order = 2;
 			goto load_ind;
-		case BPF_S_LD_H_IND:
+		case BPF_LD | BPF_H | BPF_IND:
 			/* A <- P[X + k:2] */
 			load_order = 1;
 			goto load_ind;
-		case BPF_S_LD_B_IND:
+		case BPF_LD | BPF_B | BPF_IND:
 			/* A <- P[X + k:1] */
 			load_order = 0;
 load_ind:
-			update_on_xread(ctx);
 			ctx->flags |= SEEN_OFF | SEEN_X;
 			emit_addiu(r_off, r_X, k, ctx);
 			goto load_common;
-		case BPF_S_LDX_IMM:
+		case BPF_LDX | BPF_IMM:
 			/* X <- k */
 			ctx->flags |= SEEN_X;
 			emit_load_imm(r_X, k, ctx);
 			break;
-		case BPF_S_LDX_MEM:
+		case BPF_LDX | BPF_MEM:
 			/* X <- M[k] */
 			ctx->flags |= SEEN_X | SEEN_MEM;
 			emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
 			break;
-		case BPF_S_LDX_W_LEN:
+		case BPF_LDX | BPF_W | BPF_LEN:
 			/* X <- len */
 			ctx->flags |= SEEN_X | SEEN_SKB;
 			off = offsetof(struct sk_buff, len);
 			emit_load(r_X, r_skb, off, ctx);
 			break;
-		case BPF_S_LDX_B_MSH:
+		case BPF_LDX | BPF_B | BPF_MSH:
+			/* the interpreter will deal with the negative K */
+			if ((int)k < 0)
+				return -ENOTSUPP;
+
 			/* X <- 4 * (P[k:1] & 0xf) */
 			ctx->flags |= SEEN_X | SEEN_CALL | SEEN_S0 | SEEN_SKB;
 			/* Load offset to a1 */
@@ -917,50 +948,49 @@ load_ind:
 			emit_b(b_imm(prog->len, ctx), ctx);
 			emit_load_imm(r_ret, 0, ctx); /* delay slot */
 			break;
-		case BPF_S_ST:
+		case BPF_ST:
 			/* M[k] <- A */
 			ctx->flags |= SEEN_MEM | SEEN_A;
 			emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
 			break;
-		case BPF_S_STX:
+		case BPF_STX:
 			/* M[k] <- X */
 			ctx->flags |= SEEN_MEM | SEEN_X;
 			emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
 			break;
-		case BPF_S_ALU_ADD_K:
+		case BPF_ALU | BPF_ADD | BPF_K:
 			/* A += K */
 			ctx->flags |= SEEN_A;
 			emit_addiu(r_A, r_A, k, ctx);
 			break;
-		case BPF_S_ALU_ADD_X:
+		case BPF_ALU | BPF_ADD | BPF_X:
 			/* A += X */
 			ctx->flags |= SEEN_A | SEEN_X;
 			emit_addu(r_A, r_A, r_X, ctx);
 			break;
-		case BPF_S_ALU_SUB_K:
+		case BPF_ALU | BPF_SUB | BPF_K:
 			/* A -= K */
 			ctx->flags |= SEEN_A;
 			emit_addiu(r_A, r_A, -k, ctx);
 			break;
-		case BPF_S_ALU_SUB_X:
+		case BPF_ALU | BPF_SUB | BPF_X:
 			/* A -= X */
 			ctx->flags |= SEEN_A | SEEN_X;
 			emit_subu(r_A, r_A, r_X, ctx);
 			break;
-		case BPF_S_ALU_MUL_K:
+		case BPF_ALU | BPF_MUL | BPF_K:
 			/* A *= K */
 			/* Load K to scratch register before MUL */
 			ctx->flags |= SEEN_A | SEEN_S0;
 			emit_load_imm(r_s0, k, ctx);
 			emit_mul(r_A, r_A, r_s0, ctx);
 			break;
-		case BPF_S_ALU_MUL_X:
+		case BPF_ALU | BPF_MUL | BPF_X:
 			/* A *= X */
-			update_on_xread(ctx);
 			ctx->flags |= SEEN_A | SEEN_X;
 			emit_mul(r_A, r_A, r_X, ctx);
 			break;
-		case BPF_S_ALU_DIV_K:
+		case BPF_ALU | BPF_DIV | BPF_K:
 			/* A /= k */
 			if (k == 1)
 				break;
@@ -973,7 +1003,7 @@ load_ind:
 			emit_load_imm(r_s0, k, ctx);
 			emit_div(r_A, r_s0, ctx);
 			break;
-		case BPF_S_ALU_MOD_K:
+		case BPF_ALU | BPF_MOD | BPF_K:
 			/* A %= k */
 			if (k == 1 || optimize_div(&k)) {
 				ctx->flags |= SEEN_A;
@@ -984,9 +1014,8 @@ load_ind:
 				emit_mod(r_A, r_s0, ctx);
 			}
 			break;
-		case BPF_S_ALU_DIV_X:
+		case BPF_ALU | BPF_DIV | BPF_X:
 			/* A /= X */
-			update_on_xread(ctx);
 			ctx->flags |= SEEN_X | SEEN_A;
 			/* Check if r_X is zero */
 			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
@@ -994,9 +1023,8 @@ load_ind:
 			emit_load_imm(r_val, 0, ctx); /* delay slot */
 			emit_div(r_A, r_X, ctx);
 			break;
-		case BPF_S_ALU_MOD_X:
+		case BPF_ALU | BPF_MOD | BPF_X:
 			/* A %= X */
-			update_on_xread(ctx);
 			ctx->flags |= SEEN_X | SEEN_A;
 			/* Check if r_X is zero */
 			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
@@ -1004,94 +1032,89 @@ load_ind:
 			emit_load_imm(r_val, 0, ctx); /* delay slot */
 			emit_mod(r_A, r_X, ctx);
 			break;
-		case BPF_S_ALU_OR_K:
+		case BPF_ALU | BPF_OR | BPF_K:
 			/* A |= K */
 			ctx->flags |= SEEN_A;
 			emit_ori(r_A, r_A, k, ctx);
 			break;
-		case BPF_S_ALU_OR_X:
+		case BPF_ALU | BPF_OR | BPF_X:
 			/* A |= X */
-			update_on_xread(ctx);
 			ctx->flags |= SEEN_A;
 			emit_ori(r_A, r_A, r_X, ctx);
 			break;
-		case BPF_S_ALU_XOR_K:
+		case BPF_ALU | BPF_XOR | BPF_K:
 			/* A ^= k */
 			ctx->flags |= SEEN_A;
 			emit_xori(r_A, r_A, k, ctx);
 			break;
-		case BPF_S_ANC_ALU_XOR_X:
-		case BPF_S_ALU_XOR_X:
+		case BPF_ANC | SKF_AD_ALU_XOR_X:
+		case BPF_ALU | BPF_XOR | BPF_X:
 			/* A ^= X */
-			update_on_xread(ctx);
 			ctx->flags |= SEEN_A;
 			emit_xor(r_A, r_A, r_X, ctx);
 			break;
-		case BPF_S_ALU_AND_K:
+		case BPF_ALU | BPF_AND | BPF_K:
 			/* A &= K */
 			ctx->flags |= SEEN_A;
 			emit_andi(r_A, r_A, k, ctx);
 			break;
-		case BPF_S_ALU_AND_X:
+		case BPF_ALU | BPF_AND | BPF_X:
 			/* A &= X */
-			update_on_xread(ctx);
 			ctx->flags |= SEEN_A | SEEN_X;
 			emit_and(r_A, r_A, r_X, ctx);
 			break;
-		case BPF_S_ALU_LSH_K:
+		case BPF_ALU | BPF_LSH | BPF_K:
 			/* A <<= K */
 			ctx->flags |= SEEN_A;
 			emit_sll(r_A, r_A, k, ctx);
 			break;
-		case BPF_S_ALU_LSH_X:
+		case BPF_ALU | BPF_LSH | BPF_X:
 			/* A <<= X */
 			ctx->flags |= SEEN_A | SEEN_X;
-			update_on_xread(ctx);
 			emit_sllv(r_A, r_A, r_X, ctx);
 			break;
-		case BPF_S_ALU_RSH_K:
+		case BPF_ALU | BPF_RSH | BPF_K:
 			/* A >>= K */
 			ctx->flags |= SEEN_A;
 			emit_srl(r_A, r_A, k, ctx);
 			break;
-		case BPF_S_ALU_RSH_X:
+		case BPF_ALU | BPF_RSH | BPF_X:
 			ctx->flags |= SEEN_A | SEEN_X;
-			update_on_xread(ctx);
 			emit_srlv(r_A, r_A, r_X, ctx);
 			break;
-		case BPF_S_ALU_NEG:
+		case BPF_ALU | BPF_NEG:
 			/* A = -A */
 			ctx->flags |= SEEN_A;
 			emit_neg(r_A, ctx);
 			break;
-		case BPF_S_JMP_JA:
+		case BPF_JMP | BPF_JA:
 			/* pc += K */
 			emit_b(b_imm(i + k + 1, ctx), ctx);
 			emit_nop(ctx);
 			break;
-		case BPF_S_JMP_JEQ_K:
+		case BPF_JMP | BPF_JEQ | BPF_K:
 			/* pc += ( A == K ) ? pc->jt : pc->jf */
 			condt = MIPS_COND_EQ | MIPS_COND_K;
 			goto jmp_cmp;
-		case BPF_S_JMP_JEQ_X:
+		case BPF_JMP | BPF_JEQ | BPF_X:
 			ctx->flags |= SEEN_X;
 			/* pc += ( A == X ) ? pc->jt : pc->jf */
 			condt = MIPS_COND_EQ | MIPS_COND_X;
 			goto jmp_cmp;
-		case BPF_S_JMP_JGE_K:
+		case BPF_JMP | BPF_JGE | BPF_K:
 			/* pc += ( A >= K ) ? pc->jt : pc->jf */
 			condt = MIPS_COND_GE | MIPS_COND_K;
 			goto jmp_cmp;
-		case BPF_S_JMP_JGE_X:
+		case BPF_JMP | BPF_JGE | BPF_X:
 			ctx->flags |= SEEN_X;
 			/* pc += ( A >= X ) ? pc->jt : pc->jf */
 			condt = MIPS_COND_GE | MIPS_COND_X;
 			goto jmp_cmp;
-		case BPF_S_JMP_JGT_K:
+		case BPF_JMP | BPF_JGT | BPF_K:
 			/* pc += ( A > K ) ? pc->jt : pc->jf */
 			condt = MIPS_COND_GT | MIPS_COND_K;
 			goto jmp_cmp;
-		case BPF_S_JMP_JGT_X:
+		case BPF_JMP | BPF_JGT | BPF_X:
 			ctx->flags |= SEEN_X;
 			/* pc += ( A > X ) ? pc->jt : pc->jf */
 			condt = MIPS_COND_GT | MIPS_COND_X;
@@ -1109,7 +1132,7 @@ jmp_cmp:
 				}
 				/* A < (K|X) ? r_scratch = 1 */
 				b_off = b_imm(i + inst->jf + 1, ctx);
-				emit_bcond(MIPS_COND_GT, r_s0, r_zero, b_off,
+				emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
 					   ctx);
 				emit_nop(ctx);
 				/* A > (K|X) ? scratch = 0 */
@@ -1167,7 +1190,7 @@ jmp_cmp:
 				}
 			}
 			break;
-		case BPF_S_JMP_JSET_K:
+		case BPF_JMP | BPF_JSET | BPF_K:
 			ctx->flags |= SEEN_S0 | SEEN_S1 | SEEN_A;
 			/* pc += (A & K) ? pc -> jt : pc -> jf */
 			emit_load_imm(r_s1, k, ctx);
@@ -1181,7 +1204,7 @@ jmp_cmp:
 			emit_b(b_off, ctx);
 			emit_nop(ctx);
 			break;
-		case BPF_S_JMP_JSET_X:
+		case BPF_JMP | BPF_JSET | BPF_X:
 			ctx->flags |= SEEN_S0 | SEEN_X | SEEN_A;
 			/* pc += (A & X) ? pc -> jt : pc -> jf */
 			emit_and(r_s0, r_A, r_X, ctx);
@@ -1194,7 +1217,7 @@ jmp_cmp:
 			emit_b(b_off, ctx);
 			emit_nop(ctx);
 			break;
-		case BPF_S_RET_A:
+		case BPF_RET | BPF_A:
 			ctx->flags |= SEEN_A;
 			if (i != prog->len - 1)
 				/*
@@ -1204,7 +1227,7 @@ jmp_cmp:
 				emit_b(b_imm(prog->len, ctx), ctx);
 			emit_reg_move(r_ret, r_A, ctx); /* delay slot */
 			break;
-		case BPF_S_RET_K:
+		case BPF_RET | BPF_K:
 			/*
 			 * It can emit two instructions so it does not fit on
 			 * the delay slot.
@@ -1219,19 +1242,18 @@ jmp_cmp:
 				emit_nop(ctx);
 			}
 			break;
-		case BPF_S_MISC_TAX:
+		case BPF_MISC | BPF_TAX:
 			/* X = A */
 			ctx->flags |= SEEN_X | SEEN_A;
 			emit_jit_reg_move(r_X, r_A, ctx);
 			break;
-		case BPF_S_MISC_TXA:
+		case BPF_MISC | BPF_TXA:
 			/* A = X */
 			ctx->flags |= SEEN_A | SEEN_X;
-			update_on_xread(ctx);
 			emit_jit_reg_move(r_A, r_X, ctx);
 			break;
 		/* AUX */
-		case BPF_S_ANC_PROTOCOL:
+		case BPF_ANC | SKF_AD_PROTOCOL:
 			/* A = ntohs(skb->protocol) */
 			ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -1256,7 +1278,7 @@ jmp_cmp:
 			}
 #endif
 			break;
-		case BPF_S_ANC_CPU:
+		case BPF_ANC | SKF_AD_CPU:
 			ctx->flags |= SEEN_A | SEEN_OFF;
 			/* A = current_thread_info()->cpu */
 			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
@@ -1265,11 +1287,12 @@ jmp_cmp:
 			/* $28/gp points to the thread_info struct */
 			emit_load(r_A, 28, off, ctx);
 			break;
-		case BPF_S_ANC_IFINDEX:
+		case BPF_ANC | SKF_AD_IFINDEX:
 			/* A = skb->dev->ifindex */
 			ctx->flags |= SEEN_SKB | SEEN_A | SEEN_S0;
 			off = offsetof(struct sk_buff, dev);
-			emit_load(r_s0, r_skb, off, ctx);
+			/* Load *dev pointer */
+			emit_load_ptr(r_s0, r_skb, off, ctx);
 			/* error (0) in the delay slot */
 			emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
 				   b_imm(prog->len, ctx), ctx);
@@ -1279,31 +1302,36 @@ jmp_cmp:
 			off = offsetof(struct net_device, ifindex);
 			emit_load(r_A, r_s0, off, ctx);
 			break;
-		case BPF_S_ANC_MARK:
+		case BPF_ANC | SKF_AD_MARK:
 			ctx->flags |= SEEN_SKB | SEEN_A;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 			off = offsetof(struct sk_buff, mark);
 			emit_load(r_A, r_skb, off, ctx);
 			break;
-		case BPF_S_ANC_RXHASH:
+		case BPF_ANC | SKF_AD_RXHASH:
 			ctx->flags |= SEEN_SKB | SEEN_A;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
 			off = offsetof(struct sk_buff, hash);
 			emit_load(r_A, r_skb, off, ctx);
 			break;
-		case BPF_S_ANC_VLAN_TAG:
-		case BPF_S_ANC_VLAN_TAG_PRESENT:
+		case BPF_ANC | SKF_AD_VLAN_TAG:
+		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
 			ctx->flags |= SEEN_SKB | SEEN_S0 | SEEN_A;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 						  vlan_tci) != 2);
 			off = offsetof(struct sk_buff, vlan_tci);
 			emit_half_load(r_s0, r_skb, off, ctx);
-			if (inst->code == BPF_S_ANC_VLAN_TAG)
-				emit_and(r_A, r_s0, VLAN_VID_MASK, ctx);
-			else
-				emit_and(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
+			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
+				emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
+			} else {
+				emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
+				/* return 1 if present */
+				emit_sltu(r_A, r_zero, r_A, ctx);
+			}
 			break;
-		case BPF_S_ANC_PKTTYPE:
+		case BPF_ANC | SKF_AD_PKTTYPE:
+			ctx->flags |= SEEN_SKB;
+
 			off = pkt_type_offset();
 
 			if (off < 0)
@@ -1311,8 +1339,12 @@ jmp_cmp:
 			emit_load_byte(r_tmp, r_skb, off, ctx);
 			/* Keep only the last 3 bits */
 			emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
+#ifdef __BIG_ENDIAN_BITFIELD
+			/* Get the actual packet type to the lower 3 bits */
+			emit_srl(r_A, r_A, 5, ctx);
+#endif
 			break;
-		case BPF_S_ANC_QUEUE:
+		case BPF_ANC | SKF_AD_QUEUE:
 			ctx->flags |= SEEN_SKB | SEEN_A;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
 						  queue_mapping) != 2);
@@ -1322,8 +1354,8 @@ jmp_cmp:
 			emit_half_load(r_A, r_skb, off, ctx);
 			break;
 		default:
-			pr_warn("%s: Unhandled opcode: 0x%02x\n", __FILE__,
-				inst->code);
+			pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
+				 inst->code);
 			return -1;
 		}
 	}
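
(A standalone sketch of the PKTTYPE endianness handling above: on big-endian
bitfield layouts the 3-bit pkt_type field sits in the top bits of its byte,
which is why the JIT masks with 7 << 5 and shifts right by 5; the byte value
here is made up.)

	u8 byte = 0xa5;				/* hypothetical probed byte */
#ifdef __BIG_ENDIAN_BITFIELD
	u8 type = (byte & (7 << 5)) >> 5;	/* bits 7..5 -> 0..7 */
#else
	u8 type = byte & 7;			/* already in bits 2..0 */
#endif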