Diffstat (limited to 'include')
-rw-r--r--  include/acpi/video.h                         |  11
-rw-r--r--  include/drm/drm_fixed.h                      |  14
-rw-r--r--  include/dt-bindings/clock/vf610-clock.h      |   4
-rw-r--r--  include/dt-bindings/pinctrl/am33xx.h         |   2
-rw-r--r--  include/linux/acpi.h                         |   1
-rw-r--r--  include/linux/cgroup.h                       |   3
-rw-r--r--  include/linux/cgroup_subsys.h                |  45
-rw-r--r--  include/linux/crc-t10dif.h                   |   4
-rw-r--r--  include/linux/drbd.h                         |   6
-rw-r--r--  include/linux/drbd_genl.h                    |   2
-rw-r--r--  include/linux/drbd_limits.h                  |   9
-rw-r--r--  include/linux/edac.h                         |   7
-rw-r--r--  include/linux/firewire.h                     |   1
-rw-r--r--  include/linux/mfd/syscon/imx6q-iomuxc-gpr.h  | 137
-rw-r--r--  include/linux/mod_devicetable.h              |   5
-rw-r--r--  include/linux/platform_data/mmc-pxamci.h     |   2
-rw-r--r--  include/linux/sched.h                        |   1
-rw-r--r--  include/linux/shdma-base.h                   |   4
-rw-r--r--  include/linux/tick.h                         |   6
-rw-r--r--  include/linux/usb.h                          |  11
-rw-r--r--  include/linux/vmpressure.h                   |   3
-rw-r--r--  include/trace/events/bcache.h                | 381
-rw-r--r--  include/trace/ftrace.h                       |   4
-rw-r--r--  include/uapi/linux/firewire-cdev.h           |   4
-rw-r--r--  include/uapi/linux/usb/ch11.h                |  11
-rw-r--r--  include/xen/interface/io/blkif.h             |  53
-rw-r--r--  include/xen/interface/io/ring.h              |   5
27 files changed, 489 insertions, 247 deletions
diff --git a/include/acpi/video.h b/include/acpi/video.h
index b26dc4fb7ba8..61109f2609fc 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -17,21 +17,12 @@ struct acpi_device;
 #define ACPI_VIDEO_DISPLAY_LEGACY_TV      0x0200
 
 #if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
-extern int __acpi_video_register(bool backlight_quirks);
-static inline int acpi_video_register(void)
-{
-	return __acpi_video_register(false);
-}
-static inline int acpi_video_register_with_quirks(void)
-{
-	return __acpi_video_register(true);
-}
+extern int acpi_video_register(void);
 extern void acpi_video_unregister(void);
 extern int acpi_video_get_edid(struct acpi_device *device, int type,
 			       int device_id, void **edid);
 #else
 static inline int acpi_video_register(void) { return 0; }
-static inline int acpi_video_register_with_quirks(void) { return 0; }
 static inline void acpi_video_unregister(void) { return; }
 static inline int acpi_video_get_edid(struct acpi_device *device, int type,
 				      int device_id, void **edid)
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index f5e1168c7647..d639049a613d 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -84,12 +84,12 @@ static inline int drm_fixp2int(int64_t a)
 	return ((s64)a) >> DRM_FIXED_POINT;
 }
 
-static inline s64 drm_fixp_msbset(int64_t a)
+static inline unsigned drm_fixp_msbset(int64_t a)
 {
 	unsigned shift, sign = (a >> 63) & 1;
 
 	for (shift = 62; shift > 0; --shift)
-		if ((a >> shift) != sign)
+		if (((a >> shift) & 1) != sign)
 			return shift;
 
 	return 0;
@@ -100,9 +100,9 @@ static inline s64 drm_fixp_mul(s64 a, s64 b)
 	unsigned shift = drm_fixp_msbset(a) + drm_fixp_msbset(b);
 	s64 result;
 
-	if (shift > 63) {
-		shift = shift - 63;
-		a >>= shift >> 1;
+	if (shift > 61) {
+		shift = shift - 61;
+		a >>= (shift >> 1) + (shift & 1);
 		b >>= shift >> 1;
 	} else
 		shift = 0;
@@ -120,7 +120,7 @@ static inline s64 drm_fixp_mul(s64 a, s64 b)
 
 static inline s64 drm_fixp_div(s64 a, s64 b)
 {
-	unsigned shift = 63 - drm_fixp_msbset(a);
+	unsigned shift = 62 - drm_fixp_msbset(a);
 	s64 result;
 
 	a <<= shift;
@@ -154,7 +154,7 @@ static inline s64 drm_fixp_exp(s64 x)
 	}
 
 	if (x < 0)
-		sum = drm_fixp_div(1, sum);
+		sum = drm_fixp_div(DRM_FIXED_ONE, sum);
 
 	return sum;
 }
diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h
index 15e997fa78f2..4aa2b48cd151 100644
--- a/include/dt-bindings/clock/vf610-clock.h
+++ b/include/dt-bindings/clock/vf610-clock.h
@@ -158,6 +158,8 @@
 #define VF610_CLK_GPU_SEL		145
 #define VF610_CLK_GPU_EN		146
 #define VF610_CLK_GPU2D			147
-#define VF610_CLK_END			148
+#define VF610_CLK_ENET0			148
+#define VF610_CLK_ENET1			149
+#define VF610_CLK_END			150
 
 #endif /* __DT_BINDINGS_CLOCK_VF610_H */
diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h
index 469e0325e6f4..2fbc804e1a45 100644
--- a/include/dt-bindings/pinctrl/am33xx.h
+++ b/include/dt-bindings/pinctrl/am33xx.h
@@ -5,7 +5,7 @@
 #ifndef _DT_BINDINGS_PINCTRL_AM33XX_H
 #define _DT_BINDINGS_PINCTRL_AM33XX_H
 
-#include <include/dt-bindings/pinctrl/omap.h>
+#include <dt-bindings/pinctrl/omap.h>
 
 /* am33xx specific mux bit defines */
 #undef PULL_ENA
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6ad72f92469c..353ba256f368 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -191,7 +191,6 @@ extern bool wmi_has_guid(const char *guid);
 #define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO			0x0200
 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR		0x0400
 #define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO		0x0800
-#define ACPI_VIDEO_SKIP_BACKLIGHT			0x1000
 
 #if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
 
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 297462b9f41a..e9ac882868c0 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -542,8 +542,7 @@ int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
 
 int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
-int task_cgroup_path_from_hierarchy(struct task_struct *task, int hierarchy_id,
-				    char *buf, size_t buflen);
+int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
 
 int cgroup_task_count(const struct cgroup *cgrp);
 
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 6e7ec64b69ab..b613ffd402d1 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -1,86 +1,55 @@
-/* Add subsystem definitions of the form SUBSYS(<name>) in this
- * file. Surround each one by a line of comment markers so that
- * patches don't collide
+/*
+ * List of cgroup subsystems.
+ *
+ * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
  */
-
-/* */
-
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CPUSETS)
 SUBSYS(cpuset)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEBUG)
 SUBSYS(debug)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_SCHED)
 SUBSYS(cpu_cgroup)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_CPUACCT)
 SUBSYS(cpuacct)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_MEMCG)
 SUBSYS(mem_cgroup)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEVICE)
 SUBSYS(devices)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_FREEZER)
 SUBSYS(freezer)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_NET_CLS_CGROUP)
 SUBSYS(net_cls)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_BLK_CGROUP)
 SUBSYS(blkio)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_PERF)
 SUBSYS(perf)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_NETPRIO_CGROUP)
 SUBSYS(net_prio)
 #endif
 
-/* */
-
 #if IS_SUBSYS_ENABLED(CONFIG_CGROUP_HUGETLB)
 SUBSYS(hugetlb)
 #endif
-
-/* */
-
-#ifdef CONFIG_CGROUP_BCACHE
-SUBSYS(bcache)
-#endif
-
-/* */
+/*
+ * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
+ */
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index b3cb71f0d3b0..a9c96d865ee7 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -3,10 +3,6 @@
 
 #include <linux/types.h>
 
-#define CRC_T10DIF_DIGEST_SIZE 2
-#define CRC_T10DIF_BLOCK_SIZE 1
-
-__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len);
 __u16 crc_t10dif(unsigned char const *, size_t);
 
 #endif
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 1b4d4ee1168f..de7d74ab3de6 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -177,7 +177,11 @@ enum drbd_ret_code {
 	ERR_NEED_APV_100	= 163,
 	ERR_NEED_ALLOW_TWO_PRI  = 164,
 	ERR_MD_UNCLEAN          = 165,
-
+	ERR_MD_LAYOUT_CONNECTED = 166,
+	ERR_MD_LAYOUT_TOO_BIG   = 167,
+	ERR_MD_LAYOUT_TOO_SMALL = 168,
+	ERR_MD_LAYOUT_NO_FIT    = 169,
+	ERR_IMPLICIT_SHRINK     = 170,
 	/* insert new ones above this line */
 	AFTER_LAST_ERR_CODE
 };
diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h
index d0d8fac8a6e4..e8c44572b8cb 100644
--- a/include/linux/drbd_genl.h
+++ b/include/linux/drbd_genl.h
@@ -181,6 +181,8 @@ GENL_struct(DRBD_NLA_RESIZE_PARMS, 7, resize_parms,
 	__u64_field(1, DRBD_GENLA_F_MANDATORY,	resize_size)
 	__flg_field(2, DRBD_GENLA_F_MANDATORY,	resize_force)
 	__flg_field(3, DRBD_GENLA_F_MANDATORY,	no_resync)
+	__u32_field_def(4, 0 /* OPTIONAL */, al_stripes, DRBD_AL_STRIPES_DEF)
+	__u32_field_def(5, 0 /* OPTIONAL */, al_stripe_size, DRBD_AL_STRIPE_SIZE_DEF)
 )
 
 GENL_struct(DRBD_NLA_STATE_INFO, 8, state_info,
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h
index 1fedf2b17cc8..17e50bb00521 100644
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -215,4 +215,13 @@
 #define DRBD_ALWAYS_ASBP_DEF	0
 #define DRBD_USE_RLE_DEF	1
 
+#define DRBD_AL_STRIPES_MIN     1
+#define DRBD_AL_STRIPES_MAX     1024
+#define DRBD_AL_STRIPES_DEF     1
+#define DRBD_AL_STRIPES_SCALE   '1'
+
+#define DRBD_AL_STRIPE_SIZE_MIN   4
+#define DRBD_AL_STRIPE_SIZE_MAX   16777216
+#define DRBD_AL_STRIPE_SIZE_DEF   32
+#define DRBD_AL_STRIPE_SIZE_SCALE 'k' /* kilobytes */
 #endif
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 0b763276f619..5c6d7fbaf89e 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -622,7 +622,7 @@ struct edac_raw_error_desc {
  */
 struct mem_ctl_info {
 	struct device			dev;
-	struct bus_type			bus;
+	struct bus_type			*bus;
 
 	struct list_head link;	/* for global list of mem_ctl_info structs */
 
@@ -742,4 +742,9 @@ struct mem_ctl_info {
 #endif
 };
 
+/*
+ * Maximum number of memory controllers in the coherent fabric.
+ */
+#define EDAC_MAX_MCS	16
+
 #endif
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 3b0e820375ab..5d7782e42b8f 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -436,6 +436,7 @@ struct fw_iso_context {
 	int type;
 	int channel;
 	int speed;
+	bool drop_overflow_headers;
 	size_t header_size;
 	union {
 		fw_iso_callback_t sc;
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index dab34a1deb2c..b6bdcd66c07d 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -103,15 +103,15 @@
 #define IMX6Q_GPR1_EXC_MON_MASK			BIT(22)
 #define IMX6Q_GPR1_EXC_MON_OKAY			0x0
 #define IMX6Q_GPR1_EXC_MON_SLVE			BIT(22)
-#define IMX6Q_GPR1_MIPI_IPU2_SEL_MASK		BIT(21)
-#define IMX6Q_GPR1_MIPI_IPU2_SEL_GASKET		0x0
-#define IMX6Q_GPR1_MIPI_IPU2_SEL_IOMUX		BIT(21)
-#define IMX6Q_GPR1_MIPI_IPU1_MUX_MASK		BIT(20)
-#define IMX6Q_GPR1_MIPI_IPU1_MUX_GASKET		0x0
-#define IMX6Q_GPR1_MIPI_IPU1_MUX_IOMUX		BIT(20)
-#define IMX6Q_GPR1_MIPI_IPU2_MUX_MASK		BIT(19)
+#define IMX6Q_GPR1_ENET_CLK_SEL_MASK		BIT(21)
+#define IMX6Q_GPR1_ENET_CLK_SEL_PAD		0
+#define IMX6Q_GPR1_ENET_CLK_SEL_ANATOP		BIT(21)
+#define IMX6Q_GPR1_MIPI_IPU2_MUX_MASK		BIT(20)
 #define IMX6Q_GPR1_MIPI_IPU2_MUX_GASKET		0x0
-#define IMX6Q_GPR1_MIPI_IPU2_MUX_IOMUX		BIT(19)
+#define IMX6Q_GPR1_MIPI_IPU2_MUX_IOMUX		BIT(20)
+#define IMX6Q_GPR1_MIPI_IPU1_MUX_MASK		BIT(19)
+#define IMX6Q_GPR1_MIPI_IPU1_MUX_GASKET		0x0
+#define IMX6Q_GPR1_MIPI_IPU1_MUX_IOMUX		BIT(19)
 #define IMX6Q_GPR1_PCIE_TEST_PD			BIT(18)
 #define IMX6Q_GPR1_IPU_VPU_MUX_MASK		BIT(17)
 #define IMX6Q_GPR1_IPU_VPU_MUX_IPU1		0x0
@@ -279,41 +279,88 @@
 #define IMX6Q_GPR13_CAN2_STOP_REQ		BIT(29)
 #define IMX6Q_GPR13_CAN1_STOP_REQ		BIT(28)
 #define IMX6Q_GPR13_ENET_STOP_REQ		BIT(27)
-#define IMX6Q_GPR13_SATA_PHY_8_MASK		(0x7 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_0_5_DB		(0x0 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_1_0_DB		(0x1 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_1_5_DB		(0x2 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_2_0_DB		(0x3 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_2_5_DB		(0x4 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_3_0_DB		(0x5 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_3_5_DB		(0x6 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_4_0_DB		(0x7 << 24)
-#define IMX6Q_GPR13_SATA_PHY_7_MASK		(0x1f << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA1I		(0x10 << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA1M		(0x10 << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA1X		(0x1a << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA2I		(0x12 << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA2M		(0x12 << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA2X		(0x1a << 19)
-#define IMX6Q_GPR13_SATA_PHY_6_MASK		(0x7 << 16)
-#define IMX6Q_GPR13_SATA_SPEED_MASK		BIT(15)
-#define IMX6Q_GPR13_SATA_SPEED_1P5G		0x0
-#define IMX6Q_GPR13_SATA_SPEED_3P0G		BIT(15)
-#define IMX6Q_GPR13_SATA_PHY_5			BIT(14)
-#define IMX6Q_GPR13_SATA_PHY_4_MASK		(0x7 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_16_16		(0x0 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_14_16		(0x1 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_12_16		(0x2 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_10_16		(0x3 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_9_16		(0x4 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_8_16		(0x5 << 11)
-#define IMX6Q_GPR13_SATA_PHY_3_MASK		(0xf << 7)
-#define IMX6Q_GPR13_SATA_PHY_3_OFF		0x7
-#define IMX6Q_GPR13_SATA_PHY_2_MASK		(0x1f << 2)
-#define IMX6Q_GPR13_SATA_PHY_2_OFF		0x2
-#define IMX6Q_GPR13_SATA_PHY_1_MASK		(0x3 << 0)
-#define IMX6Q_GPR13_SATA_PHY_1_FAST		(0x0 << 0)
-#define IMX6Q_GPR13_SATA_PHY_1_MED		(0x1 << 0)
-#define IMX6Q_GPR13_SATA_PHY_1_SLOW		(0x2 << 0)
-
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK		(0x7 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB	(0x0 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB	(0x1 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB	(0x2 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB	(0x3 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB	(0x4 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB	(0x5 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB	(0x6 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB	(0x7 << 24)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK	(0x1f << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1I	(0x10 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1M	(0x10 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1X	(0x1a << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2I	(0x12 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M	(0x12 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2X	(0x1a << 19)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK	(0x7 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_1F	(0x0 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_2F	(0x1 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_4F	(0x2 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F	(0x3 << 16)
+#define IMX6Q_GPR13_SATA_SPD_MODE_MASK		BIT(15)
+#define IMX6Q_GPR13_SATA_SPD_MODE_1P5G		0x0
+#define IMX6Q_GPR13_SATA_SPD_MODE_3P0G		BIT(15)
+#define IMX6Q_GPR13_SATA_MPLL_SS_EN		BIT(14)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_MASK		(0x7 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_16_16		(0x0 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_14_16		(0x1 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_12_16		(0x2 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_10_16		(0x3 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_9_16		(0x4 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_8_16		(0x5 << 11)
+#define IMX6Q_GPR13_SATA_TX_BOOST_MASK		(0xf << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB	(0x0 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB	(0x1 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB	(0x2 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB	(0x3 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB	(0x4 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB	(0x5 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB	(0x6 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB	(0x7 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB	(0x8 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB	(0x9 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB	(0xa << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB	(0xb << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB	(0xc << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB	(0xd << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB	(0xe << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB	(0xf << 7)
+#define IMX6Q_GPR13_SATA_TX_LVL_MASK		(0x1f << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_937_V		(0x00 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_947_V		(0x01 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_957_V		(0x02 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_966_V		(0x03 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_976_V		(0x04 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_986_V		(0x05 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_996_V		(0x06 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_005_V		(0x07 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_015_V		(0x08 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_025_V		(0x09 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_035_V		(0x0a << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_045_V		(0x0b << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_054_V		(0x0c << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_064_V		(0x0d << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_074_V		(0x0e << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_084_V		(0x0f << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_094_V		(0x10 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_104_V		(0x11 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_113_V		(0x12 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_123_V		(0x13 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_133_V		(0x14 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_143_V		(0x15 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_152_V		(0x16 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_162_V		(0x17 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_172_V		(0x18 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_182_V		(0x19 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_191_V		(0x1a << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_201_V		(0x1b << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_211_V		(0x1c << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_221_V		(0x1d << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_230_V		(0x1e << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_240_V		(0x1f << 2)
+#define IMX6Q_GPR13_SATA_MPLL_CLK_EN		BIT(1)
+#define IMX6Q_GPR13_SATA_TX_EDGE_RATE		BIT(0)
 #endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index b62d4af6c667..45e921401b06 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -361,7 +361,8 @@ struct ssb_device_id {
 	__u16	vendor;
 	__u16	coreid;
 	__u8	revision;
-};
+	__u8	__pad;
+} __attribute__((packed, aligned(2)));
 #define SSB_DEVICE(_vendor, _coreid, _revision)  \
 	{ .vendor = _vendor, .coreid = _coreid, .revision = _revision, }
 #define SSB_DEVTABLE_END  \
@@ -377,7 +378,7 @@ struct bcma_device_id {
 	__u16	id;
 	__u8	rev;
 	__u8	class;
-};
+} __attribute__((packed,aligned(2)));
 #define BCMA_CORE(_manuf, _id, _rev, _class)  \
 	{ .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, }
 #define BCMA_CORETABLE_END  \
diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h
index 9eb515bb799d..1706b3597ce0 100644
--- a/include/linux/platform_data/mmc-pxamci.h
+++ b/include/linux/platform_data/mmc-pxamci.h
@@ -12,7 +12,7 @@ struct pxamci_platform_data {
 	unsigned long detect_delay_ms;		/* delay in millisecond before detecting cards after interrupt */
 	int (*init)(struct device *, irq_handler_t , void *);
 	int (*get_ro)(struct device *);
-	void (*setpower)(struct device *, unsigned int);
+	int (*setpower)(struct device *, unsigned int);
 	void (*exit)(struct device *, void *);
 	int gpio_card_detect;			/* gpio detecting card insertion */
 	int gpio_card_ro;			/* gpio detecting read only toggle */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 50d04b92ceda..d722490da030 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1628,6 +1628,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
 #define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
+#define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 382cf710ca9a..5b1c9848124c 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -124,6 +124,10 @@ void shdma_chan_remove(struct shdma_chan *schan);
 int shdma_init(struct device *dev, struct shdma_dev *sdev,
 		    int chan_num);
 void shdma_cleanup(struct shdma_dev *sdev);
+#if IS_ENABLED(CONFIG_SH_DMAE_BASE)
 bool shdma_chan_filter(struct dma_chan *chan, void *arg);
+#else
+#define shdma_chan_filter NULL
+#endif
 
 #endif
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 9180f4b85e6d..62bd8b72873c 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -174,10 +174,4 @@ static inline void tick_nohz_task_switch(struct task_struct *tsk) { }
 #endif
 
 
-# ifdef CONFIG_CPU_IDLE_GOV_MENU
-extern void menu_hrtimer_cancel(void);
-# else
-static inline void menu_hrtimer_cancel(void) {}
-# endif /* CONFIG_CPU_IDLE_GOV_MENU */
-
 #endif
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a232b7ece1f6..0eec2689b955 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -367,17 +367,6 @@ struct usb_bus {
 
 /* ----------------------------------------------------------------------- */
 
-/* This is arbitrary.
- * From USB 2.0 spec Table 11-13, offset 7, a hub can
- * have up to 255 ports. The most yet reported is 10.
- *
- * Current Wireless USB host hardware (Intel i1480 for example) allows
- * up to 22 devices to connect. Upcoming hardware might raise that
- * limit. Because the arrays need to add a bit for hub status data, we
- * do 31, so plus one evens out to four bytes.
- */
-#define USB_MAXCHILDREN		(31)
-
 struct usb_tt;
 
 enum usb_device_removable {
diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h
index 76be077340ea..7dc17e2456de 100644
--- a/include/linux/vmpressure.h
+++ b/include/linux/vmpressure.h
@@ -12,7 +12,7 @@ struct vmpressure {
 	unsigned long scanned;
 	unsigned long reclaimed;
 	/* The lock is used to keep the scanned/reclaimed above in sync. */
-	struct mutex sr_lock;
+	struct spinlock sr_lock;
 
 	/* The list of vmpressure_event structs. */
 	struct list_head events;
@@ -30,6 +30,7 @@ extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
 extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
 
 extern void vmpressure_init(struct vmpressure *vmpr);
+extern void vmpressure_cleanup(struct vmpressure *vmpr);
 extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
 extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr);
 extern struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css);
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 3cc5a0b278c3..5ebda976ea93 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -9,9 +9,7 @@
 struct search;
 
 DECLARE_EVENT_CLASS(bcache_request,
-
 	TP_PROTO(struct search *s, struct bio *bio),
-
 	TP_ARGS(s, bio),
 
 	TP_STRUCT__entry(
@@ -22,7 +20,6 @@ DECLARE_EVENT_CLASS(bcache_request,
 		__field(dev_t,		orig_sector		)
 		__field(unsigned int,	nr_sector		)
 		__array(char,		rwbs,	6		)
-		__array(char,		comm,	TASK_COMM_LEN	)
 	),
 
 	TP_fast_assign(
@@ -33,36 +30,66 @@ DECLARE_EVENT_CLASS(bcache_request,
 		__entry->orig_sector	= bio->bi_sector - 16;
 		__entry->nr_sector	= bio->bi_size >> 9;
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
-		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
-	TP_printk("%d,%d %s %llu + %u [%s] (from %d,%d @ %llu)",
+	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->rwbs,
-		  (unsigned long long)__entry->sector,
-		  __entry->nr_sector, __entry->comm,
-		  __entry->orig_major, __entry->orig_minor,
+		  __entry->rwbs, (unsigned long long)__entry->sector,
+		  __entry->nr_sector, __entry->orig_major, __entry->orig_minor,
 		  (unsigned long long)__entry->orig_sector)
 );
 
-DEFINE_EVENT(bcache_request, bcache_request_start,
+DECLARE_EVENT_CLASS(bkey,
+	TP_PROTO(struct bkey *k),
+	TP_ARGS(k),
 
-	TP_PROTO(struct search *s, struct bio *bio),
+	TP_STRUCT__entry(
+		__field(u32,	size				)
+		__field(u32,	inode				)
+		__field(u64,	offset				)
+		__field(bool,	dirty				)
+	),
 
-	TP_ARGS(s, bio)
+	TP_fast_assign(
+		__entry->inode	= KEY_INODE(k);
+		__entry->offset	= KEY_OFFSET(k);
+		__entry->size	= KEY_SIZE(k);
+		__entry->dirty	= KEY_DIRTY(k);
+	),
+
+	TP_printk("%u:%llu len %u dirty %u", __entry->inode,
+		  __entry->offset, __entry->size, __entry->dirty)
 );
 
-DEFINE_EVENT(bcache_request, bcache_request_end,
+DECLARE_EVENT_CLASS(btree_node,
+	TP_PROTO(struct btree *b),
+	TP_ARGS(b),
+
+	TP_STRUCT__entry(
+		__field(size_t,		bucket			)
+	),
 
+	TP_fast_assign(
+		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
+	),
+
+	TP_printk("bucket %zu", __entry->bucket)
+);
+
+/* request.c */
+
+DEFINE_EVENT(bcache_request, bcache_request_start,
 	TP_PROTO(struct search *s, struct bio *bio),
+	TP_ARGS(s, bio)
+);
 
+DEFINE_EVENT(bcache_request, bcache_request_end,
+	TP_PROTO(struct search *s, struct bio *bio),
 	TP_ARGS(s, bio)
 );
 
 DECLARE_EVENT_CLASS(bcache_bio,
-
 	TP_PROTO(struct bio *bio),
-
 	TP_ARGS(bio),
 
 	TP_STRUCT__entry(
@@ -70,7 +97,6 @@ DECLARE_EVENT_CLASS(bcache_bio,
 		__field(sector_t,	sector			)
 		__field(unsigned int,	nr_sector		)
 		__array(char,		rwbs,	6		)
-		__array(char,		comm,	TASK_COMM_LEN	)
 	),
 
 	TP_fast_assign(
@@ -78,191 +104,328 @@ DECLARE_EVENT_CLASS(bcache_bio,
 		__entry->sector		= bio->bi_sector;
 		__entry->nr_sector	= bio->bi_size >> 9;
 		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
-		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
-	TP_printk("%d,%d  %s %llu + %u [%s]",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->rwbs,
-		  (unsigned long long)__entry->sector,
-		  __entry->nr_sector, __entry->comm)
+	TP_printk("%d,%d  %s %llu + %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+		  (unsigned long long)__entry->sector, __entry->nr_sector)
 );
 
-
-DEFINE_EVENT(bcache_bio, bcache_passthrough,
-
+DEFINE_EVENT(bcache_bio, bcache_bypass_sequential,
 	TP_PROTO(struct bio *bio),
+	TP_ARGS(bio)
+);
 
+DEFINE_EVENT(bcache_bio, bcache_bypass_congested,
+	TP_PROTO(struct bio *bio),
 	TP_ARGS(bio)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_cache_hit,
+TRACE_EVENT(bcache_read,
+	TP_PROTO(struct bio *bio, bool hit, bool bypass),
+	TP_ARGS(bio, hit, bypass),
 
-	TP_PROTO(struct bio *bio),
+	TP_STRUCT__entry(
+		__field(dev_t,		dev			)
+		__field(sector_t,	sector			)
+		__field(unsigned int,	nr_sector		)
+		__array(char,		rwbs,	6		)
+		__field(bool,		cache_hit		)
+		__field(bool,		bypass			)
+	),
 
-	TP_ARGS(bio)
+	TP_fast_assign(
+		__entry->dev		= bio->bi_bdev->bd_dev;
+		__entry->sector		= bio->bi_sector;
+		__entry->nr_sector	= bio->bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->cache_hit = hit;
+		__entry->bypass = bypass;
+	),
+
+	TP_printk("%d,%d  %s %llu + %u hit %u bypass %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->rwbs, (unsigned long long)__entry->sector,
+		  __entry->nr_sector, __entry->cache_hit, __entry->bypass)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_cache_miss,
+TRACE_EVENT(bcache_write,
+	TP_PROTO(struct bio *bio, bool writeback, bool bypass),
+	TP_ARGS(bio, writeback, bypass),
 
-	TP_PROTO(struct bio *bio),
+	TP_STRUCT__entry(
+		__field(dev_t,		dev			)
+		__field(sector_t,	sector			)
+		__field(unsigned int,	nr_sector		)
+		__array(char,		rwbs,	6		)
+		__field(bool,		writeback		)
+		__field(bool,		bypass			)
+	),
 
-	TP_ARGS(bio)
+	TP_fast_assign(
+		__entry->dev		= bio->bi_bdev->bd_dev;
+		__entry->sector		= bio->bi_sector;
+		__entry->nr_sector	= bio->bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->writeback = writeback;
+		__entry->bypass = bypass;
+	),
+
+	TP_printk("%d,%d  %s %llu + %u hit %u bypass %u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->rwbs, (unsigned long long)__entry->sector,
+		  __entry->nr_sector, __entry->writeback, __entry->bypass)
 );
 
 DEFINE_EVENT(bcache_bio, bcache_read_retry,
-
 	TP_PROTO(struct bio *bio),
-
 	TP_ARGS(bio)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_writethrough,
+DEFINE_EVENT(bkey, bcache_cache_insert,
+	TP_PROTO(struct bkey *k),
+	TP_ARGS(k)
+);
 
-	TP_PROTO(struct bio *bio),
+/* Journal */
 
-	TP_ARGS(bio)
-);
+DECLARE_EVENT_CLASS(cache_set,
+	TP_PROTO(struct cache_set *c),
+	TP_ARGS(c),
 
-DEFINE_EVENT(bcache_bio, bcache_writeback,
+	TP_STRUCT__entry(
+		__array(char,		uuid,	16 )
+	),
 
-	TP_PROTO(struct bio *bio),
+	TP_fast_assign(
+		memcpy(__entry->uuid, c->sb.set_uuid, 16);
+	),
 
-	TP_ARGS(bio)
+	TP_printk("%pU", __entry->uuid)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_write_skip,
-
-	TP_PROTO(struct bio *bio),
+DEFINE_EVENT(bkey, bcache_journal_replay_key,
+	TP_PROTO(struct bkey *k),
+	TP_ARGS(k)
+);
 
-	TP_ARGS(bio)
+DEFINE_EVENT(cache_set, bcache_journal_full,
+	TP_PROTO(struct cache_set *c),
+	TP_ARGS(c)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_btree_read,
+DEFINE_EVENT(cache_set, bcache_journal_entry_full,
+	TP_PROTO(struct cache_set *c),
+	TP_ARGS(c)
+);
 
+DEFINE_EVENT(bcache_bio, bcache_journal_write,
 	TP_PROTO(struct bio *bio),
-
 	TP_ARGS(bio)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_btree_write,
+/* Btree */
 
-	TP_PROTO(struct bio *bio),
+DEFINE_EVENT(cache_set, bcache_btree_cache_cannibalize,
+	TP_PROTO(struct cache_set *c),
+	TP_ARGS(c)
+);
 
-	TP_ARGS(bio)
+DEFINE_EVENT(btree_node, bcache_btree_read,
+	TP_PROTO(struct btree *b),
+	TP_ARGS(b)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_write_dirty,
+TRACE_EVENT(bcache_btree_write,
+	TP_PROTO(struct btree *b),
+	TP_ARGS(b),
 
-	TP_PROTO(struct bio *bio),
+	TP_STRUCT__entry(
+		__field(size_t,		bucket			)
+		__field(unsigned,	block			)
+		__field(unsigned,	keys			)
+	),
 
-	TP_ARGS(bio)
+	TP_fast_assign(
+		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
+		__entry->block	= b->written;
+		__entry->keys	= b->sets[b->nsets].data->keys;
+	),
+
+	TP_printk("bucket %zu", __entry->bucket)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_read_dirty,
+DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
+	TP_PROTO(struct btree *b),
+	TP_ARGS(b)
+);
 
-	TP_PROTO(struct bio *bio),
+DEFINE_EVENT(btree_node, bcache_btree_node_alloc_fail,
+	TP_PROTO(struct btree *b),
+	TP_ARGS(b)
+);
 
-	TP_ARGS(bio)
+DEFINE_EVENT(btree_node, bcache_btree_node_free,
+	TP_PROTO(struct btree *b),
+	TP_ARGS(b)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_write_moving,
+TRACE_EVENT(bcache_btree_gc_coalesce,
+	TP_PROTO(unsigned nodes),
+	TP_ARGS(nodes),
 
-	TP_PROTO(struct bio *bio),
+	TP_STRUCT__entry(
+		__field(unsigned,	nodes			)
+	),
 
-	TP_ARGS(bio)
+	TP_fast_assign(
+		__entry->nodes	= nodes;
+	),
+
+	TP_printk("coalesced %u nodes", __entry->nodes)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_read_moving,
+DEFINE_EVENT(cache_set, bcache_gc_start,
+	TP_PROTO(struct cache_set *c),
+	TP_ARGS(c)
+);
 
-	TP_PROTO(struct bio *bio),
+DEFINE_EVENT(cache_set, bcache_gc_end,
+	TP_PROTO(struct cache_set *c),
+	TP_ARGS(c)
+);
 
-	TP_ARGS(bio)
+DEFINE_EVENT(bkey, bcache_gc_copy,
+	TP_PROTO(struct bkey *k),
+	TP_ARGS(k)
 );
 
-DEFINE_EVENT(bcache_bio, bcache_journal_write,
+DEFINE_EVENT(bkey, bcache_gc_copy_collision,
+	TP_PROTO(struct bkey *k),
+	TP_ARGS(k)
+);
 
-	TP_PROTO(struct bio *bio),
+TRACE_EVENT(bcache_btree_insert_key,
+	TP_PROTO(struct btree *b, struct bkey *k, unsigned op, unsigned status),
+	TP_ARGS(b, k, op, status),
 
-	TP_ARGS(bio)
-);
+	TP_STRUCT__entry(
+		__field(u64,	btree_node			)
+		__field(u32,	btree_level			)
+		__field(u32,	inode				)
+		__field(u64,	offset				)
+		__field(u32,	size				)
+		__field(u8,	dirty				)
+		__field(u8,	op				)
+		__field(u8,	status				)
+	),
 
-DECLARE_EVENT_CLASS(bcache_cache_bio,
+	TP_fast_assign(
+		__entry->btree_node = PTR_BUCKET_NR(b->c, &b->key, 0);
+		__entry->btree_level = b->level;
+		__entry->inode	= KEY_INODE(k);
+		__entry->offset	= KEY_OFFSET(k);
+		__entry->size	= KEY_SIZE(k);
+		__entry->dirty	= KEY_DIRTY(k);
+		__entry->op = op;
+		__entry->status = status;
+	),
 
-	TP_PROTO(struct bio *bio,
-		 sector_t orig_sector,
-		 struct block_device* orig_bdev),
+	TP_printk("%u for %u at %llu(%u): %u:%llu len %u dirty %u",
+		  __entry->status, __entry->op,
+		  __entry->btree_node, __entry->btree_level,
+		  __entry->inode, __entry->offset,
+		  __entry->size, __entry->dirty)
+);
 
-	TP_ARGS(bio, orig_sector, orig_bdev),
+DECLARE_EVENT_CLASS(btree_split,
+	TP_PROTO(struct btree *b, unsigned keys),
+	TP_ARGS(b, keys),
 
 	TP_STRUCT__entry(
-		__field(dev_t,		dev			)
-		__field(dev_t,		orig_dev		)
-		__field(sector_t,	sector			)
-		__field(sector_t,	orig_sector		)
-		__field(unsigned int,	nr_sector		)
-		__array(char,		rwbs,	6		)
-		__array(char,		comm,	TASK_COMM_LEN	)
+		__field(size_t,		bucket			)
+		__field(unsigned,	keys			)
 	),
 
 	TP_fast_assign(
-		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->orig_dev	= orig_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->orig_sector	= orig_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
-		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+		__entry->bucket	= PTR_BUCKET_NR(b->c, &b->key, 0);
+		__entry->keys	= keys;
 	),
 
-	TP_printk("%d,%d  %s %llu + %u [%s] (from %d,%d %llu)",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->rwbs,
-		  (unsigned long long)__entry->sector,
-		  __entry->nr_sector, __entry->comm,
-		  MAJOR(__entry->orig_dev), MINOR(__entry->orig_dev),
-		  (unsigned long long)__entry->orig_sector)
+	TP_printk("bucket %zu keys %u", __entry->bucket, __entry->keys)
 );
 
-DEFINE_EVENT(bcache_cache_bio, bcache_cache_insert,
-
-	TP_PROTO(struct bio *bio,
-		 sector_t orig_sector,
-		 struct block_device *orig_bdev),
+DEFINE_EVENT(btree_split, bcache_btree_node_split,
+	TP_PROTO(struct btree *b, unsigned keys),
+	TP_ARGS(b, keys)
+);
 
-	TP_ARGS(bio, orig_sector, orig_bdev)
+DEFINE_EVENT(btree_split, bcache_btree_node_compact,
+	TP_PROTO(struct btree *b, unsigned keys),
+	TP_ARGS(b, keys)
 );
 
-DECLARE_EVENT_CLASS(bcache_gc,
+DEFINE_EVENT(btree_node, bcache_btree_set_root,
+	TP_PROTO(struct btree *b),
+	TP_ARGS(b)
+);
 
-	TP_PROTO(uint8_t *uuid),
+/* Allocator */
 
-	TP_ARGS(uuid),
+TRACE_EVENT(bcache_alloc_invalidate,
+	TP_PROTO(struct cache *ca),
+	TP_ARGS(ca),
 
 	TP_STRUCT__entry(
-		__field(uint8_t *,	uuid)
+		__field(unsigned,	free			)
+		__field(unsigned,	free_inc		)
+		__field(unsigned,	free_inc_size		)
+		__field(unsigned,	unused			)
 	),
 
 	TP_fast_assign(
-		__entry->uuid		= uuid;
+		__entry->free		= fifo_used(&ca->free);
+		__entry->free_inc	= fifo_used(&ca->free_inc);
+		__entry->free_inc_size	= ca->free_inc.size;
+		__entry->unused		= fifo_used(&ca->unused);
 	),
 
-	TP_printk("%pU", __entry->uuid)
+	TP_printk("free %u free_inc %u/%u unused %u", __entry->free,
+		  __entry->free_inc, __entry->free_inc_size, __entry->unused)
 );
 
+TRACE_EVENT(bcache_alloc_fail,
+	TP_PROTO(struct cache *ca),
+	TP_ARGS(ca),
 
-DEFINE_EVENT(bcache_gc, bcache_gc_start,
+	TP_STRUCT__entry(
+		__field(unsigned,	free			)
+		__field(unsigned,	free_inc		)
+		__field(unsigned,	unused			)
+		__field(unsigned,	blocked			)
+	),
 
-	     TP_PROTO(uint8_t *uuid),
+	TP_fast_assign(
+		__entry->free		= fifo_used(&ca->free);
+		__entry->free_inc	= fifo_used(&ca->free_inc);
+		__entry->unused		= fifo_used(&ca->unused);
+		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
+	),
 
-	     TP_ARGS(uuid)
+	TP_printk("free %u free_inc %u unused %u blocked %u", __entry->free,
+		  __entry->free_inc, __entry->unused, __entry->blocked)
 );
 
-DEFINE_EVENT(bcache_gc, bcache_gc_end,
+/* Background writeback */
 
-	     TP_PROTO(uint8_t *uuid),
+DEFINE_EVENT(bkey, bcache_writeback,
+	TP_PROTO(struct bkey *k),
+	TP_ARGS(k)
+);
 
-	     TP_ARGS(uuid)
+DEFINE_EVENT(bkey, bcache_writeback_collision,
+	TP_PROTO(struct bkey *k),
+	TP_ARGS(k)
 );
 
 #endif /* _TRACE_BCACHE_H */
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index d615f78cc6b6..41a6643e2136 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -670,10 +670,6 @@ perf_trace_##call(void *__data, proto)					\
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
-		      "profile buffer not large enough"))		\
-		return;							\
-									\
 	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
 		__entry_size, event_call->event.type, &__regs, &rctx);	\
 	if (!entry)							\
diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h
index d50036953497..1db453e4b550 100644
--- a/include/uapi/linux/firewire-cdev.h
+++ b/include/uapi/linux/firewire-cdev.h
@@ -215,8 +215,8 @@ struct fw_cdev_event_request2 {
  * with the %FW_CDEV_ISO_INTERRUPT bit set, when explicitly requested with
  * %FW_CDEV_IOC_FLUSH_ISO, or when there have been so many completed packets
  * without the interrupt bit set that the kernel's internal buffer for @header
- * is about to overflow.  (In the last case, kernels with ABI version < 5 drop
- * header data up to the next interrupt packet.)
+ * is about to overflow.  (In the last case, ABI versions < 5 drop header data
+ * up to the next interrupt packet.)
  *
  * Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT):
  *
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h
index 7692dc69ccf7..331499d597fa 100644
--- a/include/uapi/linux/usb/ch11.h
+++ b/include/uapi/linux/usb/ch11.h
@@ -11,6 +11,17 @@
 
 #include <linux/types.h>	/* __u8 etc */
 
+/* This is arbitrary.
+ * From USB 2.0 spec Table 11-13, offset 7, a hub can
+ * have up to 255 ports. The most yet reported is 10.
+ *
+ * Current Wireless USB host hardware (Intel i1480 for example) allows
+ * up to 22 devices to connect. Upcoming hardware might raise that
+ * limit. Because the arrays need to add a bit for hub status data, we
+ * use 31, so plus one evens out to four bytes.
+ */
+#define USB_MAXCHILDREN		31
+
 /*
  * Hub request types
  */
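
For illustration only (not part of the patch): the arithmetic the comment above alludes to, as a hedged C sketch. The macro name below is hypothetical; the point is that 31 per-port bits plus one hub-status bit pack into an exact number of bytes.

/* 31 port bits + 1 hub-status bit = 32 bits, i.e. exactly four bytes. */
#define HUB_BITMAP_BYTES	((USB_MAXCHILDREN + 1 + 7) / 8)	/* (31 + 1 + 7) / 8 == 4 */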
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index ffd4652de91c..65e12099ef89 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -103,12 +103,46 @@ typedef uint64_t blkif_sector_t;
 #define BLKIF_OP_DISCARD           5
 
 /*
+ * Recognized if "feature-max-indirect-segments" is present in the backend
+ * xenbus info. The "feature-max-indirect-segments" node contains the maximum
+ * number of segments allowed by the backend per request. If the node is
+ * present, the frontend might use blkif_request_indirect structs in order to
+ * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
+ * maximum number of indirect segments is fixed by the backend, but the
+ * frontend can issue requests with any number of indirect segments as long as
+ * it's less than the number provided by the backend. The indirect_grefs field
+ * in blkif_request_indirect should be filled by the frontend with the
+ * grant references of the pages that are holding the indirect segments.
+ * These pages are filled with an array of blkif_request_segment_aligned
+ * that hold the information about the segments. The number of indirect
+ * pages to use is determined by the maximum number of segments
+ * an indirect request contains. Every indirect page can contain a maximum
+ * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
+ * so to calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments/512).
+ *
+ * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
+ * create the "feature-max-indirect-segments" node!
+ */
+#define BLKIF_OP_INDIRECT          6
+
+/*
  * Maximum scatter/gather segments per request.
  * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
  * NB. This could be 12 if the ring indexes weren't stored in the same page.
  */
 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
+#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
+
+struct blkif_request_segment_aligned {
+	grant_ref_t gref;        /* reference to I/O buffer frame        */
+	/* @first_sect: first sector in frame to transfer (inclusive).   */
+	/* @last_sect: last sector in frame to transfer (inclusive).     */
+	uint8_t     first_sect, last_sect;
+	uint16_t    _pad; /* padding to make it 8 bytes, so it's cache-aligned */
+} __attribute__((__packed__));
+
 struct blkif_request_rw {
 	uint8_t        nr_segments;  /* number of segments                   */
 	blkif_vdev_t   handle;       /* only for read/write requests         */
@@ -147,12 +181,31 @@ struct blkif_request_other {
 	uint64_t     id;           /* private guest value, echoed in resp  */
 } __attribute__((__packed__));
 
+struct blkif_request_indirect {
+	uint8_t        indirect_op;
+	uint16_t       nr_segments;
+#ifdef CONFIG_X86_64
+	uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
+#endif
+	uint64_t       id;
+	blkif_sector_t sector_number;
+	blkif_vdev_t   handle;
+	uint16_t       _pad2;
+	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+#ifdef CONFIG_X86_64
+	uint32_t      _pad3;         /* make it 64 byte aligned */
+#else
+	uint64_t      _pad3;         /* make it 64 byte aligned */
+#endif
+} __attribute__((__packed__));
+
 struct blkif_request {
 	uint8_t        operation;    /* BLKIF_OP_???                         */
 	union {
 		struct blkif_request_rw rw;
 		struct blkif_request_discard discard;
 		struct blkif_request_other other;
+		struct blkif_request_indirect indirect;
 	} u;
 } __attribute__((__packed__));
 
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 75271b9a8f61..7d28aff605c7 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -188,6 +188,11 @@ struct __name##_back_ring {						\
 #define RING_REQUEST_CONS_OVERFLOW(_r, _cons)				\
     (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
 
+/* Ill-behaved frontend determination: Can there be this many requests? */
+#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)               \
+    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
+
+
 #define RING_PUSH_REQUESTS(_r) do {					\
     wmb(); /* back sees requests /before/ updated producer index */	\
     (_r)->sring->req_prod = (_r)->req_prod_pvt;				\