@@ -272,6 +272,10 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_SPARSEKMAP=m
CONFIG_INPUT_TABLET=y
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INTEL_LPSS_USB=m
+CONFIG_INTEL_VSC=m
+CONFIG_INTEL_VSC_ACE_DEBUG=m
+CONFIG_INTEL_VSC_PSE=m
CONFIG_INTERCONNECT=y
CONFIG_IOMMU_DEFAULT_DMA_LAZY=y
# CONFIG_IOMMU_DEFAULT_DMA_STRICT is not set
@@ -336,6 +340,7 @@ CONFIG_MFD_INTEL_PMT=m
CONFIG_MFD_IQS62X=m
CONFIG_MFD_JANZ_CMODIO=m
CONFIG_MFD_KEMPLD=m
+CONFIG_MFD_LJCA=m
CONFIG_MFD_LM3533=m
CONFIG_MFD_LP3943=m
CONFIG_MFD_MADERA=m
@@ -484,6 +489,7 @@ CONFIG_PINCTRL=y
CONFIG_PMIC_OPREGION=y
CONFIG_PM_DEVFREQ=y
CONFIG_POWERCAP=y
+CONFIG_POWER_CTRL_LOGIC=m
CONFIG_POWER_SUPPLY=y
CONFIG_PPP=y
CONFIG_PPS=y
@@ -286,6 +286,10 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_SPARSEKMAP=m
CONFIG_INPUT_TABLET=y
CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_INTEL_LPSS_USB is not set
+# CONFIG_INTEL_VSC is not set
+# CONFIG_INTEL_VSC_ACE_DEBUG is not set
+# CONFIG_INTEL_VSC_PSE is not set
CONFIG_INTERCONNECT=y
# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set
CONFIG_IOMMU_DEFAULT_DMA_STRICT=y
@@ -350,6 +354,7 @@ CONFIG_MFD_INTEL_PMT=m
CONFIG_MFD_IQS62X=m
CONFIG_MFD_JANZ_CMODIO=m
CONFIG_MFD_KEMPLD=m
+# CONFIG_MFD_LJCA is not set
CONFIG_MFD_LM3533=m
CONFIG_MFD_LP3943=m
CONFIG_MFD_MADERA=m
@@ -503,6 +508,7 @@ CONFIG_PINCTRL=y
# CONFIG_PMIC_OPREGION is not set
CONFIG_PM_DEVFREQ=y
CONFIG_POWERCAP=y
+# CONFIG_POWER_CTRL_LOGIC is not set
CONFIG_POWER_SUPPLY=y
CONFIG_PPP=y
CONFIG_PPS=y
@@ -280,6 +280,10 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_SPARSEKMAP=m
CONFIG_INPUT_TABLET=y
CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_INTEL_LPSS_USB is not set
+# CONFIG_INTEL_VSC is not set
+# CONFIG_INTEL_VSC_ACE_DEBUG is not set
+# CONFIG_INTEL_VSC_PSE is not set
CONFIG_INTERCONNECT=y
# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set
CONFIG_IOMMU_DEFAULT_DMA_STRICT=y
@@ -342,6 +346,7 @@ CONFIG_MFD_INTEL_PMT=m
CONFIG_MFD_IQS62X=m
CONFIG_MFD_JANZ_CMODIO=m
CONFIG_MFD_KEMPLD=m
+# CONFIG_MFD_LJCA is not set
CONFIG_MFD_LM3533=m
CONFIG_MFD_LP3943=m
CONFIG_MFD_MADERA=m
@@ -3977,6 +3977,7 @@ CONFIG_GPIO_ICH=m
CONFIG_GPIO_IT87=m
CONFIG_GPIO_JANZ_TTL=m
CONFIG_GPIO_KEMPLD=m
+CONFIG_GPIO_LJCA=m
CONFIG_GPIO_LOGICVC=m
CONFIG_GPIO_LP3943=m
CONFIG_GPIO_LP873X=m
@@ -4590,6 +4591,7 @@ CONFIG_I2C_HIX5HD2=m
CONFIG_I2C_IMX_LPI2C=m
CONFIG_I2C_ISMT=m
CONFIG_I2C_KEMPLD=m
+CONFIG_I2C_LJCA=m
CONFIG_I2C_MESON=m
CONFIG_I2C_MLXBF=m
CONFIG_I2C_MLXCPLD=m
@@ -4996,6 +4998,7 @@ CONFIG_INTEL_MEI=m
CONFIG_INTEL_MEI_HDCP=m
CONFIG_INTEL_MEI_ME=m
CONFIG_INTEL_MEI_TXE=m
+CONFIG_INTEL_MEI_VSC=m
CONFIG_INTEL_MEI_WDT=m
CONFIG_INTEL_MENLOW=m
CONFIG_INTEL_MRFLD_ADC=m
@@ -5044,6 +5047,8 @@ CONFIG_INTEL_TURBO_MAX_3=y
CONFIG_INTEL_TXT=y
CONFIG_INTEL_UNCORE_FREQ_CONTROL=m
CONFIG_INTEL_VBTN=m
+CONFIG_INTEL_VSC_ACE=m
+CONFIG_INTEL_VSC_CSI=m
CONFIG_INTEL_WMI=y
CONFIG_INTEL_WMI_SBL_FW_UPDATE=m
CONFIG_INTEL_WMI_THUNDERBOLT=m
@@ -10658,6 +10663,7 @@ CONFIG_SPI_IMX=m
# CONFIG_SPI_INTEL_SPI_PCI is not set
# CONFIG_SPI_INTEL_SPI_PLATFORM is not set
CONFIG_SPI_LANTIQ_SSC=m
+CONFIG_SPI_LJCA=m
CONFIG_SPI_LM70_LLP=m
CONFIG_SPI_LOOPBACK_TEST=m
CONFIG_SPI_MASTER=y
@@ -12139,6 +12145,7 @@ CONFIG_VIDEO_HDPVR=m
CONFIG_VIDEO_HEXIUM_GEMINI=m
CONFIG_VIDEO_HEXIUM_ORION=m
CONFIG_VIDEO_HI556=m
+CONFIG_VIDEO_HM11B1=m
CONFIG_VIDEO_I2C=m
CONFIG_VIDEO_IMX208=m
CONFIG_VIDEO_IMX214=m
@@ -12157,6 +12164,7 @@ CONFIG_VIDEO_IMX_CSI=m
CONFIG_VIDEO_IMX_MEDIA=m
CONFIG_VIDEO_IMX_PXP=m
CONFIG_VIDEO_IMX_VDOA=m
+CONFIG_VIDEO_INTEL_IPU6=m
CONFIG_VIDEO_IPU3_CIO2=m
CONFIG_VIDEO_IPU3_IMGU=m
CONFIG_VIDEO_IR_I2C=m
@@ -12193,6 +12201,8 @@ CONFIG_VIDEO_OMAP2_VOUT_VRFB=y
CONFIG_VIDEO_OMAP3=m
# CONFIG_VIDEO_OMAP3_DEBUG is not set
CONFIG_VIDEO_OMAP4=m
+CONFIG_VIDEO_OV01A10=m
+CONFIG_VIDEO_OV01A1S=m
CONFIG_VIDEO_OV02A10=m
CONFIG_VIDEO_OV13858=m
CONFIG_VIDEO_OV2640=m
@@ -278,6 +278,10 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_SPARSEKMAP=m
CONFIG_INPUT_TABLET=y
CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_INTEL_LPSS_USB is not set
+# CONFIG_INTEL_VSC is not set
+# CONFIG_INTEL_VSC_ACE_DEBUG is not set
+# CONFIG_INTEL_VSC_PSE is not set
CONFIG_INTERCONNECT=y
# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set
CONFIG_IOMMU_DEFAULT_DMA_STRICT=y
@@ -344,6 +348,7 @@ CONFIG_MFD_INTEL_PMT=m
CONFIG_MFD_IQS62X=m
CONFIG_MFD_JANZ_CMODIO=m
CONFIG_MFD_KEMPLD=m
+# CONFIG_MFD_LJCA is not set
CONFIG_MFD_LM3533=m
CONFIG_MFD_LP3943=m
CONFIG_MFD_MADERA=m
@@ -256,6 +256,9 @@ CONFIG_IMA_NG_TEMPLATE=y
# CONFIG_INPUT_SPARSEKMAP is not set
# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INTEL_VSC is not set
+# CONFIG_INTEL_VSC_ACE_DEBUG is not set
+# CONFIG_INTEL_VSC_PSE is not set
# CONFIG_INTERCONNECT is not set
# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set
CONFIG_IOMMU_DEFAULT_DMA_STRICT=y
@@ -353,6 +353,17 @@ config GPIO_IXP4XX
IXP4xx series of chips.
If unsure, say N.
+
+config GPIO_LJCA
+ tristate "INTEL La Jolla Cove Adapter GPIO support"
+ depends on MFD_LJCA
+ help
+ Select this option to enable GPIO driver for the INTEL
+ La Jolla Cove Adapter (LJCA) board.
+
+ This driver can also be built as a module. If so, the module
+ will be called gpio-ljca.
+
config GPIO_LOGICVC
tristate "Xylon LogiCVC GPIO support"
depends on MFD_SYSCON && OF
@@ -75,6 +75,7 @@ obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o
+obj-$(CONFIG_GPIO_LJCA) += gpio-ljca.o
obj-$(CONFIG_GPIO_LOGICVC) += gpio-logicvc.o
obj-$(CONFIG_GPIO_LOONGSON1) += gpio-loongson1.o
obj-$(CONFIG_GPIO_LOONGSON) += gpio-loongson.o
new file mode 100644
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel La Jolla Cove Adapter USB-GPIO driver
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mfd/ljca.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
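+/*
+ * A GPIO command payload is a struct gpio_packet header followed by one
+ * struct gpio_op entry per pin, so GPIO_PAYLOAD_LEN(n) is the number of
+ * bytes needed for a command touching n pins.
+ */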
+#define GPIO_PAYLOAD_LEN(pin_num) \
+ (sizeof(struct gpio_packet) + (pin_num) * sizeof(struct gpio_op))
+
+/* GPIO commands */
+#define GPIO_CONFIG 1
+#define GPIO_READ 2
+#define GPIO_WRITE 3
+#define GPIO_INT_EVENT 4
+#define GPIO_INT_MASK 5
+#define GPIO_INT_UNMASK 6
+
+#define GPIO_CONF_DISABLE BIT(0)
+#define GPIO_CONF_INPUT BIT(1)
+#define GPIO_CONF_OUTPUT BIT(2)
+#define GPIO_CONF_PULLUP BIT(3)
+#define GPIO_CONF_PULLDOWN BIT(4)
+#define GPIO_CONF_DEFAULT BIT(5)
+#define GPIO_CONF_INTERRUPT BIT(6)
+#define GPIO_INT_TYPE BIT(7)
+
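+/* Aliases for the GPIO_INT_TYPE bit: set selects edge, clear selects level */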
+#define GPIO_CONF_EDGE (1 << 7)
+#define GPIO_CONF_LEVEL (0 << 7)
+
+/* Intentional overlap with PULLUP / PULLDOWN */
+#define GPIO_CONF_SET BIT(3)
+#define GPIO_CONF_CLR BIT(4)
+
+struct gpio_op {
+ u8 index;
+ u8 value;
+} __packed;
+
+struct gpio_packet {
+ u8 num;
+ struct gpio_op item[];
+} __packed;
+
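+/*
+ * unmasked_irqs tracks what the irq core has requested, enabled_irqs what
+ * has actually been programmed into the adapter, and reenable_irqs the
+ * pins whose interrupts must be re-armed from the work item after an
+ * event, since the sleeping ljca_transfer() cannot be called from the
+ * IRQ handler itself.
+ */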
+struct ljca_gpio_dev {
+ struct platform_device *pdev;
+ struct gpio_chip gc;
+ struct ljca_gpio_info *ctr_info;
+ DECLARE_BITMAP(unmasked_irqs, MAX_GPIO_NUM);
+ DECLARE_BITMAP(enabled_irqs, MAX_GPIO_NUM);
+ DECLARE_BITMAP(reenable_irqs, MAX_GPIO_NUM);
+ u8 *connect_mode;
+ struct mutex irq_lock;
+ struct work_struct work;
+ struct mutex trans_lock;
+
+ u8 obuf[256];
+ u8 ibuf[256];
+};
+
+static bool ljca_gpio_valid(struct ljca_gpio_dev *ljca_gpio, int gpio_id)
+{
+ if (gpio_id >= ljca_gpio->ctr_info->num ||
+ !test_bit(gpio_id, ljca_gpio->ctr_info->valid_pin_map)) {
+ dev_err(&ljca_gpio->pdev->dev,
+ "invalid gpio_id: %d\n", gpio_id);
+ return false;
+ }
+
+ return true;
+}
+
+static int gpio_config(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id, u8 config)
+{
+ struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf;
+ int ret;
+
+ if (!ljca_gpio_valid(ljca_gpio, gpio_id))
+ return -EINVAL;
+
+ mutex_lock(&ljca_gpio->trans_lock);
+ packet->item[0].index = gpio_id;
+ packet->item[0].value = config | ljca_gpio->connect_mode[gpio_id];
+ packet->num = 1;
+
+ ret = ljca_transfer(ljca_gpio->pdev, GPIO_CONFIG, packet,
+ GPIO_PAYLOAD_LEN(packet->num), NULL, NULL);
+ mutex_unlock(&ljca_gpio->trans_lock);
+ return ret;
+}
+
+static int ljca_gpio_read(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id)
+{
+ struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf;
+ struct gpio_packet *ack_packet;
+ int ret;
+ int ibuf_len;
+
+ if (!ljca_gpio_valid(ljca_gpio, gpio_id))
+ return -EINVAL;
+
+ mutex_lock(&ljca_gpio->trans_lock);
+ packet->num = 1;
+ packet->item[0].index = gpio_id;
+ ret = ljca_transfer(ljca_gpio->pdev, GPIO_READ, packet,
+ GPIO_PAYLOAD_LEN(packet->num), ljca_gpio->ibuf,
+ &ibuf_len);
+
+ ack_packet = (struct gpio_packet *)ljca_gpio->ibuf;
+ if (ret || !ibuf_len || ack_packet->num != packet->num) {
+ dev_err(&ljca_gpio->pdev->dev, "%s failed gpio_id:%d ret %d %d",
+ __func__, gpio_id, ret, ack_packet->num);
+ mutex_unlock(&ljca_gpio->trans_lock);
+ return -EIO;
+ }
+
+ mutex_unlock(&ljca_gpio->trans_lock);
+ return (ack_packet->item[0].value > 0) ? 1 : 0;
+}
+
+static int ljca_gpio_write(struct ljca_gpio_dev *ljca_gpio, u8 gpio_id,
+ int value)
+{
+ struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf;
+ int ret;
+
+ mutex_lock(&ljca_gpio->trans_lock);
+ packet->num = 1;
+ packet->item[0].index = gpio_id;
+ packet->item[0].value = (value & 1);
+
+ ret = ljca_transfer(ljca_gpio->pdev, GPIO_WRITE, packet,
+ GPIO_PAYLOAD_LEN(packet->num), NULL, NULL);
+ mutex_unlock(&ljca_gpio->trans_lock);
+ return ret;
+}
+
+static int ljca_gpio_get_value(struct gpio_chip *chip, unsigned int offset)
+{
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
+
+ return ljca_gpio_read(ljca_gpio, offset);
+}
+
+static void ljca_gpio_set_value(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
+ int ret;
+
+ ret = ljca_gpio_write(ljca_gpio, offset, val);
+ if (ret)
+ dev_err(chip->parent,
+ "%s offset:%d val:%d set value failed %d\n", __func__,
+ offset, val, ret);
+}
+
+static int ljca_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
+ u8 config = GPIO_CONF_INPUT | GPIO_CONF_CLR;
+
+ return gpio_config(ljca_gpio, offset, config);
+}
+
+static int ljca_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
+ u8 config = GPIO_CONF_OUTPUT | GPIO_CONF_CLR;
+ int ret;
+
+ ret = gpio_config(ljca_gpio, offset, config);
+ if (ret)
+ return ret;
+
+ ljca_gpio_set_value(chip, offset, val);
+ return 0;
+}
+
+static int ljca_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(chip);
+
+ if (!ljca_gpio_valid(ljca_gpio, offset))
+ return -EINVAL;
+
+ ljca_gpio->connect_mode[offset] = 0;
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ljca_gpio->connect_mode[offset] |= GPIO_CONF_PULLUP;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ljca_gpio->connect_mode[offset] |= GPIO_CONF_PULLDOWN;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ case PIN_CONFIG_PERSIST_STATE:
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int ljca_enable_irq(struct ljca_gpio_dev *ljca_gpio, int gpio_id,
+ bool enable)
+{
+ struct gpio_packet *packet = (struct gpio_packet *)ljca_gpio->obuf;
+ int ret;
+
+ mutex_lock(&ljca_gpio->trans_lock);
+ packet->num = 1;
+ packet->item[0].index = gpio_id;
+ packet->item[0].value = 0;
+
+ dev_dbg(ljca_gpio->gc.parent, "%s %d", __func__, gpio_id);
+
+ ret = ljca_transfer(ljca_gpio->pdev,
+ enable ? GPIO_INT_UNMASK : GPIO_INT_MASK,
+ packet, GPIO_PAYLOAD_LEN(packet->num), NULL, NULL);
+ mutex_unlock(&ljca_gpio->trans_lock);
+ return ret;
+}
+
+static void ljca_gpio_async(struct work_struct *work)
+{
+ struct ljca_gpio_dev *ljca_gpio =
+ container_of(work, struct ljca_gpio_dev, work);
+ int gpio_id;
+ int unmasked;
+
+ for_each_set_bit(gpio_id, ljca_gpio->reenable_irqs,
+ ljca_gpio->gc.ngpio) {
+ clear_bit(gpio_id, ljca_gpio->reenable_irqs);
+ unmasked = test_bit(gpio_id, ljca_gpio->unmasked_irqs);
+ if (unmasked)
+ ljca_enable_irq(ljca_gpio, gpio_id, true);
+ }
+}
+
+static void ljca_gpio_event_cb(struct platform_device *pdev, u8 cmd,
+ const void *evt_data, int len)
+{
+ const struct gpio_packet *packet = evt_data;
+ struct ljca_gpio_dev *ljca_gpio = platform_get_drvdata(pdev);
+ int i;
+ int irq;
+
+ if (cmd != GPIO_INT_EVENT)
+ return;
+
+ for (i = 0; i < packet->num; i++) {
+ irq = irq_find_mapping(ljca_gpio->gc.irq.domain,
+ packet->item[i].index);
+ if (!irq) {
+ dev_err(ljca_gpio->gc.parent,
+ "gpio_id %d not mapped to IRQ\n",
+ packet->item[i].index);
+ return;
+ }
+
+ generic_handle_irq(irq);
+
+ set_bit(packet->item[i].index, ljca_gpio->reenable_irqs);
+ dev_dbg(ljca_gpio->gc.parent, "%s got one interrupt %d %d %d\n",
+ __func__, i, packet->item[i].index,
+ packet->item[i].value);
+ }
+
+ schedule_work(&ljca_gpio->work);
+}
+
+static void ljca_irq_unmask(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
+ int gpio_id = irqd_to_hwirq(irqd);
+
+ dev_dbg(ljca_gpio->gc.parent, "%s %d", __func__, gpio_id);
+ set_bit(gpio_id, ljca_gpio->unmasked_irqs);
+}
+
+static void ljca_irq_mask(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
+ int gpio_id = irqd_to_hwirq(irqd);
+
+ dev_dbg(ljca_gpio->gc.parent, "%s %d", __func__, gpio_id);
+ clear_bit(gpio_id, ljca_gpio->unmasked_irqs);
+}
+
+static int ljca_irq_set_type(struct irq_data *irqd, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
+ int gpio_id = irqd_to_hwirq(irqd);
+
+ ljca_gpio->connect_mode[gpio_id] = GPIO_CONF_INTERRUPT;
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ ljca_gpio->connect_mode[gpio_id] |=
+ GPIO_CONF_LEVEL | GPIO_CONF_PULLUP;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ ljca_gpio->connect_mode[gpio_id] |=
+ GPIO_CONF_LEVEL | GPIO_CONF_PULLDOWN;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ ljca_gpio->connect_mode[gpio_id] |=
+ GPIO_CONF_EDGE | GPIO_CONF_PULLUP;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ ljca_gpio->connect_mode[gpio_id] |=
+ GPIO_CONF_EDGE | GPIO_CONF_PULLDOWN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(ljca_gpio->gc.parent, "%s %d %x\n", __func__, gpio_id,
+ ljca_gpio->connect_mode[gpio_id]);
+ return 0;
+}
+
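+/*
+ * mask/unmask only update the bitmaps; irq_bus_lock/irq_bus_sync_unlock
+ * bracket those updates so the sleeping ljca_transfer() calls are issued
+ * once, here, instead of from atomic irqchip callbacks.
+ */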
+static void ljca_irq_bus_lock(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
+
+ mutex_lock(&ljca_gpio->irq_lock);
+}
+
+static void ljca_irq_bus_unlock(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct ljca_gpio_dev *ljca_gpio = gpiochip_get_data(gc);
+ int gpio_id = irqd_to_hwirq(irqd);
+ int enabled;
+ int unmasked;
+
+ enabled = test_bit(gpio_id, ljca_gpio->enabled_irqs);
+ unmasked = test_bit(gpio_id, ljca_gpio->unmasked_irqs);
+ dev_dbg(ljca_gpio->gc.parent, "%s %d %d %d\n", __func__, gpio_id,
+ enabled, unmasked);
+
+ if (enabled != unmasked) {
+ if (unmasked) {
+ gpio_config(ljca_gpio, gpio_id, 0);
+ ljca_enable_irq(ljca_gpio, gpio_id, true);
+ set_bit(gpio_id, ljca_gpio->enabled_irqs);
+ } else {
+ ljca_enable_irq(ljca_gpio, gpio_id, false);
+ clear_bit(gpio_id, ljca_gpio->enabled_irqs);
+ }
+ }
+
+ mutex_unlock(&ljca_gpio->irq_lock);
+}
+
+static unsigned int ljca_irq_startup(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+
+ return gpiochip_lock_as_irq(gc, irqd_to_hwirq(irqd));
+}
+
+static void ljca_irq_shutdown(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+
+ gpiochip_unlock_as_irq(gc, irqd_to_hwirq(irqd));
+}
+
+static struct irq_chip ljca_gpio_irqchip = {
+ .name = "ljca-irq",
+ .irq_mask = ljca_irq_mask,
+ .irq_unmask = ljca_irq_unmask,
+ .irq_set_type = ljca_irq_set_type,
+ .irq_bus_lock = ljca_irq_bus_lock,
+ .irq_bus_sync_unlock = ljca_irq_bus_unlock,
+ .irq_startup = ljca_irq_startup,
+ .irq_shutdown = ljca_irq_shutdown,
+};
+
+static int ljca_gpio_probe(struct platform_device *pdev)
+{
+ struct ljca_gpio_dev *ljca_gpio;
+ struct ljca_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct gpio_irq_chip *girq;
+
+ ljca_gpio = devm_kzalloc(&pdev->dev, sizeof(*ljca_gpio), GFP_KERNEL);
+ if (!ljca_gpio)
+ return -ENOMEM;
+
+ ljca_gpio->ctr_info = &pdata->gpio_info;
+ ljca_gpio->connect_mode =
+ devm_kcalloc(&pdev->dev, ljca_gpio->ctr_info->num,
+ sizeof(*ljca_gpio->connect_mode), GFP_KERNEL);
+ if (!ljca_gpio->connect_mode)
+ return -ENOMEM;
+
+ mutex_init(&ljca_gpio->irq_lock);
+ mutex_init(&ljca_gpio->trans_lock);
+ ljca_gpio->pdev = pdev;
+ ljca_gpio->gc.direction_input = ljca_gpio_direction_input;
+ ljca_gpio->gc.direction_output = ljca_gpio_direction_output;
+ ljca_gpio->gc.get = ljca_gpio_get_value;
+ ljca_gpio->gc.set = ljca_gpio_set_value;
+ ljca_gpio->gc.set_config = ljca_gpio_set_config;
+ ljca_gpio->gc.can_sleep = true;
+ ljca_gpio->gc.parent = &pdev->dev;
+
+ ljca_gpio->gc.base = -1;
+ ljca_gpio->gc.ngpio = ljca_gpio->ctr_info->num;
+ ljca_gpio->gc.label = "ljca-gpio";
+ ljca_gpio->gc.owner = THIS_MODULE;
+
+ platform_set_drvdata(pdev, ljca_gpio);
+ ljca_register_event_cb(pdev, ljca_gpio_event_cb);
+
+ girq = &ljca_gpio->gc.irq;
+ girq->chip = &ljca_gpio_irqchip;
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_simple_irq;
+
+ INIT_WORK(&ljca_gpio->work, ljca_gpio_async);
+ return devm_gpiochip_add_data(&pdev->dev, &ljca_gpio->gc, ljca_gpio);
+}
+
+static int ljca_gpio_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver ljca_gpio_driver = {
+ .driver.name = "ljca-gpio",
+ .probe = ljca_gpio_probe,
+ .remove = ljca_gpio_remove,
+};
+
+module_platform_driver(ljca_gpio_driver);
+
+MODULE_AUTHOR("Ye Xiang <xiang.ye@intel.com>");
+MODULE_AUTHOR("Zhang Lixu <lixu.zhang@intel.com>");
+MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-GPIO driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ljca-gpio");
@@ -1308,6 +1308,16 @@ config I2C_ICY
If you have a 2019 edition board with an LTC2990 sensor at address
0x4c, loading the module 'ltc2990' is sufficient to enable it.
+config I2C_LJCA
+ tristate "I2C functionality of INTEL La Jolla Cove Adapter"
+ depends on MFD_LJCA
+ help
+ If you say yes to this option, I2C functionality support of INTEL
+ La Jolla Cove Adapter (LJCA) will be included.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-ljca.
+
config I2C_MLXCPLD
tristate "Mellanox I2C driver"
depends on X86_64 || COMPILE_TEST
@@ -137,6 +137,7 @@ obj-$(CONFIG_I2C_BRCMSTB) += i2c-brcmstb.o
obj-$(CONFIG_I2C_CROS_EC_TUNNEL) += i2c-cros-ec-tunnel.o
obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
obj-$(CONFIG_I2C_ICY) += i2c-icy.o
+obj-$(CONFIG_I2C_LJCA) += i2c-ljca.o
obj-$(CONFIG_I2C_MLXBF) += i2c-mlxbf.o
obj-$(CONFIG_I2C_MLXCPLD) += i2c-mlxcpld.o
obj-$(CONFIG_I2C_OPAL) += i2c-opal.o
new file mode 100644
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel La Jolla Cove Adapter USB-I2C driver
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/mfd/ljca.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/* I2C commands */
+enum i2c_cmd {
+ I2C_INIT = 1,
+ I2C_XFER,
+ I2C_START,
+ I2C_STOP,
+ I2C_READ,
+ I2C_WRITE,
+};
+
+enum i2c_address_mode {
+ I2C_ADDRESS_MODE_7BIT,
+ I2C_ADDRESS_MODE_10BIT,
+};
+
+enum xfer_type {
+ READ_XFER_TYPE,
+ WRITE_XFER_TYPE,
+};
+
+#define DEFAULT_I2C_CONTROLLER_ID 1
+#define DEFAULT_I2C_CAPACITY 0
+#define DEFAULT_I2C_INTR_PIN 0
+
+/* I2C r/w Flags */
+#define I2C_SLAVE_TRANSFER_WRITE (0)
+#define I2C_SLAVE_TRANSFER_READ (1)
+
+/* i2c init flags */
+#define I2C_INIT_FLAG_MODE_MASK (0x1 << 0)
+#define I2C_INIT_FLAG_MODE_POLLING (0x0 << 0)
+#define I2C_INIT_FLAG_MODE_INTERRUPT (0x1 << 0)
+
+#define I2C_FLAG_ADDR_16BIT (0x1 << 0)
+
+#define I2C_INIT_FLAG_FREQ_MASK (0x3 << 1)
+#define I2C_FLAG_FREQ_100K (0x0 << 1)
+#define I2C_FLAG_FREQ_400K (0x1 << 1)
+#define I2C_FLAG_FREQ_1M (0x2 << 1)
+
+/* I2C Transfer */
+struct i2c_xfer {
+ u8 id;
+ u8 slave;
+ u16 flag; /* speed, 8/16bit addr, addr increase, etc */
+ u16 addr;
+ u16 len;
+ u8 data[];
+} __packed;
+
+/* I2C raw commands: Init/Start/Read/Write/Stop */
+struct i2c_rw_packet {
+ u8 id;
+ __le16 len;
+ u8 data[];
+} __packed;
+
+#define LJCA_I2C_MAX_XFER_SIZE 256
+#define LJCA_I2C_BUF_SIZE \
+ (LJCA_I2C_MAX_XFER_SIZE + sizeof(struct i2c_rw_packet))
+
+struct ljca_i2c_dev {
+ struct platform_device *pdev;
+ struct ljca_i2c_info *ctr_info;
+ struct i2c_adapter adap;
+
+ u8 obuf[LJCA_I2C_BUF_SIZE];
+ u8 ibuf[LJCA_I2C_BUF_SIZE];
+};
+
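+/*
+ * The adapter takes the 7-bit address in bits 7:1, leaving bit 0 free
+ * for the R/W flag ORed in by the caller; 10-bit addressing is not
+ * implemented, hence the 0xFF sentinel.
+ */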
+static u8 ljca_i2c_format_slave_addr(u8 slave_addr, enum i2c_address_mode mode)
+{
+ if (mode == I2C_ADDRESS_MODE_7BIT)
+ return slave_addr << 1;
+
+ return 0xFF;
+}
+
+static int ljca_i2c_init(struct ljca_i2c_dev *ljca_i2c, u8 id)
+{
+ struct i2c_rw_packet *w_packet = (struct i2c_rw_packet *)ljca_i2c->obuf;
+
+ memset(w_packet, 0, sizeof(*w_packet));
+ w_packet->id = id;
+ w_packet->len = cpu_to_le16(1);
+ w_packet->data[0] = I2C_FLAG_FREQ_400K;
+
+ return ljca_transfer(ljca_i2c->pdev, I2C_INIT, w_packet,
+ sizeof(*w_packet) + 1, NULL, NULL);
+}
+
+static int ljca_i2c_start(struct ljca_i2c_dev *ljca_i2c, u8 slave_addr,
+ enum xfer_type type)
+{
+ struct i2c_rw_packet *w_packet = (struct i2c_rw_packet *)ljca_i2c->obuf;
+ struct i2c_rw_packet *r_packet = (struct i2c_rw_packet *)ljca_i2c->ibuf;
+ int ret;
+ int ibuf_len;
+
+ memset(w_packet, 0, sizeof(*w_packet));
+ w_packet->id = ljca_i2c->ctr_info->id;
+ w_packet->len = cpu_to_le16(1);
+ w_packet->data[0] =
+ ljca_i2c_format_slave_addr(slave_addr, I2C_ADDRESS_MODE_7BIT);
+ w_packet->data[0] |= (type == READ_XFER_TYPE) ?
+ I2C_SLAVE_TRANSFER_READ :
+ I2C_SLAVE_TRANSFER_WRITE;
+
+ ret = ljca_transfer(ljca_i2c->pdev, I2C_START, w_packet,
+ sizeof(*w_packet) + 1, r_packet, &ibuf_len);
+
+ if (ret || ibuf_len < sizeof(*r_packet))
+ return -EIO;
+
+ if ((s16)le16_to_cpu(r_packet->len) < 0 ||
+ r_packet->id != w_packet->id) {
+ dev_err(&ljca_i2c->adap.dev,
+ "i2c start failed len:%d id:%d %d\n",
+ (s16)le16_to_cpu(r_packet->len), r_packet->id,
+ w_packet->id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ljca_i2c_stop(struct ljca_i2c_dev *ljca_i2c, u8 slave_addr)
+{
+ struct i2c_rw_packet *w_packet = (struct i2c_rw_packet *)ljca_i2c->obuf;
+ struct i2c_rw_packet *r_packet = (struct i2c_rw_packet *)ljca_i2c->ibuf;
+ int ret;
+ int ibuf_len;
+
+ memset(w_packet, 0, sizeof(*w_packet));
+ w_packet->id = ljca_i2c->ctr_info->id;
+ w_packet->len = cpu_to_le16(1);
+ w_packet->data[0] = 0;
+
+ ret = ljca_transfer(ljca_i2c->pdev, I2C_STOP, w_packet,
+ sizeof(*w_packet) + 1, r_packet, &ibuf_len);
+
+ if (ret || ibuf_len < sizeof(*r_packet))
+ return -EIO;
+
+ if ((s16)le16_to_cpu(r_packet->len) < 0 ||
+ r_packet->id != w_packet->id) {
+ dev_err(&ljca_i2c->adap.dev,
+ "i2c stop failed len:%d id:%d %d\n",
+ (s16)le16_to_cpu(r_packet->len), r_packet->id,
+ w_packet->id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ljca_i2c_pure_read(struct ljca_i2c_dev *ljca_i2c, u8 *data, int len)
+{
+ struct i2c_rw_packet *w_packet = (struct i2c_rw_packet *)ljca_i2c->obuf;
+ struct i2c_rw_packet *r_packet = (struct i2c_rw_packet *)ljca_i2c->ibuf;
+ int ibuf_len;
+ int ret;
+
+ if (len > LJCA_I2C_MAX_XFER_SIZE)
+ return -EINVAL;
+
+ memset(w_packet, 0, sizeof(*w_packet));
+ w_packet->id = ljca_i2c->ctr_info->id;
+ w_packet->len = cpu_to_le16(len);
+ ret = ljca_transfer(ljca_i2c->pdev, I2C_READ, w_packet,
+ sizeof(*w_packet) + 1, r_packet, &ibuf_len);
+ if (ret) {
+ dev_err(&ljca_i2c->adap.dev, "I2C_READ failed ret:%d\n", ret);
+ return ret;
+ }
+
+ if (ibuf_len < sizeof(*r_packet))
+ return -EIO;
+
+ if ((s16)le16_to_cpu(r_packet->len) != len ||
+ r_packet->id != w_packet->id) {
+ dev_err(&ljca_i2c->adap.dev,
+ "i2c raw read failed len:%d id:%d %d\n",
+ (s16)le16_to_cpu(r_packet->len), r_packet->id,
+ w_packet->id);
+ return -EIO;
+ }
+
+ memcpy(data, r_packet->data, len);
+
+ return 0;
+}
+
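+/*
+ * A read transaction is a raw START with the read bit set, an I2C_READ
+ * of len bytes, then a STOP, i.e. one firmware command per bus phase.
+ */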
+static int ljca_i2c_read(struct ljca_i2c_dev *ljca_i2c, u8 slave_addr, u8 *data,
+ u8 len)
+{
+ int ret;
+
+ ret = ljca_i2c_start(ljca_i2c, slave_addr, READ_XFER_TYPE);
+ if (ret)
+ return ret;
+
+ ret = ljca_i2c_pure_read(ljca_i2c, data, len);
+ if (ret) {
+ dev_err(&ljca_i2c->adap.dev, "i2c raw read failed ret:%d\n",
+ ret);
+
+ return ret;
+ }
+
+ return ljca_i2c_stop(ljca_i2c, slave_addr);
+}
+
+static int ljca_i2c_pure_write(struct ljca_i2c_dev *ljca_i2c, u8 *data, u8 len)
+{
+ struct i2c_rw_packet *w_packet = (struct i2c_rw_packet *)ljca_i2c->obuf;
+ struct i2c_rw_packet *r_packet = (struct i2c_rw_packet *)ljca_i2c->ibuf;
+ int ret;
+ int ibuf_len;
+
+ if (len > LJCA_I2C_MAX_XFER_SIZE)
+ return -EINVAL;
+
+ memset(w_packet, 0, sizeof(*w_packet));
+ w_packet->id = ljca_i2c->ctr_info->id;
+ w_packet->len = cpu_to_le16(len);
+ memcpy(w_packet->data, data, len);
+
+ ret = ljca_transfer(ljca_i2c->pdev, I2C_WRITE, w_packet,
+ sizeof(*w_packet) + len, r_packet,
+ &ibuf_len);
+
+ if (ret || ibuf_len < sizeof(*r_packet))
+ return -EIO;
+
+ if ((s16)le16_to_cpu(r_packet->len) != len ||
+ r_packet->id != w_packet->id) {
+ dev_err(&ljca_i2c->adap.dev,
+ "i2c write failed len:%d id:%d/%d\n",
+ (s16)le16_to_cpu(r_packet->len), r_packet->id,
+ w_packet->id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int ljca_i2c_write(struct ljca_i2c_dev *ljca_i2c, u8 slave_addr,
+ u8 *data, u8 len)
+{
+ int ret;
+
+ if (!data)
+ return -EINVAL;
+
+ ret = ljca_i2c_start(ljca_i2c, slave_addr, WRITE_XFER_TYPE);
+ if (ret)
+ return ret;
+
+ ret = ljca_i2c_pure_write(ljca_i2c, data, len);
+ if (ret)
+ return ret;
+
+ return ljca_i2c_stop(ljca_i2c, slave_addr);
+}
+
+static int ljca_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msg,
+ int num)
+{
+ struct ljca_i2c_dev *ljca_i2c;
+ struct i2c_msg *cur_msg;
+ int i, ret;
+
+ ljca_i2c = i2c_get_adapdata(adapter);
+ if (!ljca_i2c)
+ return -EINVAL;
+
+ for (i = 0; i < num; i++) {
+ cur_msg = &msg[i];
+ dev_dbg(&adapter->dev, "i:%d msg:(%d %d)\n", i, cur_msg->flags,
+ cur_msg->len);
+ if (cur_msg->flags & I2C_M_RD)
+ ret = ljca_i2c_read(ljca_i2c, cur_msg->addr,
+ cur_msg->buf, cur_msg->len);
+
+ else
+ ret = ljca_i2c_write(ljca_i2c, cur_msg->addr,
+ cur_msg->buf, cur_msg->len);
+
+ if (ret)
+ return ret;
+ }
+
+ return num;
+}
+
+static u32 ljca_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_adapter_quirks ljca_i2c_quirks = {
+ .max_read_len = LJCA_I2C_MAX_XFER_SIZE,
+ .max_write_len = LJCA_I2C_MAX_XFER_SIZE,
+};
+
+static const struct i2c_algorithm ljca_i2c_algo = {
+ .master_xfer = ljca_i2c_xfer,
+ .functionality = ljca_i2c_func,
+};
+
+static void try_bind_acpi(struct platform_device *pdev,
+ struct ljca_i2c_dev *ljca_i2c)
+{
+ struct acpi_device *parent, *child;
+ struct acpi_device *cur = ACPI_COMPANION(&pdev->dev);
+ const char *hid1;
+ const char *uid1;
+ char uid2[2] = { 0 };
+
+ if (!cur)
+ return;
+
+ hid1 = acpi_device_hid(cur);
+ uid1 = acpi_device_uid(cur);
+ snprintf(uid2, sizeof(uid2), "%d", ljca_i2c->ctr_info->id);
+
+ dev_dbg(&pdev->dev, "hid %s uid %s new uid%s\n", hid1, uid1, uid2);
+ /*
+ * If the pdev is already bound to the right ACPI device, just
+ * forward it to the adapter. Otherwise, look up the companion of
+ * the current adapter manually among the parent's children.
+ */
+ if (uid1 && !strcmp(uid1, uid2)) {
+ ACPI_COMPANION_SET(&ljca_i2c->adap.dev, cur);
+ return;
+ }
+
+ parent = ACPI_COMPANION(pdev->dev.parent);
+ if (!parent)
+ return;
+
+ list_for_each_entry(child, &parent->children, node) {
+ if (acpi_dev_hid_uid_match(child, hid1, uid2)) {
+ ACPI_COMPANION_SET(&ljca_i2c->adap.dev, child);
+ return;
+ }
+ }
+}
+
+static int ljca_i2c_probe(struct platform_device *pdev)
+{
+ struct ljca_i2c_dev *ljca_i2c;
+ struct ljca_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ ljca_i2c = devm_kzalloc(&pdev->dev, sizeof(*ljca_i2c), GFP_KERNEL);
+ if (!ljca_i2c)
+ return -ENOMEM;
+
+ ljca_i2c->pdev = pdev;
+ ljca_i2c->ctr_info = &pdata->i2c_info;
+
+ ljca_i2c->adap.owner = THIS_MODULE;
+ ljca_i2c->adap.class = I2C_CLASS_HWMON;
+ ljca_i2c->adap.algo = &ljca_i2c_algo;
+ ljca_i2c->adap.dev.parent = &pdev->dev;
+
+ try_bind_acpi(pdev, ljca_i2c);
+
+ ljca_i2c->adap.dev.of_node = pdev->dev.of_node;
+ i2c_set_adapdata(&ljca_i2c->adap, ljca_i2c);
+ snprintf(ljca_i2c->adap.name, sizeof(ljca_i2c->adap.name), "%s-%s-%d",
+ "ljca-i2c", dev_name(pdev->dev.parent),
+ ljca_i2c->ctr_info->id);
+
+ platform_set_drvdata(pdev, ljca_i2c);
+
+ ret = ljca_i2c_init(ljca_i2c, ljca_i2c->ctr_info->id);
+ if (ret) {
+ dev_err(&pdev->dev, "i2c init failed id:%d\n",
+ ljca_i2c->ctr_info->id);
+ return -EIO;
+ }
+
+ return i2c_add_adapter(&ljca_i2c->adap);
+}
+
+static int ljca_i2c_remove(struct platform_device *pdev)
+{
+ struct ljca_i2c_dev *ljca_i2c = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&ljca_i2c->adap);
+
+ return 0;
+}
+
+static struct platform_driver ljca_i2c_driver = {
+ .driver.name = "ljca-i2c",
+ .probe = ljca_i2c_probe,
+ .remove = ljca_i2c_remove,
+};
+
+module_platform_driver(ljca_i2c_driver);
+
+MODULE_AUTHOR("Ye Xiang <xiang.ye@intel.com>");
+MODULE_AUTHOR("Zhang Lixu <lixu.zhang@intel.com>");
+MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-I2C driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ljca-i2c");
@@ -1390,6 +1390,61 @@ config VIDEO_S5C73M3
This is a V4L2 sensor driver for Samsung S5C73M3
8 Mpixel camera.
+config POWER_CTRL_LOGIC
+ tristate "power control logic driver"
+ depends on GPIO_ACPI
+ help
+ This is a power control logic driver for sensor, the design
+ depends on camera sensor connections.
+ This driver controls power by getting and using managed GPIO
+ pins from ACPI config for sensors, such as HM11B1, OV01A1S.
+
+ To compile this driver as a module, choose M here: the
+ module will be called power_ctrl_logic.
+
+config VIDEO_OV01A10
+ tristate "OmniVision OV01A10 sensor support"
+ depends on VIDEO_V4L2 && I2C
+ depends on ACPI || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV01A10 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov01a10.
+
+config VIDEO_OV01A1S
+ tristate "OmniVision OV01A1S sensor support"
+ depends on POWER_CTRL_LOGIC
+ depends on VIDEO_V4L2 && I2C
+ depends on ACPI || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV01A1S camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov01a1s.
+
+config VIDEO_HM11B1
+ tristate "Himax HM11B1 sensor support"
+ depends on POWER_CTRL_LOGIC
+ depends on VIDEO_V4L2 && I2C
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Himax
+ HM11B1 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hm11b1.
+
endmenu
menu "Lens drivers"
@@ -134,3 +134,8 @@ obj-$(CONFIG_VIDEO_RDACM20) += rdacm20.o
obj-$(CONFIG_VIDEO_RDACM21) += rdacm21.o
obj-$(CONFIG_VIDEO_ST_MIPID02) += st-mipid02.o
obj-$(CONFIG_SDR_MAX2175) += max2175.o
+
+obj-$(CONFIG_VIDEO_HM11B1) += hm11b1.o
+obj-$(CONFIG_VIDEO_OV01A1S) += ov01a1s.o
+obj-$(CONFIG_VIDEO_OV01A10) += ov01a10.o
+obj-$(CONFIG_POWER_CTRL_LOGIC) += power_ctrl_logic.o
new file mode 100644
@@ -0,0 +1,1102 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020-2021 Intel Corporation.
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/version.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include "power_ctrl_logic.h"
+
+#define HM11B1_LINK_FREQ_384MHZ 384000000ULL
+#define HM11B1_SCLK 72000000LL
+#define HM11B1_MCLK 19200000
+#define HM11B1_DATA_LANES 1
+#define HM11B1_RGB_DEPTH 10
+
+#define HM11B1_REG_CHIP_ID 0x0000
+#define HM11B1_CHIP_ID 0x11B1
+
+#define HM11B1_REG_MODE_SELECT 0x0100
+#define HM11B1_MODE_STANDBY 0x00
+#define HM11B1_MODE_STREAMING 0x01
+
+/* vertical-timings from sensor */
+#define HM11B1_REG_VTS 0x3402
+#define HM11B1_VTS_DEF 0x037d
+#define HM11B1_VTS_MIN 0x0346
+#define HM11B1_VTS_MAX 0xffff
+
+/* horizontal-timings from sensor */
+#define HM11B1_REG_HTS 0x3404
+
+/* Exposure controls from sensor */
+#define HM11B1_REG_EXPOSURE 0x0202
+#define HM11B1_EXPOSURE_MIN 2
+#define HM11B1_EXPOSURE_MAX_MARGIN 2
+#define HM11B1_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define HM11B1_REG_ANALOG_GAIN 0x0205
+#define HM11B1_REG_ANALOG_GAIN_IR 0x0206
+#define HM11B1_ANAL_GAIN_MIN 0
+#define HM11B1_ANAL_GAIN_MAX 0xFF
+#define HM11B1_ANAL_GAIN_STEP 1
+
+/* Digital gain controls from sensor */
+#define HM11B1_REG_DGTL_GAIN 0x0207
+#define HM11B1_REG_DGTL_GAIN_IR 0x0209
+#define HM11B1_DGTL_GAIN_MIN 0x0
+#define HM11B1_DGTL_GAIN_MAX 0x3FF
+#define HM11B1_DGTL_GAIN_STEP 1
+#define HM11B1_DGTL_GAIN_DEFAULT 0x100
+
+/* register update control */
+#define HM11B1_REG_COMMAND_UPDATE 0x0104
+
+/* Test Pattern Control */
+#define HM11B1_REG_TEST_PATTERN 0x0601
+#define HM11B1_TEST_PATTERN_ENABLE 1
+#define HM11B1_TEST_PATTERN_BAR_SHIFT 1
+
+enum {
+ HM11B1_LINK_FREQ_384MHZ_INDEX,
+};
+
+struct hm11b1_reg {
+ u16 address;
+ u8 val;
+};
+
+struct hm11b1_reg_list {
+ u32 num_of_regs;
+ const struct hm11b1_reg *regs;
+};
+
+struct hm11b1_link_freq_config {
+ const struct hm11b1_reg_list reg_list;
+};
+
+struct hm11b1_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 hts;
+
+ /* Default vertical timing size */
+ u32 vts_def;
+
+ /* Min vertical timing size */
+ u32 vts_min;
+
+ /* Link frequency needed for this resolution */
+ u32 link_freq_index;
+
+ /* Sensor register settings for this resolution */
+ const struct hm11b1_reg_list reg_list;
+};
+
+static const struct hm11b1_reg mipi_data_rate_384mbps[] = {
+};
+
+/* RAW 10-bit 1292x800@30fps, MIPI 384 Mbps/lane */
+static const struct hm11b1_reg sensor_1292x800_30fps_setting[] = {
+ {0x0103, 0x00},
+ {0x0102, 0x01},
+ {0x0202, 0x03},
+ {0x0203, 0x7C},
+ {0x0205, 0x20},
+ {0x0207, 0x01},
+ {0x0208, 0x00},
+ {0x0209, 0x01},
+ {0x020A, 0x00},
+ {0x0300, 0x91},
+ {0x0301, 0x0A},
+ {0x0302, 0x02},
+ {0x0303, 0x2E},
+ {0x0304, 0x43},
+ {0x0306, 0x00},
+ {0x0307, 0x00},
+ {0x0340, 0x03},
+ {0x0341, 0x60},
+ {0x0342, 0x05},
+ {0x0343, 0xA0},
+ {0x0344, 0x00},
+ {0x0345, 0x00},
+ {0x0346, 0x03},
+ {0x0347, 0x2F},
+ {0x0350, 0xFF},
+ {0x0351, 0x00},
+ {0x0352, 0x00},
+ {0x0370, 0x00},
+ {0x0371, 0x00},
+ {0x0380, 0x00},
+ {0x0381, 0x00},
+ {0x0382, 0x00},
+ {0x1000, 0xC3},
+ {0x1001, 0xD0},
+ {0x100A, 0x13},
+ {0x2000, 0x00},
+ {0x2061, 0x01},
+ {0x2062, 0x00},
+ {0x2063, 0xC8},
+ {0x2100, 0x03},
+ {0x2101, 0xF0},
+ {0x2102, 0xF0},
+ {0x2103, 0x01},
+ {0x2104, 0x10},
+ {0x2105, 0x10},
+ {0x2106, 0x02},
+ {0x2107, 0x0A},
+ {0x2108, 0x10},
+ {0x2109, 0x15},
+ {0x210A, 0x1A},
+ {0x210B, 0x20},
+ {0x210C, 0x08},
+ {0x210D, 0x0A},
+ {0x210E, 0x0F},
+ {0x210F, 0x12},
+ {0x2110, 0x1C},
+ {0x2111, 0x20},
+ {0x2112, 0x23},
+ {0x2113, 0x2A},
+ {0x2114, 0x30},
+ {0x2115, 0x10},
+ {0x2116, 0x00},
+ {0x2117, 0x01},
+ {0x2118, 0x00},
+ {0x2119, 0x06},
+ {0x211A, 0x00},
+ {0x211B, 0x00},
+ {0x2615, 0x08},
+ {0x2616, 0x00},
+ {0x2700, 0x01},
+ {0x2711, 0x01},
+ {0x272F, 0x01},
+ {0x2800, 0x29},
+ {0x2821, 0xCE},
+ {0x2839, 0x27},
+ {0x283A, 0x01},
+ {0x2842, 0x01},
+ {0x2843, 0x00},
+ {0x3022, 0x11},
+ {0x3024, 0x30},
+ {0x3025, 0x12},
+ {0x3026, 0x00},
+ {0x3027, 0x81},
+ {0x3028, 0x01},
+ {0x3029, 0x00},
+ {0x302A, 0x30},
+ {0x3030, 0x00},
+ {0x3032, 0x00},
+ {0x3035, 0x01},
+ {0x303E, 0x00},
+ {0x3051, 0x00},
+ {0x3082, 0x0E},
+ {0x3084, 0x0D},
+ {0x30A8, 0x03},
+ {0x30C4, 0xA0},
+ {0x30D5, 0xC1},
+ {0x30D8, 0x00},
+ {0x30D9, 0x0D},
+ {0x30DB, 0xC2},
+ {0x30DE, 0x25},
+ {0x30E1, 0xC3},
+ {0x30E4, 0x25},
+ {0x30E7, 0xC4},
+ {0x30EA, 0x25},
+ {0x30ED, 0xC5},
+ {0x30F0, 0x25},
+ {0x30F2, 0x0C},
+ {0x30F3, 0x85},
+ {0x30F6, 0x25},
+ {0x30F8, 0x0C},
+ {0x30F9, 0x05},
+ {0x30FB, 0x40},
+ {0x30FC, 0x25},
+ {0x30FD, 0x54},
+ {0x30FE, 0x0C},
+ {0x3100, 0xC2},
+ {0x3103, 0x00},
+ {0x3104, 0x2B},
+ {0x3106, 0xC3},
+ {0x3109, 0x25},
+ {0x310C, 0xC4},
+ {0x310F, 0x25},
+ {0x3112, 0xC5},
+ {0x3115, 0x25},
+ {0x3117, 0x0C},
+ {0x3118, 0x85},
+ {0x311B, 0x25},
+ {0x311D, 0x0C},
+ {0x311E, 0x05},
+ {0x3121, 0x25},
+ {0x3123, 0x0C},
+ {0x3124, 0x0D},
+ {0x3126, 0x40},
+ {0x3127, 0x25},
+ {0x3128, 0x54},
+ {0x3129, 0x0C},
+ {0x3130, 0x20},
+ {0x3134, 0x60},
+ {0x3135, 0xC2},
+ {0x3139, 0x12},
+ {0x313A, 0x07},
+ {0x313F, 0x52},
+ {0x3140, 0x34},
+ {0x3141, 0x2E},
+ {0x314F, 0x07},
+ {0x3151, 0x47},
+ {0x3153, 0xB0},
+ {0x3154, 0x4A},
+ {0x3155, 0xC0},
+ {0x3157, 0x55},
+ {0x3158, 0x01},
+ {0x3165, 0xFF},
+ {0x316B, 0x12},
+ {0x316E, 0x12},
+ {0x3176, 0x12},
+ {0x3178, 0x01},
+ {0x317C, 0x10},
+ {0x317D, 0x05},
+ {0x317F, 0x07},
+ {0x3182, 0x07},
+ {0x3183, 0x11},
+ {0x3184, 0x88},
+ {0x3186, 0x28},
+ {0x3191, 0x00},
+ {0x3192, 0x20},
+ {0x3400, 0x48},
+ {0x3401, 0x00},
+ {0x3402, 0x06},
+ {0x3403, 0xFA},
+ {0x3404, 0x05},
+ {0x3405, 0x40},
+ {0x3406, 0x00},
+ {0x3407, 0x00},
+ {0x3408, 0x03},
+ {0x3409, 0x2F},
+ {0x340A, 0x00},
+ {0x340B, 0x00},
+ {0x340C, 0x00},
+ {0x340D, 0x00},
+ {0x340E, 0x00},
+ {0x340F, 0x00},
+ {0x3410, 0x00},
+ {0x3411, 0x01},
+ {0x3412, 0x00},
+ {0x3413, 0x03},
+ {0x3414, 0xB0},
+ {0x3415, 0x4A},
+ {0x3416, 0xC0},
+ {0x3418, 0x55},
+ {0x3419, 0x03},
+ {0x341B, 0x7D},
+ {0x341C, 0x00},
+ {0x341F, 0x03},
+ {0x3420, 0x00},
+ {0x3421, 0x02},
+ {0x3422, 0x00},
+ {0x3423, 0x02},
+ {0x3424, 0x01},
+ {0x3425, 0x02},
+ {0x3426, 0x00},
+ {0x3427, 0xA2},
+ {0x3428, 0x01},
+ {0x3429, 0x06},
+ {0x342A, 0xF8},
+ {0x3440, 0x01},
+ {0x3441, 0xBE},
+ {0x3442, 0x02},
+ {0x3443, 0x18},
+ {0x3444, 0x03},
+ {0x3445, 0x0C},
+ {0x3446, 0x06},
+ {0x3447, 0x18},
+ {0x3448, 0x09},
+ {0x3449, 0x24},
+ {0x344A, 0x08},
+ {0x344B, 0x08},
+ {0x345C, 0x00},
+ {0x345D, 0x44},
+ {0x345E, 0x02},
+ {0x345F, 0x43},
+ {0x3460, 0x04},
+ {0x3461, 0x3B},
+ {0x3466, 0xF8},
+ {0x3467, 0x43},
+ {0x347D, 0x02},
+ {0x3483, 0x05},
+ {0x3484, 0x0C},
+ {0x3485, 0x03},
+ {0x3486, 0x20},
+ {0x3487, 0x00},
+ {0x3488, 0x00},
+ {0x3489, 0x00},
+ {0x348A, 0x09},
+ {0x348B, 0x00},
+ {0x348C, 0x00},
+ {0x348D, 0x02},
+ {0x348E, 0x01},
+ {0x348F, 0x40},
+ {0x3490, 0x00},
+ {0x3491, 0xC8},
+ {0x3492, 0x00},
+ {0x3493, 0x02},
+ {0x3494, 0x00},
+ {0x3495, 0x02},
+ {0x3496, 0x02},
+ {0x3497, 0x06},
+ {0x3498, 0x05},
+ {0x3499, 0x04},
+ {0x349A, 0x09},
+ {0x349B, 0x05},
+ {0x349C, 0x17},
+ {0x349D, 0x05},
+ {0x349E, 0x00},
+ {0x349F, 0x00},
+ {0x34A0, 0x00},
+ {0x34A1, 0x00},
+ {0x34A2, 0x08},
+ {0x34A3, 0x08},
+ {0x34A4, 0x00},
+ {0x34A5, 0x0B},
+ {0x34A6, 0x0C},
+ {0x34A7, 0x32},
+ {0x34A8, 0x10},
+ {0x34A9, 0xE0},
+ {0x34AA, 0x52},
+ {0x34AB, 0x00},
+ {0x34AC, 0x60},
+ {0x34AD, 0x2B},
+ {0x34AE, 0x25},
+ {0x34AF, 0x48},
+ {0x34B1, 0x06},
+ {0x34B2, 0xF8},
+ {0x34C3, 0xB0},
+ {0x34C4, 0x4A},
+ {0x34C5, 0xC0},
+ {0x34C7, 0x55},
+ {0x34C8, 0x03},
+ {0x34CB, 0x00},
+ {0x353A, 0x00},
+ {0x355E, 0x48},
+ {0x3572, 0xB0},
+ {0x3573, 0x4A},
+ {0x3574, 0xC0},
+ {0x3576, 0x55},
+ {0x3577, 0x03},
+ {0x357A, 0x00},
+ {0x35DA, 0x00},
+ {0x4003, 0x02},
+ {0x4004, 0x02},
+};
+
+static const char * const hm11b1_test_pattern_menu[] = {
+ "Disabled",
+ "Solid Color",
+ "Color Bar",
+ "Color Bar Blending",
+ "PN11",
+};
+
+static const s64 link_freq_menu_items[] = {
+ HM11B1_LINK_FREQ_384MHZ,
+};
+
+static const struct hm11b1_link_freq_config link_freq_configs[] = {
+ [HM11B1_LINK_FREQ_384MHZ_INDEX] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_384mbps),
+ .regs = mipi_data_rate_384mbps,
+ }
+ },
+};
+
+static const struct hm11b1_mode supported_modes[] = {
+ {
+ .width = 1292,
+ .height = 800,
+ .hts = 1344,
+ .vts_def = HM11B1_VTS_DEF,
+ .vts_min = HM11B1_VTS_MIN,
+ .reg_list = {
+ .num_of_regs =
+ ARRAY_SIZE(sensor_1292x800_30fps_setting),
+ .regs = sensor_1292x800_30fps_setting,
+ },
+ .link_freq_index = HM11B1_LINK_FREQ_384MHZ_INDEX,
+ },
+};
+
+struct hm11b1 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct hm11b1_mode *cur_mode;
+
+ /* To serialize asynchronous callbacks */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+static inline struct hm11b1 *to_hm11b1(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct hm11b1, sd);
+}
+
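+/* pixel rate = link frequency * 2 (double data rate) * lanes / bits per pixel */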
+static u64 to_pixel_rate(u32 f_index)
+{
+ u64 pixel_rate = link_freq_menu_items[f_index] * 2 * HM11B1_DATA_LANES;
+
+ do_div(pixel_rate, HM11B1_RGB_DEPTH);
+
+ return pixel_rate;
+}
+
+static u64 to_pixels_per_line(u32 hts, u32 f_index)
+{
+ u64 ppl = hts * to_pixel_rate(f_index);
+
+ do_div(ppl, HM11B1_SCLK);
+
+ return ppl;
+}
+
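+/*
+ * Reads right-align the len payload bytes in a 4-byte buffer so the
+ * big-endian result can be extracted with a single get_unaligned_be32().
+ */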
+static int hm11b1_read_reg(struct hm11b1 *hm11b1, u16 reg, u16 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hm11b1->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[4] = {0};
+ int ret = 0;
+
+ if (len > sizeof(data_buf))
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[sizeof(data_buf) - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return ret < 0 ? ret : -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
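+/*
+ * Writes send the big-endian 16-bit register address followed by the
+ * len most significant bytes of the shifted 32-bit value, i.e. the
+ * value in big-endian order.
+ */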
+static int hm11b1_write_reg(struct hm11b1 *hm11b1, u16 reg, u16 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hm11b1->sd);
+ u8 buf[6];
+ int ret = 0;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << 8 * (4 - len), buf + 2);
+
+ ret = i2c_master_send(client, buf, len + 2);
+ if (ret != len + 2)
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+static int hm11b1_write_reg_list(struct hm11b1 *hm11b1,
+ const struct hm11b1_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hm11b1->sd);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ ret = hm11b1_write_reg(hm11b1, r_list->regs[i].address, 1,
+ r_list->regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "write reg 0x%4.4x return err = %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hm11b1_update_digital_gain(struct hm11b1 *hm11b1, u32 d_gain)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hm11b1->sd);
+ int ret = 0;
+
+ ret = hm11b1_write_reg(hm11b1, HM11B1_REG_DGTL_GAIN, 2, d_gain);
+ if (ret) {
+ dev_err(&client->dev, "failed to set HM11B1_REG_DGTL_GAIN");
+ return ret;
+ }
+
+ ret = hm11b1_write_reg(hm11b1, HM11B1_REG_DGTL_GAIN_IR, 2, d_gain);
+ if (ret) {
+ dev_err(&client->dev, "failed to set HM11B1_REG_DGTL_GAIN_IR");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int hm11b1_test_pattern(struct hm11b1 *hm11b1, u32 pattern)
+{
+ if (pattern)
+ pattern = pattern << HM11B1_TEST_PATTERN_BAR_SHIFT |
+ HM11B1_TEST_PATTERN_ENABLE;
+
+ return hm11b1_write_reg(hm11b1, HM11B1_REG_TEST_PATTERN, 1, pattern);
+}
+
+static int hm11b1_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hm11b1 *hm11b1 = container_of(ctrl->handler,
+ struct hm11b1, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&hm11b1->sd);
+ s64 exposure_max;
+ int ret = 0;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = hm11b1->cur_mode->height + ctrl->val -
+ HM11B1_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(hm11b1->exposure,
+ hm11b1->exposure->minimum,
+ exposure_max, hm11b1->exposure->step,
+ exposure_max);
+ }
+
+ /* V4L2 controls values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ ret = hm11b1_write_reg(hm11b1, HM11B1_REG_COMMAND_UPDATE, 1, 1);
+ if (ret) {
+ dev_err(&client->dev, "failed to enable HM11B1_REG_COMMAND_UPDATE");
+ pm_runtime_put(&client->dev);
+ return ret;
+ }
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = hm11b1_write_reg(hm11b1, HM11B1_REG_ANALOG_GAIN, 1,
+ ctrl->val);
+ ret |= hm11b1_write_reg(hm11b1, HM11B1_REG_ANALOG_GAIN_IR, 1,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = hm11b1_update_digital_gain(hm11b1, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ /* 4 least significant bits of exposure are fractional part */
+ ret = hm11b1_write_reg(hm11b1, HM11B1_REG_EXPOSURE, 2,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_VBLANK:
+ ret = hm11b1_write_reg(hm11b1, HM11B1_REG_VTS, 2,
+ hm11b1->cur_mode->height + ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = hm11b1_test_pattern(hm11b1, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ ret |= hm11b1_write_reg(hm11b1, HM11B1_REG_COMMAND_UPDATE, 1, 0);
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops hm11b1_ctrl_ops = {
+ .s_ctrl = hm11b1_set_ctrl,
+};
+
+static int hm11b1_init_controls(struct hm11b1 *hm11b1)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ const struct hm11b1_mode *cur_mode;
+ s64 exposure_max, h_blank, pixel_rate;
+ u32 vblank_min, vblank_max, vblank_default;
+ int size;
+ int ret = 0;
+
+ ctrl_hdlr = &hm11b1->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &hm11b1->mutex;
+ cur_mode = hm11b1->cur_mode;
+ size = ARRAY_SIZE(link_freq_menu_items);
+
+ hm11b1->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &hm11b1_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ size - 1, 0,
+ link_freq_menu_items);
+ if (hm11b1->link_freq)
+ hm11b1->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ pixel_rate = to_pixel_rate(HM11B1_LINK_FREQ_384MHZ_INDEX);
+ hm11b1->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &hm11b1_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ pixel_rate, 1, pixel_rate);
+
+ vblank_min = cur_mode->vts_min - cur_mode->height;
+ vblank_max = HM11B1_VTS_MAX - cur_mode->height;
+ vblank_default = cur_mode->vts_def - cur_mode->height;
+ hm11b1->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &hm11b1_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_min,
+ vblank_max, 1, vblank_default);
+
+ h_blank = to_pixels_per_line(cur_mode->hts, cur_mode->link_freq_index);
+ h_blank -= cur_mode->width;
+ hm11b1->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &hm11b1_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank, 1,
+ h_blank);
+ if (hm11b1->hblank)
+ hm11b1->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &hm11b1_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ HM11B1_ANAL_GAIN_MIN, HM11B1_ANAL_GAIN_MAX,
+ HM11B1_ANAL_GAIN_STEP, HM11B1_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &hm11b1_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ HM11B1_DGTL_GAIN_MIN, HM11B1_DGTL_GAIN_MAX,
+ HM11B1_DGTL_GAIN_STEP, HM11B1_DGTL_GAIN_DEFAULT);
+ exposure_max = cur_mode->vts_def - HM11B1_EXPOSURE_MAX_MARGIN;
+ hm11b1->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &hm11b1_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ HM11B1_EXPOSURE_MIN, exposure_max,
+ HM11B1_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &hm11b1_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(hm11b1_test_pattern_menu) - 1,
+ 0, 0, hm11b1_test_pattern_menu);
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ hm11b1->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void hm11b1_update_pad_format(const struct hm11b1_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
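+/*
+ * Start sequence: power the sensor up through the power control logic,
+ * program the PLL and per-mode register lists, apply the cached V4L2
+ * controls, then switch the sensor into streaming mode.
+ */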
+static int hm11b1_start_streaming(struct hm11b1 *hm11b1)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hm11b1->sd);
+ const struct hm11b1_reg_list *reg_list;
+ int link_freq_index;
+ int ret = 0;
+
+ power_ctrl_logic_set_power(1);
+ link_freq_index = hm11b1->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = hm11b1_write_reg_list(hm11b1, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls");
+ return ret;
+ }
+
+ reg_list = &hm11b1->cur_mode->reg_list;
+ ret = hm11b1_write_reg_list(hm11b1, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode");
+ return ret;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(hm11b1->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ ret = hm11b1_write_reg(hm11b1, HM11B1_REG_MODE_SELECT, 1,
+ HM11B1_MODE_STREAMING);
+ if (ret)
+ dev_err(&client->dev, "failed to start streaming");
+
+ return ret;
+}
+
+static void hm11b1_stop_streaming(struct hm11b1 *hm11b1)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hm11b1->sd);
+
+ if (hm11b1_write_reg(hm11b1, HM11B1_REG_MODE_SELECT, 1,
+ HM11B1_MODE_STANDBY))
+ dev_err(&client->dev, "failed to stop streaming");
+ power_ctrl_logic_set_power(0);
+}
+
+static int hm11b1_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct hm11b1 *hm11b1 = to_hm11b1(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (hm11b1->streaming == enable)
+ return 0;
+
+ mutex_lock(&hm11b1->mutex);
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ mutex_unlock(&hm11b1->mutex);
+ return ret;
+ }
+
+ ret = hm11b1_start_streaming(hm11b1);
+ if (ret) {
+ enable = 0;
+ hm11b1_stop_streaming(hm11b1);
+ pm_runtime_put(&client->dev);
+ }
+ } else {
+ hm11b1_stop_streaming(hm11b1);
+ pm_runtime_put(&client->dev);
+ }
+
+ hm11b1->streaming = enable;
+ mutex_unlock(&hm11b1->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused hm11b1_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hm11b1 *hm11b1 = to_hm11b1(sd);
+
+ mutex_lock(&hm11b1->mutex);
+ if (hm11b1->streaming)
+ hm11b1_stop_streaming(hm11b1);
+
+ mutex_unlock(&hm11b1->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused hm11b1_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hm11b1 *hm11b1 = to_hm11b1(sd);
+ int ret = 0;
+
+ mutex_lock(&hm11b1->mutex);
+ if (!hm11b1->streaming)
+ goto exit;
+
+ ret = hm11b1_start_streaming(hm11b1);
+ if (ret) {
+ hm11b1->streaming = false;
+ hm11b1_stop_streaming(hm11b1);
+ }
+
+exit:
+ mutex_unlock(&hm11b1->mutex);
+ return ret;
+}
+
+static int hm11b1_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct hm11b1 *hm11b1 = to_hm11b1(sd);
+ const struct hm11b1_mode *mode;
+ s32 vblank_def, h_blank;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ mutex_lock(&hm11b1->mutex);
+ hm11b1_update_pad_format(mode, &fmt->format);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
+ } else {
+ hm11b1->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(hm11b1->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(hm11b1->pixel_rate,
+ to_pixel_rate(mode->link_freq_index));
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->vts_def - mode->height;
+ __v4l2_ctrl_modify_range(hm11b1->vblank,
+ mode->vts_min - mode->height,
+ HM11B1_VTS_MAX - mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(hm11b1->vblank, vblank_def);
+ h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) -
+ mode->width;
+ __v4l2_ctrl_modify_range(hm11b1->hblank, h_blank, h_blank, 1,
+ h_blank);
+ }
+ mutex_unlock(&hm11b1->mutex);
+
+ return 0;
+}
+
+static int hm11b1_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct hm11b1 *hm11b1 = to_hm11b1(sd);
+
+ mutex_lock(&hm11b1->mutex);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&hm11b1->sd,
+ sd_state, fmt->pad);
+ else
+ hm11b1_update_pad_format(hm11b1->cur_mode, &fmt->format);
+
+ mutex_unlock(&hm11b1->mutex);
+
+ return 0;
+}
+
+static int hm11b1_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int hm11b1_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int hm11b1_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct hm11b1 *hm11b1 = to_hm11b1(sd);
+
+ mutex_lock(&hm11b1->mutex);
+ hm11b1_update_pad_format(&supported_modes[0],
+ v4l2_subdev_get_try_format(sd, fh->state, 0));
+ mutex_unlock(&hm11b1->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops hm11b1_video_ops = {
+ .s_stream = hm11b1_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops hm11b1_pad_ops = {
+ .set_fmt = hm11b1_set_format,
+ .get_fmt = hm11b1_get_format,
+ .enum_mbus_code = hm11b1_enum_mbus_code,
+ .enum_frame_size = hm11b1_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops hm11b1_subdev_ops = {
+ .video = &hm11b1_video_ops,
+ .pad = &hm11b1_pad_ops,
+};
+
+static const struct media_entity_operations hm11b1_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops hm11b1_internal_ops = {
+ .open = hm11b1_open,
+};
+
+static int hm11b1_identify_module(struct hm11b1 *hm11b1)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hm11b1->sd);
+ int ret;
+ u32 val;
+
+ ret = hm11b1_read_reg(hm11b1, HM11B1_REG_CHIP_ID, 2, &val);
+ if (ret)
+ return ret;
+
+ if (val != HM11B1_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ HM11B1_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int hm11b1_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hm11b1 *hm11b1 = to_hm11b1(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ mutex_destroy(&hm11b1->mutex);
+
+ return 0;
+}
+
+static int hm11b1_probe(struct i2c_client *client)
+{
+ struct hm11b1 *hm11b1;
+ int ret = 0;
+
+ hm11b1 = devm_kzalloc(&client->dev, sizeof(*hm11b1), GFP_KERNEL);
+ if (!hm11b1)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&hm11b1->sd, client, &hm11b1_subdev_ops);
+ power_ctrl_logic_set_power(0);
+ power_ctrl_logic_set_power(1);
+ ret = hm11b1_identify_module(hm11b1);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ return ret;
+ }
+
+ mutex_init(&hm11b1->mutex);
+ hm11b1->cur_mode = &supported_modes[0];
+ ret = hm11b1_init_controls(hm11b1);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ hm11b1->sd.internal_ops = &hm11b1_internal_ops;
+ hm11b1->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ hm11b1->sd.entity.ops = &hm11b1_subdev_entity_ops;
+ hm11b1->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ hm11b1->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&hm11b1->sd.entity, 1, &hm11b1->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&hm11b1->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ power_ctrl_logic_set_power(0);
+ return 0;
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&hm11b1->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(hm11b1->sd.ctrl_handler);
+ mutex_destroy(&hm11b1->mutex);
+
+ return ret;
+}
+
+static const struct dev_pm_ops hm11b1_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(hm11b1_suspend, hm11b1_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id hm11b1_acpi_ids[] = {
+ {"HIMX11B1"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, hm11b1_acpi_ids);
+#endif
+
+static struct i2c_driver hm11b1_i2c_driver = {
+ .driver = {
+ .name = "hm11b1",
+ .pm = &hm11b1_pm_ops,
+ .acpi_match_table = ACPI_PTR(hm11b1_acpi_ids),
+ },
+ .probe_new = hm11b1_probe,
+ .remove = hm11b1_remove,
+};
+
+module_i2c_driver(hm11b1_i2c_driver);
+
+MODULE_AUTHOR("Qiu, Tianshu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_AUTHOR("Lai, Jim <jim.lai@intel.com>");
+MODULE_DESCRIPTION("Himax HM11B1 sensor driver");
+MODULE_LICENSE("GPL v2");
new file mode 100644
@@ -0,0 +1,934 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020-2021 Intel Corporation.
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/version.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <linux/vsc.h>
+
+#define OV01A10_LINK_FREQ_400MHZ 400000000ULL
+#define OV01A10_SCLK 40000000LL
+#define OV01A10_MCLK 19200000
+#define OV01A10_DATA_LANES 1
+#define OV01A10_RGB_DEPTH 10
+
+#define OV01A10_REG_CHIP_ID 0x300a
+#define OV01A10_CHIP_ID 0x560141
+
+#define OV01A10_REG_MODE_SELECT 0x0100
+#define OV01A10_MODE_STANDBY 0x00
+#define OV01A10_MODE_STREAMING 0x01
+
+/* vertical-timings from sensor */
+#define OV01A10_REG_VTS 0x380e
+#define OV01A10_VTS_DEF 0x0380
+#define OV01A10_VTS_MIN 0x0380
+#define OV01A10_VTS_MAX 0xffff
+
+/* Exposure controls from sensor */
+#define OV01A10_REG_EXPOSURE 0x3501
+#define OV01A10_EXPOSURE_MIN 4
+#define OV01A10_EXPOSURE_MAX_MARGIN 8
+#define OV01A10_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define OV01A10_REG_ANALOG_GAIN 0x3508
+#define OV01A10_ANAL_GAIN_MIN 0x100
+#define OV01A10_ANAL_GAIN_MAX 0xffff
+#define OV01A10_ANAL_GAIN_STEP 1
+
+/* Digital gain controls from sensor */
+#define OV01A10_REG_DIGITAL_GAIN_B 0x350A
+#define OV01A10_REG_DIGITAL_GAIN_GB 0x3510
+#define OV01A10_REG_DIGITAL_GAIN_GR 0x3513
+#define OV01A10_REG_DIGITAL_GAIN_R 0x3516
+#define OV01A10_DGTL_GAIN_MIN 0
+#define OV01A10_DGTL_GAIN_MAX 0x3ffff
+#define OV01A10_DGTL_GAIN_STEP 1
+#define OV01A10_DGTL_GAIN_DEFAULT 1024
+
+/* Test Pattern Control */
+#define OV01A10_REG_TEST_PATTERN 0x4503
+#define OV01A10_TEST_PATTERN_ENABLE BIT(7)
+#define OV01A10_TEST_PATTERN_BAR_SHIFT 0
+
+enum {
+ OV01A10_LINK_FREQ_400MHZ_INDEX,
+};
+
+struct ov01a10_reg {
+ u16 address;
+ u8 val;
+};
+
+struct ov01a10_reg_list {
+ u32 num_of_regs;
+ const struct ov01a10_reg *regs;
+};
+
+struct ov01a10_link_freq_config {
+ const struct ov01a10_reg_list reg_list;
+};
+
+struct ov01a10_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 hts;
+
+ /* Default vertical timing size */
+ u32 vts_def;
+
+ /* Min vertical timing size */
+ u32 vts_min;
+
+ /* Link frequency needed for this resolution */
+ u32 link_freq_index;
+
+ /* Sensor register settings for this resolution */
+ const struct ov01a10_reg_list reg_list;
+};
+
+static const struct ov01a10_reg mipi_data_rate_720mbps[] = {
+};
+
+static const struct ov01a10_reg sensor_1280x800_setting[] = {
+ {0x0103, 0x01},
+ {0x0302, 0x00},
+ {0x0303, 0x06},
+ {0x0304, 0x01},
+ {0x0305, 0xe0},
+ {0x0306, 0x00},
+ {0x0308, 0x01},
+ {0x0309, 0x00},
+ {0x030c, 0x01},
+ {0x0322, 0x01},
+ {0x0323, 0x06},
+ {0x0324, 0x01},
+ {0x0325, 0x68},
+ {0x3002, 0xa1},
+ {0x301e, 0xf0},
+ {0x3022, 0x01},
+ {0x3501, 0x03},
+ {0x3502, 0x78},
+ {0x3504, 0x0c},
+ {0x3508, 0x01},
+ {0x3509, 0x00},
+ {0x3601, 0xc0},
+ {0x3603, 0x71},
+ {0x3610, 0x68},
+ {0x3611, 0x86},
+ {0x3640, 0x10},
+ {0x3641, 0x80},
+ {0x3642, 0xdc},
+ {0x3646, 0x55},
+ {0x3647, 0x57},
+ {0x364b, 0x00},
+ {0x3653, 0x10},
+ {0x3655, 0x00},
+ {0x3656, 0x00},
+ {0x365f, 0x0f},
+ {0x3661, 0x45},
+ {0x3662, 0x24},
+ {0x3663, 0x11},
+ {0x3664, 0x07},
+ {0x3709, 0x34},
+ {0x370b, 0x6f},
+ {0x3714, 0x22},
+ {0x371b, 0x27},
+ {0x371c, 0x67},
+ {0x371d, 0xa7},
+ {0x371e, 0xe7},
+ {0x3730, 0x81},
+ {0x3733, 0x10},
+ {0x3734, 0x40},
+ {0x3737, 0x04},
+ {0x3739, 0x1c},
+ {0x3767, 0x00},
+ {0x376c, 0x81},
+ {0x3772, 0x14},
+ {0x37c2, 0x04},
+ {0x37d8, 0x03},
+ {0x37d9, 0x0c},
+ {0x37e0, 0x00},
+ {0x37e1, 0x08},
+ {0x37e2, 0x10},
+ {0x37e3, 0x04},
+ {0x37e4, 0x04},
+ {0x37e5, 0x03},
+ {0x37e6, 0x04},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x05},
+ {0x3805, 0x0f},
+ {0x3806, 0x03},
+ {0x3807, 0x2f},
+ {0x3808, 0x05},
+ {0x3809, 0x00},
+ {0x380a, 0x03},
+ {0x380b, 0x20},
+ {0x380c, 0x02},
+ {0x380d, 0xe8},
+ {0x380e, 0x03},
+ {0x380f, 0x80},
+ {0x3810, 0x00},
+ {0x3811, 0x09},
+ {0x3812, 0x00},
+ {0x3813, 0x08},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x01},
+ {0x3817, 0x01},
+ {0x3820, 0xa8},
+ {0x3822, 0x13},
+ {0x3832, 0x28},
+ {0x3833, 0x10},
+ {0x3b00, 0x00},
+ {0x3c80, 0x00},
+ {0x3c88, 0x02},
+ {0x3c8c, 0x07},
+ {0x3c8d, 0x40},
+ {0x3cc7, 0x80},
+ {0x4000, 0xc3},
+ {0x4001, 0xe0},
+ {0x4003, 0x40},
+ {0x4008, 0x02},
+ {0x4009, 0x19},
+ {0x400a, 0x01},
+ {0x400b, 0x6c},
+ {0x4011, 0x00},
+ {0x4041, 0x00},
+ {0x4300, 0xff},
+ {0x4301, 0x00},
+ {0x4302, 0x0f},
+ {0x4503, 0x00},
+ {0x4601, 0x50},
+ {0x4800, 0x64},
+ {0x481f, 0x34},
+ {0x4825, 0x33},
+ {0x4837, 0x11},
+ {0x4881, 0x40},
+ {0x4883, 0x01},
+ {0x4890, 0x00},
+ {0x4901, 0x00},
+ {0x4902, 0x00},
+ {0x4b00, 0x2a},
+ {0x4b0d, 0x00},
+ {0x450a, 0x04},
+ {0x450b, 0x00},
+ {0x5000, 0x65},
+ {0x5200, 0x18},
+ {0x5004, 0x00},
+ {0x5080, 0x40},
+ {0x0305, 0xf4},
+ {0x0325, 0xc2},
+ {0x380c, 0x05},
+ {0x380d, 0xd0},
+};
+
+static const char * const ov01a10_test_pattern_menu[] = {
+ "Disabled",
+ "Color Bar",
+ "Top-Bottom Darker Color Bar",
+ "Right-Left Darker Color Bar",
+ "Color Bar type 4",
+};
+
+static const s64 link_freq_menu_items[] = {
+ OV01A10_LINK_FREQ_400MHZ,
+};
+
+static const struct ov01a10_link_freq_config link_freq_configs[] = {
+ [OV01A10_LINK_FREQ_400MHZ_INDEX] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_720mbps),
+ .regs = mipi_data_rate_720mbps,
+ }
+ },
+};
+
+static const struct ov01a10_mode supported_modes[] = {
+ {
+ .width = 1280,
+ .height = 800,
+ .hts = 1488,
+ .vts_def = OV01A10_VTS_DEF,
+ .vts_min = OV01A10_VTS_MIN,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(sensor_1280x800_setting),
+ .regs = sensor_1280x800_setting,
+ },
+ .link_freq_index = OV01A10_LINK_FREQ_400MHZ_INDEX,
+ },
+};
+
+struct ov01a10 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct ov01a10_mode *cur_mode;
+
+ /* To serialize asynchronous callbacks */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+static inline struct ov01a10 *to_ov01a10(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct ov01a10, sd);
+}
+
+static int ov01a10_read_reg(struct ov01a10 *ov01a10, u16 reg, u16 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[4] = {0};
+ int ret = 0;
+
+ if (len > sizeof(data_buf))
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
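+ /* read into the tail of data_buf so the be32 load below is correct for any len <= 4 */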
+ msgs[1].buf = &data_buf[sizeof(data_buf) - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return ret < 0 ? ret : -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+static int ov01a10_write_reg(struct ov01a10 *ov01a10, u16 reg, u16 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ u8 buf[6];
+ int ret = 0;
+
+ if (len > 4)
+ return -EINVAL;
+
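+ /* big-endian register address followed by exactly 'len' value bytes */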
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << 8 * (4 - len), buf + 2);
+
+ ret = i2c_master_send(client, buf, len + 2);
+ if (ret != len + 2)
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+static int ov01a10_write_reg_list(struct ov01a10 *ov01a10,
+ const struct ov01a10_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ ret = ov01a10_write_reg(ov01a10, r_list->regs[i].address, 1,
+ r_list->regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "write reg 0x%4.4x return err = %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ov01a10_update_digital_gain(struct ov01a10 *ov01a10, u32 d_gain)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
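+ /* shift the control value into the register format shared by all four Bayer channels */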
+ u32 real = d_gain << 6;
+ int ret = 0;
+
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_DIGITAL_GAIN_B, 3, real);
+ if (ret) {
+ dev_err(&client->dev, "failed to set OV01A10_REG_DIGITAL_GAIN_B");
+ return ret;
+ }
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_DIGITAL_GAIN_GB, 3, real);
+ if (ret) {
+ dev_err(&client->dev, "failed to set OV01A10_REG_DIGITAL_GAIN_GB");
+ return ret;
+ }
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_DIGITAL_GAIN_GR, 3, real);
+ if (ret) {
+ dev_err(&client->dev, "failed to set OV01A10_REG_DIGITAL_GAIN_GR");
+ return ret;
+ }
+
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_DIGITAL_GAIN_R, 3, real);
+ if (ret) {
+ dev_err(&client->dev, "failed to set OV01A10_REG_DIGITAL_GAIN_R");
+ return ret;
+ }
+ return ret;
+}
+
+static int ov01a10_test_pattern(struct ov01a10 *ov01a10, u32 pattern)
+{
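+ /* menu index 0 disables the pattern; indexes 1..4 select a bar mode and set the enable bit */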
+ if (pattern)
+ pattern = (pattern - 1) << OV01A10_TEST_PATTERN_BAR_SHIFT |
+ OV01A10_TEST_PATTERN_ENABLE;
+
+ return ov01a10_write_reg(ov01a10, OV01A10_REG_TEST_PATTERN, 1, pattern);
+}
+
+static int ov01a10_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov01a10 *ov01a10 = container_of(ctrl->handler,
+ struct ov01a10, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ s64 exposure_max;
+ int ret = 0;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = ov01a10->cur_mode->height + ctrl->val -
+ OV01A10_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(ov01a10->exposure,
+ ov01a10->exposure->minimum,
+ exposure_max, ov01a10->exposure->step,
+ exposure_max);
+ }
+
+ /* V4L2 controls values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_ANALOG_GAIN, 2,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = ov01a10_update_digital_gain(ov01a10, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_EXPOSURE, 2,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_VBLANK:
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_VTS, 2,
+ ov01a10->cur_mode->height + ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov01a10_test_pattern(ov01a10, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov01a10_ctrl_ops = {
+ .s_ctrl = ov01a10_set_ctrl,
+};
+
+static int ov01a10_init_controls(struct ov01a10 *ov01a10)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ const struct ov01a10_mode *cur_mode;
+ s64 exposure_max, h_blank;
+ u32 vblank_min, vblank_max, vblank_default;
+ int size;
+ int ret = 0;
+
+ ctrl_hdlr = &ov01a10->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &ov01a10->mutex;
+ cur_mode = ov01a10->cur_mode;
+ size = ARRAY_SIZE(link_freq_menu_items);
+
+ ov01a10->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &ov01a10_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ size - 1, 0,
+ link_freq_menu_items);
+ if (ov01a10->link_freq)
+ ov01a10->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ov01a10->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ OV01A10_SCLK, 1, OV01A10_SCLK);
+
+ vblank_min = cur_mode->vts_min - cur_mode->height;
+ vblank_max = OV01A10_VTS_MAX - cur_mode->height;
+ vblank_default = cur_mode->vts_def - cur_mode->height;
+ ov01a10->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_min,
+ vblank_max, 1, vblank_default);
+
+ h_blank = cur_mode->hts - cur_mode->width;
+ ov01a10->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank,
+ 1, h_blank);
+ if (ov01a10->hblank)
+ ov01a10->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV01A10_ANAL_GAIN_MIN, OV01A10_ANAL_GAIN_MAX,
+ OV01A10_ANAL_GAIN_STEP, OV01A10_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV01A10_DGTL_GAIN_MIN, OV01A10_DGTL_GAIN_MAX,
+ OV01A10_DGTL_GAIN_STEP, OV01A10_DGTL_GAIN_DEFAULT);
+ exposure_max = cur_mode->vts_def - OV01A10_EXPOSURE_MAX_MARGIN;
+ ov01a10->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a10_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV01A10_EXPOSURE_MIN,
+ exposure_max,
+ OV01A10_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov01a10_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov01a10_test_pattern_menu) - 1,
+ 0, 0, ov01a10_test_pattern_menu);
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ ov01a10->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void ov01a10_update_pad_format(const struct ov01a10_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int ov01a10_start_streaming(struct ov01a10 *ov01a10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ const struct ov01a10_reg_list *reg_list;
+ int link_freq_index;
+ int ret = 0;
+
+ link_freq_index = ov01a10->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = ov01a10_write_reg_list(ov01a10, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls");
+ return ret;
+ }
+
+ reg_list = &ov01a10->cur_mode->reg_list;
+ ret = ov01a10_write_reg_list(ov01a10, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode");
+ return ret;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(ov01a10->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_MODE_SELECT, 1,
+ OV01A10_MODE_STREAMING);
+ if (ret)
+ dev_err(&client->dev, "failed to start streaming");
+
+ return ret;
+}
+
+static void ov01a10_stop_streaming(struct ov01a10 *ov01a10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ int ret = 0;
+
+ ret = ov01a10_write_reg(ov01a10, OV01A10_REG_MODE_SELECT, 1,
+ OV01A10_MODE_STANDBY);
+ if (ret)
+ dev_err(&client->dev, "failed to stop streaming");
+}
+
+static int ov01a10_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov01a10 *ov01a10 = to_ov01a10(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (ov01a10->streaming == enable)
+ return 0;
+
+ mutex_lock(&ov01a10->mutex);
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ mutex_unlock(&ov01a10->mutex);
+ return ret;
+ }
+
+ ret = ov01a10_start_streaming(ov01a10);
+ if (ret) {
+ enable = 0;
+ ov01a10_stop_streaming(ov01a10);
+ pm_runtime_put(&client->dev);
+ }
+ } else {
+ ov01a10_stop_streaming(ov01a10);
+ pm_runtime_put(&client->dev);
+ }
+
+ ov01a10->streaming = enable;
+ mutex_unlock(&ov01a10->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused ov01a10_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov01a10 *ov01a10 = to_ov01a10(sd);
+
+ mutex_lock(&ov01a10->mutex);
+ if (ov01a10->streaming)
+ ov01a10_stop_streaming(ov01a10);
+
+ mutex_unlock(&ov01a10->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused ov01a10_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov01a10 *ov01a10 = to_ov01a10(sd);
+ int ret = 0;
+
+ mutex_lock(&ov01a10->mutex);
+ if (!ov01a10->streaming)
+ goto exit;
+
+ ret = ov01a10_start_streaming(ov01a10);
+ if (ret) {
+ ov01a10->streaming = false;
+ ov01a10_stop_streaming(ov01a10);
+ }
+
+exit:
+ mutex_unlock(&ov01a10->mutex);
+ return ret;
+}
+
+static int ov01a10_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov01a10 *ov01a10 = to_ov01a10(sd);
+ const struct ov01a10_mode *mode;
+ s32 vblank_def, h_blank;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ mutex_lock(&ov01a10->mutex);
+ ov01a10_update_pad_format(mode, &fmt->format);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
+ } else {
+ ov01a10->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(ov01a10->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(ov01a10->pixel_rate, OV01A10_SCLK);
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->vts_def - mode->height;
+ __v4l2_ctrl_modify_range(ov01a10->vblank,
+ mode->vts_min - mode->height,
+ OV01A10_VTS_MAX - mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(ov01a10->vblank, vblank_def);
+ h_blank = mode->hts - mode->width;
+ __v4l2_ctrl_modify_range(ov01a10->hblank, h_blank, h_blank, 1,
+ h_blank);
+ }
+ mutex_unlock(&ov01a10->mutex);
+
+ return 0;
+}
+
+static int ov01a10_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov01a10 *ov01a10 = to_ov01a10(sd);
+
+ mutex_lock(&ov01a10->mutex);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&ov01a10->sd,
+ sd_state, fmt->pad);
+ else
+ ov01a10_update_pad_format(ov01a10->cur_mode, &fmt->format);
+
+ mutex_unlock(&ov01a10->mutex);
+
+ return 0;
+}
+
+static int ov01a10_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
+
+ return 0;
+}
+
+static int ov01a10_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SBGGR10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov01a10_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct ov01a10 *ov01a10 = to_ov01a10(sd);
+
+ mutex_lock(&ov01a10->mutex);
+ ov01a10_update_pad_format(&supported_modes[0],
+ v4l2_subdev_get_try_format(sd, fh->state, 0));
+ mutex_unlock(&ov01a10->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov01a10_video_ops = {
+ .s_stream = ov01a10_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov01a10_pad_ops = {
+ .set_fmt = ov01a10_set_format,
+ .get_fmt = ov01a10_get_format,
+ .enum_mbus_code = ov01a10_enum_mbus_code,
+ .enum_frame_size = ov01a10_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops ov01a10_subdev_ops = {
+ .video = &ov01a10_video_ops,
+ .pad = &ov01a10_pad_ops,
+};
+
+static const struct media_entity_operations ov01a10_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov01a10_internal_ops = {
+ .open = ov01a10_open,
+};
+
+static int ov01a10_identify_module(struct ov01a10 *ov01a10)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a10->sd);
+ int ret;
+ u32 val;
+
+ ret = ov01a10_read_reg(ov01a10, OV01A10_REG_CHIP_ID, 3, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV01A10_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ OV01A10_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int ov01a10_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov01a10 *ov01a10 = to_ov01a10(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ mutex_destroy(&ov01a10->mutex);
+
+ return 0;
+}
+
+static int ov01a10_probe(struct i2c_client *client)
+{
+ struct ov01a10 *ov01a10;
+ int ret = 0;
+ struct vsc_mipi_config conf;
+ struct vsc_camera_status status;
+
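+ /* acquire the sensor from the Intel VSC; -EAGAIN means the VSC is not ready yet */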
+ conf.lane_num = OV01A10_DATA_LANES;
+ /* frequency unit 100k */
+ conf.freq = OV01A10_LINK_FREQ_400MHZ / 100000;
+ ret = vsc_acquire_camera_sensor(&conf, NULL, NULL, &status);
+ if (ret == -EAGAIN)
+ return -EPROBE_DEFER;
+ else if (ret) {
+ dev_err(&client->dev, "Acquire VSC failed.\n");
+ return ret;
+ }
+ ov01a10 = devm_kzalloc(&client->dev, sizeof(*ov01a10), GFP_KERNEL);
+ if (!ov01a10) {
+ ret = -ENOMEM;
+ goto probe_error_ret;
+ }
+
+ v4l2_i2c_subdev_init(&ov01a10->sd, client, &ov01a10_subdev_ops);
+
+ ret = ov01a10_identify_module(ov01a10);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ goto probe_error_ret;
+ }
+
+ mutex_init(&ov01a10->mutex);
+ ov01a10->cur_mode = &supported_modes[0];
+ ret = ov01a10_init_controls(ov01a10);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ov01a10->sd.internal_ops = &ov01a10_internal_ops;
+ ov01a10->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov01a10->sd.entity.ops = &ov01a10_subdev_entity_ops;
+ ov01a10->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov01a10->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov01a10->sd.entity, 1, &ov01a10->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&ov01a10->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ vsc_release_camera_sensor(&status);
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&ov01a10->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(ov01a10->sd.ctrl_handler);
+ mutex_destroy(&ov01a10->mutex);
+
+probe_error_ret:
+ vsc_release_camera_sensor(&status);
+ return ret;
+}
+
+static const struct dev_pm_ops ov01a10_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ov01a10_suspend, ov01a10_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov01a10_acpi_ids[] = {
+ {"OVTI01A0"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, ov01a10_acpi_ids);
+#endif
+
+static struct i2c_driver ov01a10_i2c_driver = {
+ .driver = {
+ .name = "ov01a10",
+ .pm = &ov01a10_pm_ops,
+ .acpi_match_table = ACPI_PTR(ov01a10_acpi_ids),
+ },
+ .probe_new = ov01a10_probe,
+ .remove = ov01a10_remove,
+};
+
+module_i2c_driver(ov01a10_i2c_driver);
+
+MODULE_AUTHOR("Wang Yating <yating.wang@intel.com>");
+MODULE_DESCRIPTION("OmniVision OV01A10 sensor driver");
+MODULE_LICENSE("GPL v2");
new file mode 100644
@@ -0,0 +1,949 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020-2021 Intel Corporation.
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/version.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include "power_ctrl_logic.h"
+#include <linux/vsc.h>
+
+#define OV01A1S_LINK_FREQ_400MHZ 400000000ULL
+#define OV01A1S_SCLK 40000000LL
+#define OV01A1S_MCLK 19200000
+#define OV01A1S_DATA_LANES 1
+#define OV01A1S_RGB_DEPTH 10
+
+#define OV01A1S_REG_CHIP_ID 0x300a
+#define OV01A1S_CHIP_ID 0x560141
+
+#define OV01A1S_REG_MODE_SELECT 0x0100
+#define OV01A1S_MODE_STANDBY 0x00
+#define OV01A1S_MODE_STREAMING 0x01
+
+/* vertical-timings from sensor */
+#define OV01A1S_REG_VTS 0x380e
+#define OV01A1S_VTS_DEF 0x0380
+#define OV01A1S_VTS_MIN 0x0380
+#define OV01A1S_VTS_MAX 0xffff
+
+/* Exposure controls from sensor */
+#define OV01A1S_REG_EXPOSURE 0x3501
+#define OV01A1S_EXPOSURE_MIN 4
+#define OV01A1S_EXPOSURE_MAX_MARGIN 8
+#define OV01A1S_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define OV01A1S_REG_ANALOG_GAIN 0x3508
+#define OV01A1S_ANAL_GAIN_MIN 0x100
+#define OV01A1S_ANAL_GAIN_MAX 0xffff
+#define OV01A1S_ANAL_GAIN_STEP 1
+
+/* Digital gain controls from sensor */
+#define OV01A1S_REG_DIGITAL_GAIN_B 0x350A
+#define OV01A1S_REG_DIGITAL_GAIN_GB 0x3510
+#define OV01A1S_REG_DIGITAL_GAIN_GR 0x3513
+#define OV01A1S_REG_DIGITAL_GAIN_R 0x3516
+#define OV01A1S_DGTL_GAIN_MIN 0
+#define OV01A1S_DGTL_GAIN_MAX 0x3ffff
+#define OV01A1S_DGTL_GAIN_STEP 1
+#define OV01A1S_DGTL_GAIN_DEFAULT 1024
+
+/* Test Pattern Control */
+#define OV01A1S_REG_TEST_PATTERN 0x4503
+#define OV01A1S_TEST_PATTERN_ENABLE BIT(7)
+#define OV01A1S_TEST_PATTERN_BAR_SHIFT 0
+
+enum {
+ OV01A1S_LINK_FREQ_400MHZ_INDEX,
+};
+
+struct ov01a1s_reg {
+ u16 address;
+ u8 val;
+};
+
+struct ov01a1s_reg_list {
+ u32 num_of_regs;
+ const struct ov01a1s_reg *regs;
+};
+
+struct ov01a1s_link_freq_config {
+ const struct ov01a1s_reg_list reg_list;
+};
+
+struct ov01a1s_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 hts;
+
+ /* Default vertical timing size */
+ u32 vts_def;
+
+ /* Min vertical timing size */
+ u32 vts_min;
+
+ /* Link frequency needed for this resolution */
+ u32 link_freq_index;
+
+ /* Sensor register settings for this resolution */
+ const struct ov01a1s_reg_list reg_list;
+};
+
+static const struct ov01a1s_reg mipi_data_rate_720mbps[] = {
+};
+
+static const struct ov01a1s_reg sensor_1296x800_setting[] = {
+ {0x0103, 0x01},
+ {0x0302, 0x00},
+ {0x0303, 0x06},
+ {0x0304, 0x01},
+ {0x0305, 0x90},
+ {0x0306, 0x00},
+ {0x0308, 0x01},
+ {0x0309, 0x00},
+ {0x030c, 0x01},
+ {0x0322, 0x01},
+ {0x0323, 0x06},
+ {0x0324, 0x01},
+ {0x0325, 0x68},
+ {0x3002, 0xa1},
+ {0x301e, 0xf0},
+ {0x3022, 0x01},
+ {0x3501, 0x03},
+ {0x3502, 0x78},
+ {0x3504, 0x0c},
+ {0x3508, 0x01},
+ {0x3509, 0x00},
+ {0x3601, 0xc0},
+ {0x3603, 0x71},
+ {0x3610, 0x68},
+ {0x3611, 0x86},
+ {0x3640, 0x10},
+ {0x3641, 0x80},
+ {0x3642, 0xdc},
+ {0x3646, 0x55},
+ {0x3647, 0x57},
+ {0x364b, 0x00},
+ {0x3653, 0x10},
+ {0x3655, 0x00},
+ {0x3656, 0x00},
+ {0x365f, 0x0f},
+ {0x3661, 0x45},
+ {0x3662, 0x24},
+ {0x3663, 0x11},
+ {0x3664, 0x07},
+ {0x3709, 0x34},
+ {0x370b, 0x6f},
+ {0x3714, 0x22},
+ {0x371b, 0x27},
+ {0x371c, 0x67},
+ {0x371d, 0xa7},
+ {0x371e, 0xe7},
+ {0x3730, 0x81},
+ {0x3733, 0x10},
+ {0x3734, 0x40},
+ {0x3737, 0x04},
+ {0x3739, 0x1c},
+ {0x3767, 0x00},
+ {0x376c, 0x81},
+ {0x3772, 0x14},
+ {0x37c2, 0x04},
+ {0x37d8, 0x03},
+ {0x37d9, 0x0c},
+ {0x37e0, 0x00},
+ {0x37e1, 0x08},
+ {0x37e2, 0x10},
+ {0x37e3, 0x04},
+ {0x37e4, 0x04},
+ {0x37e5, 0x03},
+ {0x37e6, 0x04},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x00},
+ {0x3804, 0x05},
+ {0x3805, 0x0f},
+ {0x3806, 0x03},
+ {0x3807, 0x2f},
+ {0x3808, 0x05},
+ {0x3809, 0x00},
+ {0x380a, 0x03},
+ {0x380b, 0x1e},
+ {0x380c, 0x05},
+ {0x380d, 0xd0},
+ {0x380e, 0x03},
+ {0x380f, 0x80},
+ {0x3810, 0x00},
+ {0x3811, 0x09},
+ {0x3812, 0x00},
+ {0x3813, 0x08},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x01},
+ {0x3817, 0x01},
+ {0x3820, 0xa8},
+ {0x3822, 0x03},
+ {0x3832, 0x28},
+ {0x3833, 0x10},
+ {0x3b00, 0x00},
+ {0x3c80, 0x00},
+ {0x3c88, 0x02},
+ {0x3c8c, 0x07},
+ {0x3c8d, 0x40},
+ {0x3cc7, 0x80},
+ {0x4000, 0xc3},
+ {0x4001, 0xe0},
+ {0x4003, 0x40},
+ {0x4008, 0x02},
+ {0x4009, 0x19},
+ {0x400a, 0x01},
+ {0x400b, 0x6c},
+ {0x4011, 0x00},
+ {0x4041, 0x00},
+ {0x4300, 0xff},
+ {0x4301, 0x00},
+ {0x4302, 0x0f},
+ {0x4503, 0x00},
+ {0x4601, 0x50},
+ {0x481f, 0x34},
+ {0x4825, 0x33},
+ {0x4837, 0x14},
+ {0x4881, 0x40},
+ {0x4883, 0x01},
+ {0x4890, 0x00},
+ {0x4901, 0x00},
+ {0x4902, 0x00},
+ {0x4b00, 0x2a},
+ {0x4b0d, 0x00},
+ {0x450a, 0x04},
+ {0x450b, 0x00},
+ {0x5000, 0x65},
+ {0x5004, 0x00},
+ {0x5080, 0x40},
+ {0x5200, 0x18},
+ {0x4837, 0x14},
+ {0x0305, 0xf4},
+ {0x0325, 0xc2},
+ {0x3808, 0x05},
+ {0x3809, 0x10},
+ {0x380a, 0x03},
+ {0x380b, 0x1e},
+ {0x3810, 0x00},
+ {0x3811, 0x00},
+ {0x3812, 0x00},
+ {0x3813, 0x09},
+ {0x3820, 0x88},
+ {0x373d, 0x24},
+};
+
+static const char * const ov01a1s_test_pattern_menu[] = {
+ "Disabled",
+ "Color Bar",
+ "Top-Bottom Darker Color Bar",
+ "Right-Left Darker Color Bar",
+ "Color Bar type 4",
+};
+
+static const s64 link_freq_menu_items[] = {
+ OV01A1S_LINK_FREQ_400MHZ,
+};
+
+static const struct ov01a1s_link_freq_config link_freq_configs[] = {
+ [OV01A1S_LINK_FREQ_400MHZ_INDEX] = {
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_720mbps),
+ .regs = mipi_data_rate_720mbps,
+ }
+ },
+};
+
+static const struct ov01a1s_mode supported_modes[] = {
+ {
+ .width = 1296,
+ .height = 798,
+ .hts = 1488,
+ .vts_def = OV01A1S_VTS_DEF,
+ .vts_min = OV01A1S_VTS_MIN,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(sensor_1296x800_setting),
+ .regs = sensor_1296x800_setting,
+ },
+ .link_freq_index = OV01A1S_LINK_FREQ_400MHZ_INDEX,
+ },
+};
+
+struct ov01a1s {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct ov01a1s_mode *cur_mode;
+
+ /* To serialize asynchronous callbacks */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+static inline struct ov01a1s *to_ov01a1s(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct ov01a1s, sd);
+}
+
+static int ov01a1s_read_reg(struct ov01a1s *ov01a1s, u16 reg, u16 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a1s->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[4] = {0};
+ int ret = 0;
+
+ if (len > sizeof(data_buf))
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
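+ /* read into the tail of data_buf so the be32 load below is correct for any len <= 4 */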
+ msgs[1].buf = &data_buf[sizeof(data_buf) - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return ret < 0 ? ret : -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+static int ov01a1s_write_reg(struct ov01a1s *ov01a1s, u16 reg, u16 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a1s->sd);
+ u8 buf[6];
+ int ret = 0;
+
+ if (len > 4)
+ return -EINVAL;
+
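+ /* big-endian register address followed by exactly 'len' value bytes */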
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << 8 * (4 - len), buf + 2);
+
+ ret = i2c_master_send(client, buf, len + 2);
+ if (ret != len + 2)
+ return ret < 0 ? ret : -EIO;
+
+ return 0;
+}
+
+static int ov01a1s_write_reg_list(struct ov01a1s *ov01a1s,
+ const struct ov01a1s_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a1s->sd);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ ret = ov01a1s_write_reg(ov01a1s, r_list->regs[i].address, 1,
+ r_list->regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "write reg 0x%4.4x return err = %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ov01a1s_update_digital_gain(struct ov01a1s *ov01a1s, u32 d_gain)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a1s->sd);
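+ /* shift the control value into the register format shared by all four Bayer channels */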
+ u32 real = d_gain << 6;
+ int ret = 0;
+
+ ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_DIGITAL_GAIN_B, 3, real);
+ if (ret) {
+ dev_err(&client->dev, "failed to set OV01A1S_REG_DIGITAL_GAIN_B");
+ return ret;
+ }
+ ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_DIGITAL_GAIN_GB, 3, real);
+ if (ret) {
+ dev_err(&client->dev, "failed to set OV01A1S_REG_DIGITAL_GAIN_GB");
+ return ret;
+ }
+ ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_DIGITAL_GAIN_GR, 3, real);
+ if (ret) {
+ dev_err(&client->dev, "failed to set OV01A1S_REG_DIGITAL_GAIN_GR");
+ return ret;
+ }
+
+ ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_DIGITAL_GAIN_R, 3, real);
+ if (ret) {
+ dev_err(&client->dev, "failed to set OV01A1S_REG_DIGITAL_GAIN_R");
+ return ret;
+ }
+ return ret;
+}
+
+static int ov01a1s_test_pattern(struct ov01a1s *ov01a1s, u32 pattern)
+{
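+ /* menu index 0 disables the pattern; indexes 1..4 select a bar mode and set the enable bit */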
+ if (pattern)
+ pattern = (pattern - 1) << OV01A1S_TEST_PATTERN_BAR_SHIFT |
+ OV01A1S_TEST_PATTERN_ENABLE;
+
+ return ov01a1s_write_reg(ov01a1s, OV01A1S_REG_TEST_PATTERN, 1, pattern);
+}
+
+static int ov01a1s_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov01a1s *ov01a1s = container_of(ctrl->handler,
+ struct ov01a1s, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a1s->sd);
+ s64 exposure_max;
+ int ret = 0;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = ov01a1s->cur_mode->height + ctrl->val -
+ OV01A1S_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(ov01a1s->exposure,
+ ov01a1s->exposure->minimum,
+ exposure_max, ov01a1s->exposure->step,
+ exposure_max);
+ }
+
+ /* V4L2 controls values will be applied only when power is already up */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_ANALOG_GAIN, 2,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = ov01a1s_update_digital_gain(ov01a1s, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_EXPOSURE, 2,
+ ctrl->val);
+ break;
+
+ case V4L2_CID_VBLANK:
+ ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_VTS, 2,
+ ov01a1s->cur_mode->height + ctrl->val);
+ break;
+
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov01a1s_test_pattern(ov01a1s, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov01a1s_ctrl_ops = {
+ .s_ctrl = ov01a1s_set_ctrl,
+};
+
+static int ov01a1s_init_controls(struct ov01a1s *ov01a1s)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ const struct ov01a1s_mode *cur_mode;
+ s64 exposure_max, h_blank;
+ u32 vblank_min, vblank_max, vblank_default;
+ int size;
+ int ret = 0;
+
+ ctrl_hdlr = &ov01a1s->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &ov01a1s->mutex;
+ cur_mode = ov01a1s->cur_mode;
+ size = ARRAY_SIZE(link_freq_menu_items);
+
+ ov01a1s->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &ov01a1s_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ size - 1, 0,
+ link_freq_menu_items);
+ if (ov01a1s->link_freq)
+ ov01a1s->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ov01a1s->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ OV01A1S_SCLK, 1, OV01A1S_SCLK);
+
+ vblank_min = cur_mode->vts_min - cur_mode->height;
+ vblank_max = OV01A1S_VTS_MAX - cur_mode->height;
+ vblank_default = cur_mode->vts_def - cur_mode->height;
+ ov01a1s->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_min,
+ vblank_max, 1, vblank_default);
+
+ h_blank = cur_mode->hts - cur_mode->width;
+ ov01a1s->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank,
+ 1, h_blank);
+ if (ov01a1s->hblank)
+ ov01a1s->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV01A1S_ANAL_GAIN_MIN, OV01A1S_ANAL_GAIN_MAX,
+ OV01A1S_ANAL_GAIN_STEP, OV01A1S_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV01A1S_DGTL_GAIN_MIN, OV01A1S_DGTL_GAIN_MAX,
+ OV01A1S_DGTL_GAIN_STEP, OV01A1S_DGTL_GAIN_DEFAULT);
+ exposure_max = cur_mode->vts_def - OV01A1S_EXPOSURE_MAX_MARGIN;
+ ov01a1s->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov01a1s_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV01A1S_EXPOSURE_MIN,
+ exposure_max,
+ OV01A1S_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov01a1s_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov01a1s_test_pattern_menu) - 1,
+ 0, 0, ov01a1s_test_pattern_menu);
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ ov01a1s->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
+static void ov01a1s_update_pad_format(const struct ov01a1s_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->field = V4L2_FIELD_NONE;
+}
+
+static int ov01a1s_start_streaming(struct ov01a1s *ov01a1s)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a1s->sd);
+ const struct ov01a1s_reg_list *reg_list;
+ int link_freq_index;
+ int ret = 0;
+
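+ /* the discrete power control logic must be up before any sensor register access */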
+ power_ctrl_logic_set_power(1);
+ link_freq_index = ov01a1s->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = ov01a1s_write_reg_list(ov01a1s, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls");
+ return ret;
+ }
+
+ reg_list = &ov01a1s->cur_mode->reg_list;
+ ret = ov01a1s_write_reg_list(ov01a1s, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode");
+ return ret;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(ov01a1s->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_MODE_SELECT, 1,
+ OV01A1S_MODE_STREAMING);
+ if (ret)
+ dev_err(&client->dev, "failed to start streaming");
+
+ return ret;
+}
+
+static void ov01a1s_stop_streaming(struct ov01a1s *ov01a1s)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a1s->sd);
+ int ret = 0;
+
+ ret = ov01a1s_write_reg(ov01a1s, OV01A1S_REG_MODE_SELECT, 1,
+ OV01A1S_MODE_STANDBY);
+ if (ret)
+ dev_err(&client->dev, "failed to stop streaming");
+ power_ctrl_logic_set_power(0);
+}
+
+static int ov01a1s_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (ov01a1s->streaming == enable)
+ return 0;
+
+ mutex_lock(&ov01a1s->mutex);
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ mutex_unlock(&ov01a1s->mutex);
+ return ret;
+ }
+
+ ret = ov01a1s_start_streaming(ov01a1s);
+ if (ret) {
+ enable = 0;
+ ov01a1s_stop_streaming(ov01a1s);
+ pm_runtime_put(&client->dev);
+ }
+ } else {
+ ov01a1s_stop_streaming(ov01a1s);
+ pm_runtime_put(&client->dev);
+ }
+
+ ov01a1s->streaming = enable;
+ mutex_unlock(&ov01a1s->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused ov01a1s_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+
+ mutex_lock(&ov01a1s->mutex);
+ if (ov01a1s->streaming)
+ ov01a1s_stop_streaming(ov01a1s);
+
+ mutex_unlock(&ov01a1s->mutex);
+
+ return 0;
+}
+
+static int __maybe_unused ov01a1s_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+ int ret = 0;
+
+ mutex_lock(&ov01a1s->mutex);
+ if (!ov01a1s->streaming)
+ goto exit;
+
+ ret = ov01a1s_start_streaming(ov01a1s);
+ if (ret) {
+ ov01a1s->streaming = false;
+ ov01a1s_stop_streaming(ov01a1s);
+ }
+
+exit:
+ mutex_unlock(&ov01a1s->mutex);
+ return ret;
+}
+
+static int ov01a1s_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+ const struct ov01a1s_mode *mode;
+ s32 vblank_def, h_blank;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes), width,
+ height, fmt->format.width,
+ fmt->format.height);
+
+ mutex_lock(&ov01a1s->mutex);
+ ov01a1s_update_pad_format(mode, &fmt->format);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
+ } else {
+ ov01a1s->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(ov01a1s->link_freq, mode->link_freq_index);
+ __v4l2_ctrl_s_ctrl_int64(ov01a1s->pixel_rate, OV01A1S_SCLK);
+
+ /* Update limits and set FPS to default */
+ vblank_def = mode->vts_def - mode->height;
+ __v4l2_ctrl_modify_range(ov01a1s->vblank,
+ mode->vts_min - mode->height,
+ OV01A1S_VTS_MAX - mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(ov01a1s->vblank, vblank_def);
+ h_blank = mode->hts - mode->width;
+ __v4l2_ctrl_modify_range(ov01a1s->hblank, h_blank, h_blank, 1,
+ h_blank);
+ }
+ mutex_unlock(&ov01a1s->mutex);
+
+ return 0;
+}
+
+static int ov01a1s_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+
+ mutex_lock(&ov01a1s->mutex);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&ov01a1s->sd,
+ sd_state, fmt->pad);
+ else
+ ov01a1s_update_pad_format(ov01a1s->cur_mode, &fmt->format);
+
+ mutex_unlock(&ov01a1s->mutex);
+
+ return 0;
+}
+
+static int ov01a1s_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov01a1s_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov01a1s_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+
+ mutex_lock(&ov01a1s->mutex);
+ ov01a1s_update_pad_format(&supported_modes[0],
+ v4l2_subdev_get_try_format(sd, fh->state, 0));
+ mutex_unlock(&ov01a1s->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov01a1s_video_ops = {
+ .s_stream = ov01a1s_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov01a1s_pad_ops = {
+ .set_fmt = ov01a1s_set_format,
+ .get_fmt = ov01a1s_get_format,
+ .enum_mbus_code = ov01a1s_enum_mbus_code,
+ .enum_frame_size = ov01a1s_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops ov01a1s_subdev_ops = {
+ .video = &ov01a1s_video_ops,
+ .pad = &ov01a1s_pad_ops,
+};
+
+static const struct media_entity_operations ov01a1s_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov01a1s_internal_ops = {
+ .open = ov01a1s_open,
+};
+
+static int ov01a1s_identify_module(struct ov01a1s *ov01a1s)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov01a1s->sd);
+ int ret;
+ u32 val;
+
+ ret = ov01a1s_read_reg(ov01a1s, OV01A1S_REG_CHIP_ID, 3, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV01A1S_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ OV01A1S_CHIP_ID, val);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int ov01a1s_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov01a1s *ov01a1s = to_ov01a1s(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(&client->dev);
+ mutex_destroy(&ov01a1s->mutex);
+
+ return 0;
+}
+
+static int ov01a1s_probe(struct i2c_client *client)
+{
+ struct ov01a1s *ov01a1s;
+ int ret = 0;
+ struct vsc_mipi_config conf;
+ struct vsc_camera_status status;
+
+ conf.lane_num = OV01A1S_DATA_LANES;
+ /* frequency unit 100k */
+ conf.freq = OV01A1S_LINK_FREQ_400MHZ / 100000;
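+ /*
+ * Acquire the sensor from the Intel VSC; if the VSC is not ready, fall
+ * back to the discrete power control logic, and defer probing if that
+ * is not ready either.
+ */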
+ ret = vsc_acquire_camera_sensor(&conf, NULL, NULL, &status);
+ if (ret == -EAGAIN)
+ ret = power_ctrl_logic_set_power(1);
+ if (ret == -EAGAIN)
+ return -EPROBE_DEFER;
+ else if (ret)
+ return ret;
+ ov01a1s = devm_kzalloc(&client->dev, sizeof(*ov01a1s), GFP_KERNEL);
+ if (!ov01a1s) {
+ ret = -ENOMEM;
+ goto probe_error_ret;
+ }
+
+ v4l2_i2c_subdev_init(&ov01a1s->sd, client, &ov01a1s_subdev_ops);
+ ret = ov01a1s_identify_module(ov01a1s);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ goto probe_error_ret;
+ }
+
+ mutex_init(&ov01a1s->mutex);
+ ov01a1s->cur_mode = &supported_modes[0];
+ ret = ov01a1s_init_controls(ov01a1s);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ov01a1s->sd.internal_ops = &ov01a1s_internal_ops;
+ ov01a1s->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov01a1s->sd.entity.ops = &ov01a1s_subdev_entity_ops;
+ ov01a1s->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov01a1s->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov01a1s->sd.entity, 1, &ov01a1s->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto probe_error_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&ov01a1s->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto probe_error_media_entity_cleanup;
+ }
+
+ vsc_release_camera_sensor(&status);
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ power_ctrl_logic_set_power(0);
+ return 0;
+
+probe_error_media_entity_cleanup:
+ media_entity_cleanup(&ov01a1s->sd.entity);
+
+probe_error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(ov01a1s->sd.ctrl_handler);
+ mutex_destroy(&ov01a1s->mutex);
+
+probe_error_ret:
+ power_ctrl_logic_set_power(0);
+ vsc_release_camera_sensor(&status);
+ return ret;
+}
+
+static const struct dev_pm_ops ov01a1s_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ov01a1s_suspend, ov01a1s_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov01a1s_acpi_ids[] = {
+ { "OVTI01AS" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, ov01a1s_acpi_ids);
+#endif
+
+static struct i2c_driver ov01a1s_i2c_driver = {
+ .driver = {
+ .name = "ov01a1s",
+ .pm = &ov01a1s_pm_ops,
+ .acpi_match_table = ACPI_PTR(ov01a1s_acpi_ids),
+ },
+ .probe_new = ov01a1s_probe,
+ .remove = ov01a1s_remove,
+};
+
+module_i2c_driver(ov01a1s_i2c_driver);
+
+MODULE_AUTHOR("Xu, Chongyang <chongyang.xu@intel.com>");
+MODULE_AUTHOR("Lai, Jim <jim.lai@intel.com>");
+MODULE_AUTHOR("Qiu, Tianshu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_DESCRIPTION("OmniVision OV01A1S sensor driver");
+MODULE_LICENSE("GPL v2");
new file mode 100644
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020-2021 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/gpio/consumer.h>
+
+#define PCL_DRV_NAME "power_ctrl_logic"
+
+struct power_ctrl_logic {
+ /* GPIO resources */
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *powerdn_gpio;
+ struct gpio_desc *clocken_gpio;
+ struct gpio_desc *indled_gpio;
+ /* status */
+ struct mutex status_lock;
+ bool power_on;
+ bool gpio_ready;
+};
+
+struct power_ctrl_gpio {
+ const char *name;
+ struct gpio_desc **pin;
+};
+
+/* MCU GPIO resources */
+static const struct acpi_gpio_params camreset_gpio = { 0, 0, false };
+static const struct acpi_gpio_params campwdn_gpio = { 1, 0, false };
+static const struct acpi_gpio_params midmclken_gpio = { 2, 0, false };
+static const struct acpi_gpio_params led_gpio = { 3, 0, false };
+static const struct acpi_gpio_mapping dsc1_acpi_gpios[] = {
+ { "camreset-gpios", &camreset_gpio, 1 },
+ { "campwdn-gpios", &campwdn_gpio, 1 },
+ { "midmclken-gpios", &midmclken_gpio, 1 },
+ { "indled-gpios", &led_gpio, 1 },
+ { }
+};
+
+static struct power_ctrl_logic pcl = {
+ .reset_gpio = NULL,
+ .powerdn_gpio = NULL,
+ .clocken_gpio = NULL,
+ .indled_gpio = NULL,
+ /* the lock guards power_on and gpio_ready and may be taken before probe */
+ .status_lock = __MUTEX_INITIALIZER(pcl.status_lock),
+ .power_on = false,
+ .gpio_ready = false,
+};
+
+static struct power_ctrl_gpio pcl_gpios[] = {
+ { "camreset", &pcl.reset_gpio },
+ { "campwdn", &pcl.powerdn_gpio },
+ { "midmclken", &pcl.clocken_gpio},
+ { "indled", &pcl.indled_gpio},
+};
+
+static int power_ctrl_logic_add(struct acpi_device *adev)
+{
+ int i, ret;
+
+ dev_dbg(&adev->dev, "@%s, enter\n", __func__);
+ set_primary_fwnode(&adev->dev, &adev->fwnode);
+
+ ret = acpi_dev_add_driver_gpios(adev, dsc1_acpi_gpios);
+ if (ret) {
+ dev_err(&adev->dev, "@%s: --111---fail to add gpio. ret %d\n", __func__, ret);
+ return -EBUSY;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pcl_gpios); i++) {
+ *pcl_gpios[i].pin = gpiod_get(&adev->dev, pcl_gpios[i].name, GPIOD_OUT_LOW);
+ if (IS_ERR(*pcl_gpios[i].pin)) {
+ dev_dbg(&adev->dev, "failed to get gpio %s\n", pcl_gpios[i].name);
+ return -EPROBE_DEFER;
+ }
+ }
+
+ mutex_lock(&pcl.status_lock);
+ pcl.gpio_ready = true;
+ mutex_unlock(&pcl.status_lock);
+
+ dev_dbg(&adev->dev, "@%s, exit\n", __func__);
+ return ret;
+}
+
+static int power_ctrl_logic_remove(struct acpi_device *adev)
+{
+ dev_dbg(&adev->dev, "@%s, enter\n", __func__);
+ mutex_lock(&pcl.status_lock);
+ pcl.gpio_ready = false;
+ gpiod_set_value_cansleep(pcl.reset_gpio, 0);
+ gpiod_put(pcl.reset_gpio);
+ gpiod_set_value_cansleep(pcl.powerdn_gpio, 0);
+ gpiod_put(pcl.powerdn_gpio);
+ gpiod_set_value_cansleep(pcl.clocken_gpio, 0);
+ gpiod_put(pcl.clocken_gpio);
+ gpiod_set_value_cansleep(pcl.indled_gpio, 0);
+ gpiod_put(pcl.indled_gpio);
+ mutex_unlock(&pcl.status_lock);
+ dev_dbg(&adev->dev, "@%s, exit\n", __func__);
+ return 0;
+}
+
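+/* the power control logic is modeled by the INT3472 ACPI device */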
+static const struct acpi_device_id acpi_ids[] = {
+ { "INT3472", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ids);
+
+static struct acpi_driver _driver = {
+ .name = PCL_DRV_NAME,
+ .class = PCL_DRV_NAME,
+ .ids = acpi_ids,
+ .ops = {
+ .add = power_ctrl_logic_add,
+ .remove = power_ctrl_logic_remove,
+ },
+};
+module_acpi_driver(_driver);
+
+int power_ctrl_logic_set_power(int on)
+{
+ mutex_lock(&pcl.status_lock);
+ if (!pcl.gpio_ready) {
+ pr_debug("@%s,failed to set power, gpio_ready=%d, on=%d\n",
+ __func__, pcl.gpio_ready, on);
+ mutex_unlock(&pcl.status_lock);
+ return -EAGAIN;
+ }
+ if (pcl.power_on != on) {
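+ /* toggle reset, power-down, clock-enable and indicator-LED GPIOs together */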
+ gpiod_set_value_cansleep(pcl.reset_gpio, on);
+ gpiod_set_value_cansleep(pcl.powerdn_gpio, on);
+ gpiod_set_value_cansleep(pcl.clocken_gpio, on);
+ gpiod_set_value_cansleep(pcl.indled_gpio, on);
+ pcl.power_on = on;
+ }
+ mutex_unlock(&pcl.status_lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(power_ctrl_logic_set_power);
+
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_AUTHOR("Qiu, Tianshu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Xu, Chongyang <chongyang.xu@intel.com>");
+MODULE_DESCRIPTION("Power Control Logic Driver");
+MODULE_LICENSE("GPL v2");
new file mode 100644
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020-2021 Intel Corporation. */
+
+#ifndef _POWER_CTRL_LOGIC_H_
+#define _POWER_CTRL_LOGIC_H_
+
+/* exported function for extern module */
+int power_ctrl_logic_set_power(int on);
+#endif
@@ -55,7 +55,7 @@ source "drivers/media/pci/smipcie/Kconfig"
source "drivers/media/pci/netup_unidvb/Kconfig"
endif
-source "drivers/media/pci/intel/ipu3/Kconfig"
+source "drivers/media/pci/intel/Kconfig"
config VIDEO_PCI_SKELETON
tristate "Skeleton PCI V4L2 driver"
new file mode 100644
@@ -0,0 +1,20 @@
+config VIDEO_INTEL_IPU6
+ tristate "Intel IPU driver"
+ depends on ACPI
+ depends on MEDIA_SUPPORT
+ depends on MEDIA_PCI_SUPPORT
+ select IOMMU_API
+ select IOMMU_IOVA
+ select X86_DEV_DMA_OPS if X86
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ select PHYS_ADDR_T_64BIT
+ select COMMON_CLK
+ help
+ This driver supports the Intel Image Processing Unit (IPU6), found
+ in Intel SoCs and used for capturing images and video from camera
+ sensors.
+
+ To compile this driver, say Y or M here. It builds three modules:
+ intel_ipu6, intel_ipu6_isys and intel_ipu6_psys.
+
+source "drivers/media/pci/intel/ipu3/Kconfig"
@@ -3,4 +3,13 @@
# Makefile for the IPU3 cio2 and ImGU drivers
#
-obj-y += ipu3/
+# Force extra compile warnings to keep these drivers warning-free.
+# Note: a GCC upgrade may introduce new warnings and break the build.
+subdir-ccflags-y := -Wall -Wextra
+subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
+subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
+subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
+subdir-ccflags-$(CONFIG_VIDEO_INTEL_IPU_WERROR) += -Werror
+
+obj-$(CONFIG_VIDEO_IPU3_CIO2) += ipu3/
+obj-$(CONFIG_VIDEO_INTEL_IPU6) += ipu6/
new file mode 100644
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+
+#include "ipu.h"
+#include "ipu-platform.h"
+#include "ipu-dma.h"
+
+#ifdef CONFIG_PM
+static struct bus_type ipu_bus;
+
+static int bus_pm_runtime_suspend(struct device *dev)
+{
+ struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+ int rval;
+
+ rval = pm_generic_runtime_suspend(dev);
+ if (rval)
+ return rval;
+
+ rval = ipu_buttress_power(dev, adev->ctrl, false);
+ dev_dbg(dev, "%s: buttress power down %d\n", __func__, rval);
+ if (!rval)
+ return 0;
+
+ dev_err(dev, "power down failed!\n");
+
+ /* Powering down failed, attempt to resume device now */
+ rval = pm_generic_runtime_resume(dev);
+ if (!rval)
+ return -EBUSY;
+
+ return -EIO;
+}
+
+static int bus_pm_runtime_resume(struct device *dev)
+{
+ struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+ int rval;
+
+ rval = ipu_buttress_power(dev, adev->ctrl, true);
+ dev_dbg(dev, "%s: buttress power up %d\n", __func__, rval);
+ if (rval)
+ return rval;
+
+ rval = pm_generic_runtime_resume(dev);
+ dev_dbg(dev, "%s: resume %d\n", __func__, rval);
+ if (rval)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ ipu_buttress_power(dev, adev->ctrl, false);
+
+ return -EBUSY;
+}
+
+static const struct dev_pm_ops ipu_bus_pm_ops = {
+ .runtime_suspend = bus_pm_runtime_suspend,
+ .runtime_resume = bus_pm_runtime_resume,
+};
+
+#define IPU_BUS_PM_OPS (&ipu_bus_pm_ops)
+#else
+#define IPU_BUS_PM_OPS NULL
+#endif
+
+static int ipu_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct ipu_bus_driver *adrv = to_ipu_bus_driver(drv);
+
+ dev_dbg(dev, "bus match: \"%s\" --- \"%s\"\n", dev_name(dev),
+ adrv->wanted);
+
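+ /* IPU child devices are named "<wanted><nr>", so a prefix match suffices */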
+ return !strncmp(dev_name(dev), adrv->wanted, strlen(adrv->wanted));
+}
+
+static int ipu_bus_probe(struct device *dev)
+{
+ struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+ struct ipu_bus_driver *adrv = to_ipu_bus_driver(dev->driver);
+ int rval;
+
+ dev_dbg(dev, "bus probe dev %s\n", dev_name(dev));
+
+ adev->adrv = adrv;
+ if (!adrv->probe) {
+ rval = -ENODEV;
+ goto out_err;
+ }
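+ /* keep the device powered while its driver's probe runs */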
+ rval = pm_runtime_get_sync(&adev->dev);
+ if (rval < 0) {
+ dev_err(&adev->dev, "Failed to get runtime PM\n");
+ pm_runtime_put_noidle(&adev->dev);
+ goto out_err;
+ }
+
+ rval = adrv->probe(adev);
+ pm_runtime_put(&adev->dev);
+
+ if (rval)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ ipu_bus_set_drvdata(adev, NULL);
+ adev->adrv = NULL;
+
+ return rval;
+}
+
+static void ipu_bus_remove(struct device *dev)
+{
+ struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+ struct ipu_bus_driver *adrv = to_ipu_bus_driver(dev->driver);
+
+ if (adrv->remove)
+ adrv->remove(adev);
+}
+
+static struct bus_type ipu_bus = {
+ .name = IPU_BUS_NAME,
+ .match = ipu_bus_match,
+ .probe = ipu_bus_probe,
+ .remove = ipu_bus_remove,
+ .pm = IPU_BUS_PM_OPS,
+};
+
+static struct mutex ipu_bus_mutex;
+
+static void ipu_bus_release(struct device *dev)
+{
+}
+
+struct ipu_bus_device *ipu_bus_add_device(struct pci_dev *pdev,
+ struct device *parent, void *pdata,
+ struct ipu_buttress_ctrl *ctrl,
+ char *name, unsigned int nr)
+{
+ struct ipu_bus_device *adev;
+ struct ipu_device *isp = pci_get_drvdata(pdev);
+ int rval;
+
+ adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return ERR_PTR(-ENOMEM);
+
+ adev->dev.parent = parent;
+ adev->dev.bus = &ipu_bus;
+ adev->dev.release = ipu_bus_release;
+ adev->dev.dma_ops = &ipu_dma_ops;
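+	/* the IPU MMU address width differs between secure and non-secure mode */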
+ adev->dma_mask = DMA_BIT_MASK(isp->secure_mode ?
+ IPU_MMU_ADDRESS_BITS :
+ IPU_MMU_ADDRESS_BITS_NON_SECURE);
+ adev->dev.dma_mask = &adev->dma_mask;
+ adev->dev.dma_parms = pdev->dev.dma_parms;
+ adev->dev.coherent_dma_mask = adev->dma_mask;
+ adev->ctrl = ctrl;
+ adev->pdata = pdata;
+ adev->isp = isp;
+ mutex_init(&adev->resume_lock);
+ dev_set_name(&adev->dev, "%s%d", name, nr);
+
+ rval = device_register(&adev->dev);
+ if (rval) {
+ put_device(&adev->dev);
+ return ERR_PTR(rval);
+ }
+
+ mutex_lock(&ipu_bus_mutex);
+ list_add(&adev->list, &isp->devices);
+ mutex_unlock(&ipu_bus_mutex);
+
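+	/* allow and enable runtime PM so the device can idle when unused */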
+ pm_runtime_allow(&adev->dev);
+ pm_runtime_enable(&adev->dev);
+
+ return adev;
+}
+
+void ipu_bus_del_devices(struct pci_dev *pdev)
+{
+ struct ipu_device *isp = pci_get_drvdata(pdev);
+ struct ipu_bus_device *adev, *save;
+
+ mutex_lock(&ipu_bus_mutex);
+
+ list_for_each_entry_safe(adev, save, &isp->devices, list) {
+ pm_runtime_disable(&adev->dev);
+ list_del(&adev->list);
+ device_unregister(&adev->dev);
+ }
+
+ mutex_unlock(&ipu_bus_mutex);
+}
+
+int ipu_bus_register_driver(struct ipu_bus_driver *adrv)
+{
+ adrv->drv.bus = &ipu_bus;
+ return driver_register(&adrv->drv);
+}
+EXPORT_SYMBOL(ipu_bus_register_driver);
+
+int ipu_bus_unregister_driver(struct ipu_bus_driver *adrv)
+{
+ driver_unregister(&adrv->drv);
+ return 0;
+}
+EXPORT_SYMBOL(ipu_bus_unregister_driver);
+
+int ipu_bus_register(void)
+{
+ mutex_init(&ipu_bus_mutex);
+ return bus_register(&ipu_bus);
+}
+
+void ipu_bus_unregister(void)
+{
+ mutex_destroy(&ipu_bus_mutex);
+	bus_unregister(&ipu_bus);
+}
+
+static int flr_rpm_recovery(struct device *dev, void *p)
+{
+ dev_dbg(dev, "FLR recovery call\n");
+	/*
+	 * We do not necessarily traverse devices from child to parent, and
+	 * runtime PM refuses to change the state of a parent while a child
+	 * is still active. During FLR (a full reset of the whole IPU) that
+	 * doesn't matter: the HW has power gated everything during the FLR
+	 * cycle and we are only cleaning up SW state. Thus, ignore children
+	 * while marking the device suspended.
+	 */
+ pm_suspend_ignore_children(dev, true);
+ pm_runtime_set_suspended(dev);
+ pm_suspend_ignore_children(dev, false);
+
+ return 0;
+}
+
+int ipu_bus_flr_recovery(void)
+{
+ bus_for_each_dev(&ipu_bus, NULL, NULL, flr_rpm_recovery);
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_BUS_H
+#define IPU_BUS_H
+
+#include <linux/device.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+
+#define IPU_BUS_NAME IPU_NAME "-bus"
+
+struct ipu_buttress_ctrl;
+struct ipu_subsystem_trace_config;
+
+struct ipu_bus_device {
+ struct device dev;
+ struct list_head list;
+ void *pdata;
+ struct ipu_bus_driver *adrv;
+ struct ipu_mmu *mmu;
+ struct ipu_device *isp;
+ struct ipu_subsystem_trace_config *trace_cfg;
+ struct ipu_buttress_ctrl *ctrl;
+ u64 dma_mask;
+ /* Protect runtime_resume calls on the dev */
+ struct mutex resume_lock;
+};
+
+#define to_ipu_bus_device(_dev) container_of(_dev, struct ipu_bus_device, dev)
+
+struct ipu_bus_driver {
+ struct device_driver drv;
+ const char *wanted;
+ int (*probe)(struct ipu_bus_device *adev);
+ void (*remove)(struct ipu_bus_device *adev);
+ irqreturn_t (*isr)(struct ipu_bus_device *adev);
+ irqreturn_t (*isr_threaded)(struct ipu_bus_device *adev);
+ bool wake_isr_thread;
+};
+
+#define to_ipu_bus_driver(_drv) container_of(_drv, struct ipu_bus_driver, drv)
+
+struct ipu_bus_device *ipu_bus_add_device(struct pci_dev *pdev,
+ struct device *parent, void *pdata,
+ struct ipu_buttress_ctrl *ctrl,
+ char *name, unsigned int nr);
+void ipu_bus_del_devices(struct pci_dev *pdev);
+
+int ipu_bus_register_driver(struct ipu_bus_driver *adrv);
+int ipu_bus_unregister_driver(struct ipu_bus_driver *adrv);
+
+int ipu_bus_register(void);
+void ipu_bus_unregister(void);
+
+#define module_ipu_bus_driver(drv) \
+ module_driver(drv, ipu_bus_register_driver, \
+ ipu_bus_unregister_driver)
+
+#define ipu_bus_set_drvdata(adev, data) dev_set_drvdata(&(adev)->dev, data)
+#define ipu_bus_get_drvdata(adev) dev_get_drvdata(&(adev)->dev)
+
+int ipu_bus_flr_recovery(void);
+
+#endif
new file mode 100644
@@ -0,0 +1,1372 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+
+#include <media/ipu-isys.h>
+
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-buttress.h"
+#include "ipu-platform-buttress-regs.h"
+#include "ipu-cpd.h"
+
+#define BOOTLOADER_STATUS_OFFSET 0x15c
+
+#define BOOTLOADER_MAGIC_KEY 0xb00710ad
+
+#define ENTRY BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1
+#define EXIT BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2
+#define QUERY BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE
+
+#define BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX 10
+
+#define BUTTRESS_CSE_BOOTLOAD_TIMEOUT 5000000
+#define BUTTRESS_CSE_AUTHENTICATE_TIMEOUT 10000000
+#define BUTTRESS_CSE_FWRESET_TIMEOUT 100000
+
+#define BUTTRESS_IPC_TX_TIMEOUT 1000
+#define BUTTRESS_IPC_RESET_TIMEOUT 2000
+#define BUTTRESS_IPC_RX_TIMEOUT 1000
+#define BUTTRESS_IPC_VALIDITY_TIMEOUT 1000000
+#define BUTTRESS_TSC_SYNC_TIMEOUT 5000
+
+#define IPU_BUTTRESS_TSC_LIMIT 500 /* 26 us @ 19.2 MHz */
+#define IPU_BUTTRESS_TSC_RETRY 10
+
+#define BUTTRESS_CSE_IPC_RESET_RETRY 4
+
+#define BUTTRESS_IPC_CMD_SEND_RETRY 1
+
+static const u32 ipu_adev_irq_mask[] = {
+ BUTTRESS_ISR_IS_IRQ, BUTTRESS_ISR_PS_IRQ
+};
+
+int ipu_buttress_ipc_reset(struct ipu_device *isp, struct ipu_buttress_ipc *ipc)
+{
+ struct ipu_buttress *b = &isp->buttress;
+ unsigned int retries = BUTTRESS_IPC_RESET_TIMEOUT;
+ u32 val = 0, csr_in_clr;
+
+ if (!isp->secure_mode) {
+		dev_info(&isp->pdev->dev, "Skip IPC reset for non-secure mode\n");
+ return 0;
+ }
+
+ mutex_lock(&b->ipc_mutex);
+
+ /* Clear-by-1 CSR (all bits), corresponding internal states. */
+ val = readl(isp->base + ipc->csr_in);
+ writel(val, isp->base + ipc->csr_in);
+
+ /* Set peer CSR bit IPC_PEER_COMP_ACTIONS_RST_PHASE1 */
+ writel(ENTRY, isp->base + ipc->csr_out);
+ /*
+ * Clear-by-1 all CSR bits EXCEPT following
+ * bits:
+ * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
+ * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+ * C. Possibly custom bits, depending on
+ * their role.
+ */
+ csr_in_clr = BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ |
+ BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID |
+ BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ | QUERY;
+
+ while (retries--) {
+ usleep_range(400, 500);
+ val = readl(isp->base + ipc->csr_in);
+ switch (val) {
+ case (ENTRY | EXIT):
+ case (ENTRY | EXIT | QUERY):
+ dev_dbg(&isp->pdev->dev,
+ "%s:%s & %s\n", __func__,
+ "IPC_PEER_COMP_ACTIONS_RST_PHASE1",
+ "IPC_PEER_COMP_ACTIONS_RST_PHASE2");
+ /*
+ * 1) Clear-by-1 CSR bits
+ * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
+ * IPC_PEER_COMP_ACTIONS_RST_PHASE2).
+ * 2) Set peer CSR bit
+ * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
+ */
+ writel(ENTRY | EXIT, isp->base + ipc->csr_in);
+ writel(QUERY, isp->base + ipc->csr_out);
+ break;
+ case ENTRY:
+ case (ENTRY | QUERY):
+ dev_dbg(&isp->pdev->dev,
+ "%s:IPC_PEER_COMP_ACTIONS_RST_PHASE1\n",
+ __func__);
+ /*
+ * 1) Clear-by-1 CSR bits
+ * (IPC_PEER_COMP_ACTIONS_RST_PHASE1,
+ * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE).
+ * 2) Set peer CSR bit
+ * IPC_PEER_COMP_ACTIONS_RST_PHASE1.
+ */
+ writel(ENTRY | QUERY, isp->base + ipc->csr_in);
+ writel(ENTRY, isp->base + ipc->csr_out);
+ break;
+ case EXIT:
+ case (EXIT | QUERY):
+ dev_dbg(&isp->pdev->dev,
+ "%s: IPC_PEER_COMP_ACTIONS_RST_PHASE2\n",
+ __func__);
+ /*
+ * Clear-by-1 CSR bit
+ * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+ * 1) Clear incoming doorbell.
+ * 2) Clear-by-1 all CSR bits EXCEPT following
+ * bits:
+ * A. IPC_PEER_COMP_ACTIONS_RST_PHASE1.
+ * B. IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+ * C. Possibly custom bits, depending on
+ * their role.
+ * 3) Set peer CSR bit
+ * IPC_PEER_COMP_ACTIONS_RST_PHASE2.
+ */
+ writel(EXIT, isp->base + ipc->csr_in);
+ writel(0, isp->base + ipc->db0_in);
+ writel(csr_in_clr, isp->base + ipc->csr_in);
+ writel(EXIT, isp->base + ipc->csr_out);
+
+			/*
+			 * Read csr_in again to check whether RST_PHASE2
+			 * is done. If csr_in is QUERY, it must be
+			 * handled again.
+			 */
+ usleep_range(200, 300);
+ val = readl(isp->base + ipc->csr_in);
+ if (val & QUERY) {
+ dev_dbg(&isp->pdev->dev,
+ "%s: RST_PHASE2 retry csr_in = %x\n",
+ __func__, val);
+ break;
+ }
+ mutex_unlock(&b->ipc_mutex);
+ return 0;
+ case QUERY:
+ dev_dbg(&isp->pdev->dev,
+ "%s: %s\n", __func__,
+ "IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE");
+ /*
+ * 1) Clear-by-1 CSR bit
+ * IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE.
+ * 2) Set peer CSR bit
+ * IPC_PEER_COMP_ACTIONS_RST_PHASE1
+ */
+ writel(QUERY, isp->base + ipc->csr_in);
+ writel(ENTRY, isp->base + ipc->csr_out);
+ break;
+ default:
+ dev_dbg_ratelimited(&isp->pdev->dev,
+ "%s: unexpected CSR 0x%x\n",
+ __func__, val);
+ break;
+ }
+ }
+
+ mutex_unlock(&b->ipc_mutex);
+ dev_err(&isp->pdev->dev, "Timed out while waiting for CSE\n");
+
+ return -ETIMEDOUT;
+}
+
+static void
+ipu_buttress_ipc_validity_close(struct ipu_device *isp,
+ struct ipu_buttress_ipc *ipc)
+{
+ /* Set bit 5 in CSE CSR */
+ writel(BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ,
+ isp->base + ipc->csr_out);
+}
+
+static int
+ipu_buttress_ipc_validity_open(struct ipu_device *isp,
+ struct ipu_buttress_ipc *ipc)
+{
+ unsigned int mask = BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID;
+ unsigned int tout = BUTTRESS_IPC_VALIDITY_TIMEOUT;
+ void __iomem *addr;
+ int ret;
+ u32 val;
+
+ /* Set bit 3 in CSE CSR */
+ writel(BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ,
+ isp->base + ipc->csr_out);
+
+ addr = isp->base + ipc->csr_in;
+ ret = readl_poll_timeout(addr, val, val & mask, 200, tout);
+ if (ret) {
+ val = readl(addr);
+ dev_err(&isp->pdev->dev, "CSE validity timeout 0x%x\n", val);
+ ipu_buttress_ipc_validity_close(isp, ipc);
+ }
+
+ return ret;
+}
+
+static void ipu_buttress_ipc_recv(struct ipu_device *isp,
+ struct ipu_buttress_ipc *ipc, u32 *ipc_msg)
+{
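+	/* latch the payload, then clear the incoming doorbell to ack */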
+ if (ipc_msg)
+ *ipc_msg = readl(isp->base + ipc->data0_in);
+ writel(0, isp->base + ipc->db0_in);
+}
+
+static int ipu_buttress_ipc_send_bulk(struct ipu_device *isp,
+ enum ipu_buttress_ipc_domain ipc_domain,
+ struct ipu_ipc_buttress_bulk_msg *msgs,
+ u32 size)
+{
+ struct ipu_buttress *b = &isp->buttress;
+ struct ipu_buttress_ipc *ipc;
+ unsigned long tx_timeout_jiffies, rx_timeout_jiffies;
+ u32 val;
+ int ret;
+ int tout;
+ unsigned int i, retry = BUTTRESS_IPC_CMD_SEND_RETRY;
+
+ ipc = ipc_domain == IPU_BUTTRESS_IPC_CSE ? &b->cse : &b->ish;
+
+ mutex_lock(&b->ipc_mutex);
+
+ ret = ipu_buttress_ipc_validity_open(isp, ipc);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "IPC validity open failed\n");
+ goto out;
+ }
+
+ tx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_TX_TIMEOUT);
+ rx_timeout_jiffies = msecs_to_jiffies(BUTTRESS_IPC_RX_TIMEOUT);
+
+ for (i = 0; i < size; i++) {
+ reinit_completion(&ipc->send_complete);
+ if (msgs[i].require_resp)
+ reinit_completion(&ipc->recv_complete);
+
+ dev_dbg(&isp->pdev->dev, "bulk IPC command: 0x%x\n",
+ msgs[i].cmd);
+ writel(msgs[i].cmd, isp->base + ipc->data0_out);
+
+ val = BUTTRESS_IU2CSEDB0_BUSY | msgs[i].cmd_size;
+
+ writel(val, isp->base + ipc->db0_out);
+
+ tout = wait_for_completion_timeout(&ipc->send_complete,
+ tx_timeout_jiffies);
+ if (!tout) {
+			dev_err(&isp->pdev->dev, "IPC send timeout\n");
+ if (!retry--) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /*
+ * WORKAROUND: Sometimes CSE is not
+ * responding on first try, try again.
+ */
+ writel(0, isp->base + ipc->db0_out);
+ i--;
+ continue;
+ }
+
+ retry = BUTTRESS_IPC_CMD_SEND_RETRY;
+
+ if (!msgs[i].require_resp)
+ continue;
+
+ tout = wait_for_completion_timeout(&ipc->recv_complete,
+ rx_timeout_jiffies);
+ if (!tout) {
+ dev_err(&isp->pdev->dev, "recv IPC response timeout\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (ipc->nack_mask &&
+ (ipc->recv_data & ipc->nack_mask) == ipc->nack) {
+ dev_err(&isp->pdev->dev,
+ "IPC NACK for cmd 0x%x\n", msgs[i].cmd);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (ipc->recv_data != msgs[i].expected_resp) {
+ dev_err(&isp->pdev->dev,
+				"expected resp: 0x%x, IPC response: 0x%x\n",
+ msgs[i].expected_resp, ipc->recv_data);
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ dev_dbg(&isp->pdev->dev, "bulk IPC commands done\n");
+
+out:
+ ipu_buttress_ipc_validity_close(isp, ipc);
+ mutex_unlock(&b->ipc_mutex);
+ return ret;
+}
+
+static int
+ipu_buttress_ipc_send(struct ipu_device *isp,
+ enum ipu_buttress_ipc_domain ipc_domain,
+ u32 ipc_msg, u32 size, bool require_resp,
+ u32 expected_resp)
+{
+ struct ipu_ipc_buttress_bulk_msg msg = {
+ .cmd = ipc_msg,
+ .cmd_size = size,
+ .require_resp = require_resp,
+ .expected_resp = expected_resp,
+ };
+
+ return ipu_buttress_ipc_send_bulk(isp, ipc_domain, &msg, 1);
+}
+
+static irqreturn_t ipu_buttress_call_isr(struct ipu_bus_device *adev)
+{
+ irqreturn_t ret = IRQ_WAKE_THREAD;
+
+ if (!adev || !adev->adrv)
+ return IRQ_NONE;
+
+ if (adev->adrv->isr)
+ ret = adev->adrv->isr(adev);
+
+ if (ret == IRQ_WAKE_THREAD && !adev->adrv->isr_threaded)
+ ret = IRQ_NONE;
+
+ adev->adrv->wake_isr_thread = (ret == IRQ_WAKE_THREAD);
+
+ return ret;
+}
+
+irqreturn_t ipu_buttress_isr(int irq, void *isp_ptr)
+{
+ struct ipu_device *isp = isp_ptr;
+ struct ipu_bus_device *adev[] = { isp->isys, isp->psys };
+ struct ipu_buttress *b = &isp->buttress;
+ irqreturn_t ret = IRQ_NONE;
+ u32 disable_irqs = 0;
+ u32 irq_status;
+ u32 reg_irq_sts = BUTTRESS_REG_ISR_STATUS;
+ unsigned int i;
+
+ pm_runtime_get(&isp->pdev->dev);
+
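+	/*
+	 * If the IPU is not runtime active, just ack and clear the
+	 * interrupt status; full handling needs powered-up hardware.
+	 */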
+ if (!pm_runtime_active(&isp->pdev->dev)) {
+ irq_status = readl(isp->base + reg_irq_sts);
+ writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR);
+ pm_runtime_put(&isp->pdev->dev);
+ return IRQ_HANDLED;
+ }
+
+ irq_status = readl(isp->base + reg_irq_sts);
+ if (!irq_status) {
+ pm_runtime_put(&isp->pdev->dev);
+ return IRQ_NONE;
+ }
+
+ do {
+ writel(irq_status, isp->base + BUTTRESS_REG_ISR_CLEAR);
+
+ for (i = 0; i < ARRAY_SIZE(ipu_adev_irq_mask); i++) {
+ if (irq_status & ipu_adev_irq_mask[i]) {
+ irqreturn_t r = ipu_buttress_call_isr(adev[i]);
+
+ if (r == IRQ_WAKE_THREAD) {
+ ret = IRQ_WAKE_THREAD;
+ disable_irqs |= ipu_adev_irq_mask[i];
+ } else if (ret == IRQ_NONE &&
+ r == IRQ_HANDLED) {
+ ret = IRQ_HANDLED;
+ }
+ }
+ }
+
+ if (irq_status & (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING |
+ BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING |
+ BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE |
+ BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH |
+ BUTTRESS_ISR_SAI_VIOLATION) &&
+ ret == IRQ_NONE)
+ ret = IRQ_HANDLED;
+
+ if (irq_status & BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING) {
+ dev_dbg(&isp->pdev->dev,
+ "BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING\n");
+ ipu_buttress_ipc_recv(isp, &b->cse, &b->cse.recv_data);
+ complete(&b->cse.recv_complete);
+ }
+
+ if (irq_status & BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING) {
+ dev_dbg(&isp->pdev->dev,
+ "BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING\n");
+ ipu_buttress_ipc_recv(isp, &b->ish, &b->ish.recv_data);
+ complete(&b->ish.recv_complete);
+ }
+
+ if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE) {
+ dev_dbg(&isp->pdev->dev,
+ "BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE\n");
+ complete(&b->cse.send_complete);
+ }
+
+ if (irq_status & BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH) {
+ dev_dbg(&isp->pdev->dev,
+				"BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH\n");
+ complete(&b->ish.send_complete);
+ }
+
+ if (irq_status & BUTTRESS_ISR_SAI_VIOLATION &&
+ ipu_buttress_get_secure_mode(isp)) {
+ dev_err(&isp->pdev->dev,
+ "BUTTRESS_ISR_SAI_VIOLATION\n");
+ WARN_ON(1);
+ }
+
+ irq_status = readl(isp->base + reg_irq_sts);
+ } while (irq_status && !isp->flr_done);
+
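+	/*
+	 * IRQ sources deferred to the threaded handler stay masked here;
+	 * ipu_buttress_isr_threaded() restores the full IRQ mask.
+	 */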
+ if (disable_irqs)
+ writel(BUTTRESS_IRQS & ~disable_irqs,
+ isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+ pm_runtime_put(&isp->pdev->dev);
+
+ return ret;
+}
+
+irqreturn_t ipu_buttress_isr_threaded(int irq, void *isp_ptr)
+{
+ struct ipu_device *isp = isp_ptr;
+ struct ipu_bus_device *adev[] = { isp->isys, isp->psys };
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int i;
+
+ dev_dbg(&isp->pdev->dev, "isr: Buttress threaded interrupt handler\n");
+
+ for (i = 0; i < ARRAY_SIZE(ipu_adev_irq_mask); i++) {
+ if (adev[i] && adev[i]->adrv &&
+ adev[i]->adrv->wake_isr_thread &&
+ adev[i]->adrv->isr_threaded(adev[i]) == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+
+ writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+ return ret;
+}
+
+int ipu_buttress_power(struct device *dev,
+ struct ipu_buttress_ctrl *ctrl, bool on)
+{
+ struct ipu_device *isp = to_ipu_bus_device(dev)->isp;
+ u32 pwr_sts, val;
+ int ret = 0;
+
+ if (!ctrl)
+ return 0;
+
+ /* Until FLR completion nothing is expected to work */
+ if (isp->flr_done)
+ return 0;
+
+ mutex_lock(&isp->buttress.power_mutex);
+
+ if (!on) {
+ val = 0;
+ pwr_sts = ctrl->pwr_sts_off << ctrl->pwr_sts_shift;
+ } else {
+ val = BUTTRESS_FREQ_CTL_START |
+ ctrl->divisor << ctrl->divisor_shift |
+ ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT |
+ BUTTRESS_FREQ_CTL_ICCMAX_LEVEL;
+
+ pwr_sts = ctrl->pwr_sts_on << ctrl->pwr_sts_shift;
+ }
+
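+	/* the freq ctl write triggers the power state transition */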
+ writel(val, isp->base + ctrl->freq_ctl);
+
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE,
+ val, ((val & ctrl->pwr_sts_mask) == pwr_sts),
+ 100, BUTTRESS_POWER_TIMEOUT);
+ if (ret)
+ dev_err(&isp->pdev->dev,
+ "Change power status timeout with 0x%x\n", val);
+
+ ctrl->started = !ret && on;
+
+ mutex_unlock(&isp->buttress.power_mutex);
+
+ return ret;
+}
+
+static bool secure_mode_enable = true;
+module_param(secure_mode_enable, bool, 0660);
+MODULE_PARM_DESC(secure_mode_enable, "IPU secure mode enable");
+
+void ipu_buttress_set_secure_mode(struct ipu_device *isp)
+{
+ u8 retry = 100;
+ u32 val, read;
+
+ /*
+ * HACK to disable possible secure mode. This can be
+ * reverted when CSE is disabling the secure mode
+ */
+ read = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+	if (secure_mode_enable)
+		val = read | BUTTRESS_SECURITY_CTL_FW_SECURE_MODE;
+	else
+		val = read & ~BUTTRESS_SECURITY_CTL_FW_SECURE_MODE;
+
+ if (val == read)
+ return;
+
+ writel(val, isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+	/*
+	 * In B0, due to a HW bug, writes to some buttress registers may
+	 * not take effect on the first attempt. Write again and retry
+	 * below until the value sticks.
+	 */
+ writel(val, isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+ while (retry--) {
+ read = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+ if (read == val)
+ break;
+
+ writel(val, isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+ if (retry == 0)
+ dev_err(&isp->pdev->dev,
+ "update security control register failed\n");
+ }
+}
+
+bool ipu_buttress_get_secure_mode(struct ipu_device *isp)
+{
+ u32 val;
+
+ val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+ return val & BUTTRESS_SECURITY_CTL_FW_SECURE_MODE;
+}
+
+bool ipu_buttress_auth_done(struct ipu_device *isp)
+{
+ u32 val;
+
+ if (!isp->secure_mode)
+		return true;
+
+ val = readl(isp->base + BUTTRESS_REG_SECURITY_CTL);
+
+ return (val & BUTTRESS_SECURITY_CTL_FW_SETUP_MASK) ==
+ BUTTRESS_SECURITY_CTL_AUTH_DONE;
+}
+EXPORT_SYMBOL(ipu_buttress_auth_done);
+
+static void ipu_buttress_set_psys_ratio(struct ipu_device *isp,
+ unsigned int psys_divisor,
+ unsigned int psys_qos_floor)
+{
+ struct ipu_buttress_ctrl *ctrl = isp->psys->ctrl;
+
+ mutex_lock(&isp->buttress.power_mutex);
+
+ if (ctrl->divisor == psys_divisor && ctrl->qos_floor == psys_qos_floor)
+ goto out_mutex_unlock;
+
+ ctrl->divisor = psys_divisor;
+ ctrl->qos_floor = psys_qos_floor;
+
+ if (ctrl->started) {
+		/*
+		 * According to the documentation, the driver initiates a
+		 * DVFS transition by writing the wanted ratio, floor ratio
+		 * and start bit. There is no need to stop PS first.
+		 */
+ writel(BUTTRESS_FREQ_CTL_START |
+ ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT |
+ psys_divisor, isp->base + BUTTRESS_REG_PS_FREQ_CTL);
+ }
+
+out_mutex_unlock:
+ mutex_unlock(&isp->buttress.power_mutex);
+}
+
+static void ipu_buttress_set_isys_ratio(struct ipu_device *isp,
+ unsigned int isys_divisor)
+{
+ struct ipu_buttress_ctrl *ctrl = isp->isys->ctrl;
+
+ mutex_lock(&isp->buttress.power_mutex);
+
+ if (ctrl->divisor == isys_divisor)
+ goto out_mutex_unlock;
+
+ ctrl->divisor = isys_divisor;
+
+ if (ctrl->started) {
+ writel(BUTTRESS_FREQ_CTL_START |
+ ctrl->qos_floor << BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT |
+ isys_divisor, isp->base + BUTTRESS_REG_IS_FREQ_CTL);
+ }
+
+out_mutex_unlock:
+ mutex_unlock(&isp->buttress.power_mutex);
+}
+
+static void ipu_buttress_set_psys_freq(struct ipu_device *isp,
+ unsigned int freq)
+{
+ unsigned int psys_ratio = freq / BUTTRESS_PS_FREQ_STEP;
+
+ if (isp->buttress.psys_force_ratio)
+ return;
+
+ ipu_buttress_set_psys_ratio(isp, psys_ratio, psys_ratio);
+}
+
+void
+ipu_buttress_add_psys_constraint(struct ipu_device *isp,
+ struct ipu_buttress_constraint *constraint)
+{
+ struct ipu_buttress *b = &isp->buttress;
+
+ mutex_lock(&b->cons_mutex);
+ list_add(&constraint->list, &b->constraints);
+
+ if (constraint->min_freq > b->psys_min_freq) {
+		b->psys_min_freq = min(constraint->min_freq,
+				       b->psys_fused_freqs.max_freq);
+ ipu_buttress_set_psys_freq(isp, b->psys_min_freq);
+ }
+ mutex_unlock(&b->cons_mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_buttress_add_psys_constraint);
+
+void
+ipu_buttress_remove_psys_constraint(struct ipu_device *isp,
+ struct ipu_buttress_constraint *constraint)
+{
+ struct ipu_buttress *b = &isp->buttress;
+ struct ipu_buttress_constraint *c;
+ unsigned int min_freq = 0;
+
+ mutex_lock(&b->cons_mutex);
+ list_del(&constraint->list);
+
+ if (constraint->min_freq >= b->psys_min_freq) {
+ list_for_each_entry(c, &b->constraints, list)
+ if (c->min_freq > min_freq)
+ min_freq = c->min_freq;
+
+ b->psys_min_freq = clamp(min_freq,
+ b->psys_fused_freqs.efficient_freq,
+ b->psys_fused_freqs.max_freq);
+ ipu_buttress_set_psys_freq(isp, b->psys_min_freq);
+ }
+ mutex_unlock(&b->cons_mutex);
+}
+EXPORT_SYMBOL_GPL(ipu_buttress_remove_psys_constraint);
+
+int ipu_buttress_reset_authentication(struct ipu_device *isp)
+{
+ int ret;
+ u32 val;
+
+ if (!isp->secure_mode) {
+ dev_dbg(&isp->pdev->dev,
+ "Non-secure mode -> skip authentication\n");
+ return 0;
+ }
+
+ writel(BUTTRESS_FW_RESET_CTL_START, isp->base +
+ BUTTRESS_REG_FW_RESET_CTL);
+
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_FW_RESET_CTL, val,
+ val & BUTTRESS_FW_RESET_CTL_DONE, 500,
+ BUTTRESS_CSE_FWRESET_TIMEOUT);
+ if (ret) {
+ dev_err(&isp->pdev->dev,
+ "Time out while resetting authentication state\n");
+ } else {
+ dev_info(&isp->pdev->dev,
+ "FW reset for authentication done\n");
+ writel(0, isp->base + BUTTRESS_REG_FW_RESET_CTL);
+ /* leave some time for HW restore */
+ usleep_range(800, 1000);
+ }
+
+ return ret;
+}
+
+int ipu_buttress_map_fw_image(struct ipu_bus_device *sys,
+ const struct firmware *fw, struct sg_table *sgt)
+{
+ struct page **pages;
+ const void *addr;
+ unsigned long n_pages, i;
+ int rval;
+
+ n_pages = PAGE_ALIGN(fw->size) >> PAGE_SHIFT;
+
+ pages = kmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
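+	/* the firmware blob is vmalloc'ed; collect its backing pages */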
+ addr = fw->data;
+ for (i = 0; i < n_pages; i++) {
+ struct page *p = vmalloc_to_page(addr);
+
+ if (!p) {
+ rval = -ENODEV;
+ goto out;
+ }
+ pages[i] = p;
+ addr += PAGE_SIZE;
+ }
+
+ rval = sg_alloc_table_from_pages(sgt, pages, n_pages, 0, fw->size,
+ GFP_KERNEL);
+ if (rval) {
+ rval = -ENOMEM;
+ goto out;
+ }
+
+ n_pages = dma_map_sg(&sys->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ if (n_pages != sgt->nents) {
+ rval = -ENOMEM;
+ sg_free_table(sgt);
+ goto out;
+ }
+
+ dma_sync_sg_for_device(&sys->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+
+out:
+ kfree(pages);
+
+ return rval;
+}
+EXPORT_SYMBOL_GPL(ipu_buttress_map_fw_image);
+
+int ipu_buttress_unmap_fw_image(struct ipu_bus_device *sys,
+ struct sg_table *sgt)
+{
+ dma_unmap_sg(&sys->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
+ sg_free_table(sgt);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_buttress_unmap_fw_image);
+
+int ipu_buttress_authenticate(struct ipu_device *isp)
+{
+ struct ipu_psys_pdata *psys_pdata;
+ struct ipu_buttress *b = &isp->buttress;
+ u32 data, mask, done, fail;
+ int rval;
+
+ if (!isp->secure_mode) {
+ dev_dbg(&isp->pdev->dev,
+ "Non-secure mode -> skip authentication\n");
+ return 0;
+ }
+
+ psys_pdata = isp->psys->pdata;
+
+ mutex_lock(&b->auth_mutex);
+
+ if (ipu_buttress_auth_done(isp)) {
+ rval = 0;
+ goto iunit_power_off;
+ }
+
+ /*
+ * Write address of FIT table to FW_SOURCE register
+ * Let's use fw address. I.e. not using FIT table yet
+ */
+ data = lower_32_bits(isp->pkg_dir_dma_addr);
+ writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_LO);
+
+ data = upper_32_bits(isp->pkg_dir_dma_addr);
+ writel(data, isp->base + BUTTRESS_REG_FW_SOURCE_BASE_HI);
+
+	/*
+	 * Write boot_load into IU2CSEDATA0.
+	 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to
+	 * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as well.
+	 */
+ dev_info(&isp->pdev->dev, "Sending BOOT_LOAD to CSE\n");
+ rval = ipu_buttress_ipc_send(isp, IPU_BUTTRESS_IPC_CSE,
+ BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD,
+ 1, 1,
+ BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "CSE boot_load failed\n");
+ goto iunit_power_off;
+ }
+
+ mask = BUTTRESS_SECURITY_CTL_FW_SETUP_MASK;
+ done = BUTTRESS_SECURITY_CTL_FW_SETUP_DONE;
+ fail = BUTTRESS_SECURITY_CTL_AUTH_FAILED;
+ rval = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
+ ((data & mask) == done ||
+ (data & mask) == fail), 500,
+ BUTTRESS_CSE_BOOTLOAD_TIMEOUT);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "CSE boot_load timeout\n");
+ goto iunit_power_off;
+ }
+
+ data = readl(isp->base + BUTTRESS_REG_SECURITY_CTL) & mask;
+ if (data == fail) {
+ dev_err(&isp->pdev->dev, "CSE auth failed\n");
+ rval = -EINVAL;
+ goto iunit_power_off;
+ }
+
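+	/* wait for the bootloader to report the magic key in its status reg */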
+ rval = readl_poll_timeout(psys_pdata->base + BOOTLOADER_STATUS_OFFSET,
+ data, data == BOOTLOADER_MAGIC_KEY, 500,
+ BUTTRESS_CSE_BOOTLOAD_TIMEOUT);
+ if (rval) {
+		dev_err(&isp->pdev->dev,
+			"Timeout waiting for bootloader magic key, status 0x%x\n",
+			data);
+ goto iunit_power_off;
+ }
+
+	/*
+	 * Write authenticate_run into IU2CSEDATA0.
+	 * Write sizeof(boot_load) | 0x2 << CLIENT_ID to
+	 * IU2CSEDB.IU2CSECMD and set IU2CSEDB.IU2CSEBUSY as well.
+	 */
+ dev_info(&isp->pdev->dev, "Sending AUTHENTICATE_RUN to CSE\n");
+ rval = ipu_buttress_ipc_send(isp, IPU_BUTTRESS_IPC_CSE,
+ BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN,
+ 1, 1,
+ BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "CSE authenticate_run failed\n");
+ goto iunit_power_off;
+ }
+
+ done = BUTTRESS_SECURITY_CTL_AUTH_DONE;
+ rval = readl_poll_timeout(isp->base + BUTTRESS_REG_SECURITY_CTL, data,
+ ((data & mask) == done ||
+ (data & mask) == fail), 500,
+ BUTTRESS_CSE_AUTHENTICATE_TIMEOUT);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "CSE authenticate timeout\n");
+ goto iunit_power_off;
+ }
+
+ data = readl(isp->base + BUTTRESS_REG_SECURITY_CTL) & mask;
+ if (data == fail) {
+		dev_err(&isp->pdev->dev, "CSE authenticate failed\n");
+ rval = -EINVAL;
+ goto iunit_power_off;
+ }
+
+ dev_info(&isp->pdev->dev, "CSE authenticate_run done\n");
+
+iunit_power_off:
+ mutex_unlock(&b->auth_mutex);
+
+ return rval;
+}
+
+static int ipu_buttress_send_tsc_request(struct ipu_device *isp)
+{
+ u32 val, mask, shift, done;
+ int ret;
+
+ mask = BUTTRESS_PWR_STATE_HH_STATUS_MASK;
+ shift = BUTTRESS_PWR_STATE_HH_STATUS_SHIFT;
+
+ writel(BUTTRESS_FABRIC_CMD_START_TSC_SYNC,
+ isp->base + BUTTRESS_REG_FABRIC_CMD);
+
+ val = readl(isp->base + BUTTRESS_REG_PWR_STATE);
+ val = (val & mask) >> shift;
+ if (val == BUTTRESS_PWR_STATE_HH_STATE_ERR) {
+ dev_err(&isp->pdev->dev, "Start tsc sync failed\n");
+ return -EINVAL;
+ }
+
+ done = BUTTRESS_PWR_STATE_HH_STATE_DONE;
+ ret = readl_poll_timeout(isp->base + BUTTRESS_REG_PWR_STATE, val,
+ ((val & mask) >> shift == done), 500,
+ BUTTRESS_TSC_SYNC_TIMEOUT);
+ if (ret)
+ dev_err(&isp->pdev->dev, "Start tsc sync timeout\n");
+
+ return ret;
+}
+
+int ipu_buttress_start_tsc_sync(struct ipu_device *isp)
+{
+ unsigned int i;
+
+ for (i = 0; i < BUTTRESS_TSC_SYNC_RESET_TRIAL_MAX; i++) {
+ int ret;
+
+ ret = ipu_buttress_send_tsc_request(isp);
+ if (ret == -ETIMEDOUT) {
+ u32 val;
+ /* set tsw soft reset */
+ val = readl(isp->base + BUTTRESS_REG_TSW_CTL);
+ val = val | BUTTRESS_TSW_CTL_SOFT_RESET;
+ writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
+ /* clear tsw soft reset */
+ val = val & (~BUTTRESS_TSW_CTL_SOFT_RESET);
+ writel(val, isp->base + BUTTRESS_REG_TSW_CTL);
+
+ continue;
+ }
+ return ret;
+ }
+
+	dev_err(&isp->pdev->dev, "TSC sync failed (timeout)\n");
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(ipu_buttress_start_tsc_sync);
+
+struct clk_ipu_sensor {
+ struct ipu_device *isp;
+ struct clk_hw hw;
+ unsigned int id;
+ unsigned long rate;
+};
+
+#define to_clk_ipu_sensor(_hw) container_of(_hw, struct clk_ipu_sensor, hw)
+
+int ipu_buttress_tsc_read(struct ipu_device *isp, u64 *val)
+{
+ u32 tsc_hi_1, tsc_hi_2, tsc_lo;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ tsc_hi_1 = readl(isp->base + BUTTRESS_REG_TSC_HI);
+ tsc_lo = readl(isp->base + BUTTRESS_REG_TSC_LO);
+ tsc_hi_2 = readl(isp->base + BUTTRESS_REG_TSC_HI);
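+	/*
+	 * TSC_HI is read twice to detect a 32-bit rollover between the
+	 * reads; the MSB of TSC_LO tells which TSC_HI value pairs with it.
+	 */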
+ if (tsc_hi_1 == tsc_hi_2) {
+ *val = (u64)tsc_hi_1 << 32 | tsc_lo;
+ } else {
+ /* Check if TSC has rolled over */
+ if (tsc_lo & BIT(31))
+ *val = (u64)tsc_hi_1 << 32 | tsc_lo;
+ else
+ *val = (u64)tsc_hi_2 << 32 | tsc_lo;
+ }
+ local_irq_restore(flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_buttress_tsc_read);
+
+#ifdef CONFIG_DEBUG_FS
+
+static int ipu_buttress_reg_open(struct inode *inode, struct file *file)
+{
+ if (!inode->i_private)
+ return -EACCES;
+
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t ipu_buttress_reg_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct debugfs_reg32 *reg = file->private_data;
+ u8 tmp[11];
+ u32 val = readl((void __iomem *)reg->offset);
+ int len = scnprintf(tmp, sizeof(tmp), "0x%08x", val);
+
+	return simple_read_from_buffer(buf, count, ppos, tmp, len);
+}
+
+static ssize_t ipu_buttress_reg_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct debugfs_reg32 *reg = file->private_data;
+ u32 val;
+ int rval;
+
+ rval = kstrtou32_from_user(buf, count, 0, &val);
+ if (rval)
+ return rval;
+
+ writel(val, (void __iomem *)reg->offset);
+
+ return count;
+}
+
+static struct debugfs_reg32 buttress_regs[] = {
+ {"IU2CSEDB0", BUTTRESS_REG_IU2CSEDB0},
+ {"IU2CSEDATA0", BUTTRESS_REG_IU2CSEDATA0},
+ {"CSE2IUDB0", BUTTRESS_REG_CSE2IUDB0},
+ {"CSE2IUDATA0", BUTTRESS_REG_CSE2IUDATA0},
+ {"CSE2IUCSR", BUTTRESS_REG_CSE2IUCSR},
+ {"IU2CSECSR", BUTTRESS_REG_IU2CSECSR},
+};
+
+static const struct file_operations ipu_buttress_reg_fops = {
+ .owner = THIS_MODULE,
+ .open = ipu_buttress_reg_open,
+ .read = ipu_buttress_reg_read,
+ .write = ipu_buttress_reg_write,
+};
+
+static int ipu_buttress_start_tsc_sync_set(void *data, u64 val)
+{
+ struct ipu_device *isp = data;
+
+ return ipu_buttress_start_tsc_sync(isp);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_start_tsc_sync_fops, NULL,
+ ipu_buttress_start_tsc_sync_set, "%llu\n");
+
+static int ipu_buttress_tsc_get(void *data, u64 *val)
+{
+ return ipu_buttress_tsc_read(data, val);
+}
+DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_tsc_fops, ipu_buttress_tsc_get,
+ NULL, "%llu\n");
+
+static int ipu_buttress_psys_force_freq_get(void *data, u64 *val)
+{
+ struct ipu_device *isp = data;
+
+ *val = isp->buttress.psys_force_ratio * BUTTRESS_PS_FREQ_STEP;
+
+ return 0;
+}
+
+static int ipu_buttress_psys_force_freq_set(void *data, u64 val)
+{
+ struct ipu_device *isp = data;
+
+ if (val && (val < BUTTRESS_MIN_FORCE_PS_FREQ ||
+ val > BUTTRESS_MAX_FORCE_PS_FREQ))
+ return -EINVAL;
+
+ do_div(val, BUTTRESS_PS_FREQ_STEP);
+ isp->buttress.psys_force_ratio = val;
+
+ if (isp->buttress.psys_force_ratio)
+ ipu_buttress_set_psys_ratio(isp,
+ isp->buttress.psys_force_ratio,
+ isp->buttress.psys_force_ratio);
+ else
+ ipu_buttress_set_psys_freq(isp, isp->buttress.psys_min_freq);
+
+ return 0;
+}
+
+static int ipu_buttress_isys_freq_get(void *data, u64 *val)
+{
+ struct ipu_device *isp = data;
+ u32 reg_val;
+ int rval;
+
+ rval = pm_runtime_get_sync(&isp->isys->dev);
+ if (rval < 0) {
+ pm_runtime_put(&isp->isys->dev);
+ dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", rval);
+ return rval;
+ }
+
+ reg_val = readl(isp->base + BUTTRESS_REG_IS_FREQ_CTL);
+
+ pm_runtime_put(&isp->isys->dev);
+
+ *val = IPU_IS_FREQ_RATIO_BASE *
+ (reg_val & IPU_BUTTRESS_IS_FREQ_CTL_DIVISOR_MASK);
+
+ return 0;
+}
+
+static int ipu_buttress_isys_freq_set(void *data, u64 val)
+{
+ struct ipu_device *isp = data;
+ int rval;
+
+ if (val < BUTTRESS_MIN_FORCE_IS_FREQ ||
+ val > BUTTRESS_MAX_FORCE_IS_FREQ)
+ return -EINVAL;
+
+ rval = pm_runtime_get_sync(&isp->isys->dev);
+ if (rval < 0) {
+ pm_runtime_put(&isp->isys->dev);
+ dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", rval);
+ return rval;
+ }
+
+ do_div(val, BUTTRESS_IS_FREQ_STEP);
+ if (val)
+ ipu_buttress_set_isys_ratio(isp, val);
+
+ pm_runtime_put(&isp->isys->dev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_psys_force_freq_fops,
+ ipu_buttress_psys_force_freq_get,
+ ipu_buttress_psys_force_freq_set, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_psys_freq_fops,
+ ipu_buttress_psys_freq_get, NULL, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(ipu_buttress_isys_freq_fops,
+ ipu_buttress_isys_freq_get,
+ ipu_buttress_isys_freq_set, "%llu\n");
+
+int ipu_buttress_debugfs_init(struct ipu_device *isp)
+{
+ struct debugfs_reg32 *reg =
+ devm_kcalloc(&isp->pdev->dev, ARRAY_SIZE(buttress_regs),
+ sizeof(*reg), GFP_KERNEL);
+ struct dentry *dir, *file;
+ int i;
+
+ if (!reg)
+ return -ENOMEM;
+
+ dir = debugfs_create_dir("buttress", isp->ipu_dir);
+ if (!dir)
+ return -ENOMEM;
+
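+	/*
+	 * debugfs_reg32.offset carries the full MMIO address here; the
+	 * read/write handlers cast it back to an __iomem pointer.
+	 */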
+ for (i = 0; i < ARRAY_SIZE(buttress_regs); i++, reg++) {
+ reg->offset = (unsigned long)isp->base +
+ buttress_regs[i].offset;
+ reg->name = buttress_regs[i].name;
+ file = debugfs_create_file(reg->name, 0700,
+ dir, reg, &ipu_buttress_reg_fops);
+ if (!file)
+ goto err;
+ }
+
+ file = debugfs_create_file("start_tsc_sync", 0200, dir, isp,
+ &ipu_buttress_start_tsc_sync_fops);
+ if (!file)
+ goto err;
+ file = debugfs_create_file("tsc", 0400, dir, isp,
+ &ipu_buttress_tsc_fops);
+ if (!file)
+ goto err;
+ file = debugfs_create_file("psys_force_freq", 0700, dir, isp,
+ &ipu_buttress_psys_force_freq_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("psys_freq", 0400, dir, isp,
+ &ipu_buttress_psys_freq_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("isys_freq", 0700, dir, isp,
+ &ipu_buttress_isys_freq_fops);
+ if (!file)
+ goto err;
+
+ return 0;
+err:
+ debugfs_remove_recursive(dir);
+ return -ENOMEM;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+u64 ipu_buttress_tsc_ticks_to_ns(u64 ticks)
+{
+	u64 ns = ticks * 10000;
+
+	/*
+	 * The TSC clock frequency is 19.2 MHz, so converting a TSC tick
+	 * count to ns is:
+	 * ns = ticks * 1000000000 / 19200000
+	 *    = ticks * 10000 / 192
+	 */
+	do_div(ns, 192);
+
+ return ns;
+}
+EXPORT_SYMBOL_GPL(ipu_buttress_tsc_ticks_to_ns);
+
+static ssize_t psys_fused_min_freq_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ isp->buttress.psys_fused_freqs.min_freq);
+}
+
+static DEVICE_ATTR_RO(psys_fused_min_freq);
+
+static ssize_t psys_fused_max_freq_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ isp->buttress.psys_fused_freqs.max_freq);
+}
+
+static DEVICE_ATTR_RO(psys_fused_max_freq);
+
+static ssize_t psys_fused_efficient_freq_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ isp->buttress.psys_fused_freqs.efficient_freq);
+}
+
+static DEVICE_ATTR_RO(psys_fused_efficient_freq);
+
+int ipu_buttress_restore(struct ipu_device *isp)
+{
+ struct ipu_buttress *b = &isp->buttress;
+
+ writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
+ writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
+ writel(b->wdt_cached_value, isp->base + BUTTRESS_REG_WDT);
+
+ return 0;
+}
+
+int ipu_buttress_init(struct ipu_device *isp)
+{
+ struct ipu_buttress *b = &isp->buttress;
+ int rval, ipc_reset_retry = BUTTRESS_CSE_IPC_RESET_RETRY;
+
+ mutex_init(&b->power_mutex);
+ mutex_init(&b->auth_mutex);
+ mutex_init(&b->cons_mutex);
+ mutex_init(&b->ipc_mutex);
+ init_completion(&b->ish.send_complete);
+ init_completion(&b->cse.send_complete);
+ init_completion(&b->ish.recv_complete);
+ init_completion(&b->cse.recv_complete);
+
+ b->cse.nack = BUTTRESS_CSE2IUDATA0_IPC_NACK;
+ b->cse.nack_mask = BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK;
+ b->cse.csr_in = BUTTRESS_REG_CSE2IUCSR;
+ b->cse.csr_out = BUTTRESS_REG_IU2CSECSR;
+ b->cse.db0_in = BUTTRESS_REG_CSE2IUDB0;
+ b->cse.db0_out = BUTTRESS_REG_IU2CSEDB0;
+ b->cse.data0_in = BUTTRESS_REG_CSE2IUDATA0;
+ b->cse.data0_out = BUTTRESS_REG_IU2CSEDATA0;
+
+ /* no ISH on IPU6 */
+ memset(&b->ish, 0, sizeof(b->ish));
+ INIT_LIST_HEAD(&b->constraints);
+
+ ipu_buttress_set_secure_mode(isp);
+ isp->secure_mode = ipu_buttress_get_secure_mode(isp);
+ if (isp->secure_mode != secure_mode_enable)
+ dev_warn(&isp->pdev->dev, "Unable to set secure mode\n");
+
+ dev_info(&isp->pdev->dev, "IPU in %s mode\n",
+ isp->secure_mode ? "secure" : "non-secure");
+
+ b->wdt_cached_value = readl(isp->base + BUTTRESS_REG_WDT);
+ writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_CLEAR);
+ writel(BUTTRESS_IRQS, isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+ rval = device_create_file(&isp->pdev->dev,
+ &dev_attr_psys_fused_min_freq);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "Create min freq file failed\n");
+ goto err_mutex_destroy;
+ }
+
+ rval = device_create_file(&isp->pdev->dev,
+ &dev_attr_psys_fused_max_freq);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "Create max freq file failed\n");
+ goto err_remove_min_freq_file;
+ }
+
+ rval = device_create_file(&isp->pdev->dev,
+ &dev_attr_psys_fused_efficient_freq);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "Create efficient freq file failed\n");
+ goto err_remove_max_freq_file;
+ }
+
+	/*
+	 * Retry a couple of times in case CSE initialization is delayed
+	 * for one reason or another.
+	 */
+ do {
+ rval = ipu_buttress_ipc_reset(isp, &b->cse);
+ if (rval) {
+ dev_warn(&isp->pdev->dev,
+ "IPC reset protocol failed, retrying\n");
+ } else {
+ dev_info(&isp->pdev->dev, "IPC reset done\n");
+ return 0;
+ }
+ } while (ipc_reset_retry--);
+
+ dev_err(&isp->pdev->dev, "IPC reset protocol failed\n");
+
+err_remove_max_freq_file:
+ device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_max_freq);
+err_remove_min_freq_file:
+ device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_min_freq);
+err_mutex_destroy:
+ mutex_destroy(&b->power_mutex);
+ mutex_destroy(&b->auth_mutex);
+ mutex_destroy(&b->cons_mutex);
+ mutex_destroy(&b->ipc_mutex);
+
+ return rval;
+}
+
+void ipu_buttress_exit(struct ipu_device *isp)
+{
+ struct ipu_buttress *b = &isp->buttress;
+
+ writel(0, isp->base + BUTTRESS_REG_ISR_ENABLE);
+
+ device_remove_file(&isp->pdev->dev,
+ &dev_attr_psys_fused_efficient_freq);
+ device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_max_freq);
+ device_remove_file(&isp->pdev->dev, &dev_attr_psys_fused_min_freq);
+
+ mutex_destroy(&b->power_mutex);
+ mutex_destroy(&b->auth_mutex);
+ mutex_destroy(&b->cons_mutex);
+ mutex_destroy(&b->ipc_mutex);
+}
new file mode 100644
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_BUTTRESS_H
+#define IPU_BUTTRESS_H
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include "ipu.h"
+
+#define IPU_BUTTRESS_NUM_OF_SENS_CKS 3
+#define IPU_BUTTRESS_NUM_OF_PLL_CKS 3
+#define IPU_BUTTRESS_TSC_CLK 19200000
+
+#define BUTTRESS_POWER_TIMEOUT 200000
+
+#define BUTTRESS_PS_FREQ_STEP 25U
+#define BUTTRESS_MIN_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 8)
+#define BUTTRESS_MAX_FORCE_PS_FREQ (BUTTRESS_PS_FREQ_STEP * 32)
+
+#define BUTTRESS_IS_FREQ_STEP 25U
+#define BUTTRESS_MIN_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 8)
+#define BUTTRESS_MAX_FORCE_IS_FREQ (BUTTRESS_IS_FREQ_STEP * 16)
+
+struct ipu_buttress_ctrl {
+ u32 freq_ctl, pwr_sts_shift, pwr_sts_mask, pwr_sts_on, pwr_sts_off;
+ union {
+ unsigned int divisor;
+ unsigned int ratio;
+ };
+ union {
+ unsigned int divisor_shift;
+ unsigned int ratio_shift;
+ };
+ unsigned int qos_floor;
+ bool started;
+};
+
+struct ipu_buttress_fused_freqs {
+ unsigned int min_freq;
+ unsigned int max_freq;
+ unsigned int efficient_freq;
+};
+
+struct ipu_buttress_ipc {
+ struct completion send_complete;
+ struct completion recv_complete;
+ u32 nack;
+ u32 nack_mask;
+ u32 recv_data;
+ u32 csr_out;
+ u32 csr_in;
+ u32 db0_in;
+ u32 db0_out;
+ u32 data0_out;
+ u32 data0_in;
+};
+
+struct ipu_buttress {
+ struct mutex power_mutex, auth_mutex, cons_mutex, ipc_mutex;
+ struct ipu_buttress_ipc cse;
+ struct ipu_buttress_ipc ish;
+ struct list_head constraints;
+ struct ipu_buttress_fused_freqs psys_fused_freqs;
+ unsigned int psys_min_freq;
+ u32 wdt_cached_value;
+ u8 psys_force_ratio;
+ bool force_suspend;
+};
+
+struct ipu_buttress_sensor_clk_freq {
+ unsigned int rate;
+ unsigned int val;
+};
+
+struct firmware;
+
+enum ipu_buttress_ipc_domain {
+ IPU_BUTTRESS_IPC_CSE,
+ IPU_BUTTRESS_IPC_ISH,
+};
+
+struct ipu_buttress_constraint {
+ struct list_head list;
+ unsigned int min_freq;
+};
+
+struct ipu_ipc_buttress_bulk_msg {
+ u32 cmd;
+ u32 expected_resp;
+ bool require_resp;
+ u8 cmd_size;
+};
+
+int ipu_buttress_ipc_reset(struct ipu_device *isp,
+ struct ipu_buttress_ipc *ipc);
+int ipu_buttress_map_fw_image(struct ipu_bus_device *sys,
+ const struct firmware *fw, struct sg_table *sgt);
+int ipu_buttress_unmap_fw_image(struct ipu_bus_device *sys,
+ struct sg_table *sgt);
+int ipu_buttress_power(struct device *dev,
+ struct ipu_buttress_ctrl *ctrl, bool on);
+void
+ipu_buttress_add_psys_constraint(struct ipu_device *isp,
+ struct ipu_buttress_constraint *constraint);
+void
+ipu_buttress_remove_psys_constraint(struct ipu_device *isp,
+ struct ipu_buttress_constraint *constraint);
+void ipu_buttress_set_secure_mode(struct ipu_device *isp);
+bool ipu_buttress_get_secure_mode(struct ipu_device *isp);
+int ipu_buttress_authenticate(struct ipu_device *isp);
+int ipu_buttress_reset_authentication(struct ipu_device *isp);
+bool ipu_buttress_auth_done(struct ipu_device *isp);
+int ipu_buttress_start_tsc_sync(struct ipu_device *isp);
+int ipu_buttress_tsc_read(struct ipu_device *isp, u64 *val);
+u64 ipu_buttress_tsc_ticks_to_ns(u64 ticks);
+
+irqreturn_t ipu_buttress_isr(int irq, void *isp_ptr);
+irqreturn_t ipu_buttress_isr_threaded(int irq, void *isp_ptr);
+int ipu_buttress_debugfs_init(struct ipu_device *isp);
+int ipu_buttress_init(struct ipu_device *isp);
+void ipu_buttress_exit(struct ipu_device *isp);
+void ipu_buttress_csi_port_config(struct ipu_device *isp,
+ u32 legacy, u32 combo);
+int ipu_buttress_restore(struct ipu_device *isp);
+
+int ipu_buttress_psys_freq_get(void *data, u64 *val);
+#endif /* IPU_BUTTRESS_H */
new file mode 100644
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2015 - 2020 Intel Corporation
+
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
+#include "ipu.h"
+#include "ipu-cpd.h"
+
+/* 15 entries + header */
+#define MAX_PKG_DIR_ENT_CNT 16
+/* 2 qword per entry/header */
+#define PKG_DIR_ENT_LEN 2
+/* PKG_DIR size in bytes */
+#define PKG_DIR_SIZE ((MAX_PKG_DIR_ENT_CNT) * \
+ (PKG_DIR_ENT_LEN) * sizeof(u64))
+#define PKG_DIR_ID_SHIFT 48
+#define PKG_DIR_ID_MASK 0x7f
+#define PKG_DIR_VERSION_SHIFT 32
+#define PKG_DIR_SIZE_MASK 0xfffff
+/* _IUPKDR_ */
+#define PKG_DIR_HDR_MARK 0x5f4955504b44525f
+
+/* $CPD */
+#define CPD_HDR_MARK 0x44504324
+
+/* Maximum size is 2K DWORDs */
+#define MAX_MANIFEST_SIZE (2 * 1024 * sizeof(u32))
+
+/* Maximum size is 64k */
+#define MAX_METADATA_SIZE (64 * 1024)
+
+#define MAX_COMPONENT_ID 127
+#define MAX_COMPONENT_VERSION 0xffff
+
+#define CPD_MANIFEST_IDX 0
+#define CPD_METADATA_IDX 1
+#define CPD_MODULEDATA_IDX 2
+
+static inline struct ipu_cpd_ent *ipu_cpd_get_entries(const void *cpd)
+{
+ const struct ipu_cpd_hdr *cpd_hdr = cpd;
+
+ return (struct ipu_cpd_ent *)((u8 *)cpd + cpd_hdr->hdr_len);
+}
+
+#define ipu_cpd_get_entry(cpd, idx) (&ipu_cpd_get_entries(cpd)[idx])
+#define ipu_cpd_get_manifest(cpd) ipu_cpd_get_entry(cpd, CPD_MANIFEST_IDX)
+#define ipu_cpd_get_metadata(cpd) ipu_cpd_get_entry(cpd, CPD_METADATA_IDX)
+#define ipu_cpd_get_moduledata(cpd) ipu_cpd_get_entry(cpd, CPD_MODULEDATA_IDX)
+
+static const struct ipu_cpd_metadata_cmpnt *
+ipu_cpd_metadata_get_cmpnt(struct ipu_device *isp,
+ const void *metadata,
+ unsigned int metadata_size,
+ u8 idx)
+{
+ const struct ipu_cpd_metadata_extn *extn;
+ const struct ipu_cpd_metadata_cmpnt *cmpnts;
+ int cmpnt_count;
+
+ extn = metadata;
+ cmpnts = metadata + sizeof(*extn);
+ cmpnt_count = (metadata_size - sizeof(*extn)) / sizeof(*cmpnts);
+
+ if (idx > MAX_COMPONENT_ID || idx >= cmpnt_count) {
+ dev_err(&isp->pdev->dev, "Component index out of range (%d)\n",
+ idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &cmpnts[idx];
+}
+
+static u32 ipu_cpd_metadata_cmpnt_version(struct ipu_device *isp,
+ const void *metadata,
+ unsigned int metadata_size, u8 idx)
+{
+ const struct ipu_cpd_metadata_cmpnt *cmpnt =
+ ipu_cpd_metadata_get_cmpnt(isp, metadata,
+ metadata_size, idx);
+
+ if (IS_ERR(cmpnt))
+ return PTR_ERR(cmpnt);
+
+ return cmpnt->ver;
+}
+
+static int ipu_cpd_metadata_get_cmpnt_id(struct ipu_device *isp,
+ const void *metadata,
+ unsigned int metadata_size, u8 idx)
+{
+ const struct ipu_cpd_metadata_cmpnt *cmpnt =
+ ipu_cpd_metadata_get_cmpnt(isp, metadata,
+ metadata_size, idx);
+
+ if (IS_ERR(cmpnt))
+ return PTR_ERR(cmpnt);
+
+ return cmpnt->id;
+}
+
+static const struct ipu6_cpd_metadata_cmpnt *
+ipu6_cpd_metadata_get_cmpnt(struct ipu_device *isp,
+ const void *metadata,
+ unsigned int metadata_size,
+ u8 idx)
+{
+ const struct ipu_cpd_metadata_extn *extn = metadata;
+ const struct ipu6_cpd_metadata_cmpnt *cmpnts = metadata + sizeof(*extn);
+ int cmpnt_count;
+
+ cmpnt_count = (metadata_size - sizeof(*extn)) / sizeof(*cmpnts);
+ if (idx > MAX_COMPONENT_ID || idx >= cmpnt_count) {
+ dev_err(&isp->pdev->dev, "Component index out of range (%d)\n",
+ idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &cmpnts[idx];
+}
+
+static u32 ipu6_cpd_metadata_cmpnt_version(struct ipu_device *isp,
+ const void *metadata,
+ unsigned int metadata_size, u8 idx)
+{
+ const struct ipu6_cpd_metadata_cmpnt *cmpnt =
+ ipu6_cpd_metadata_get_cmpnt(isp, metadata,
+ metadata_size, idx);
+
+ if (IS_ERR(cmpnt))
+ return PTR_ERR(cmpnt);
+
+ return cmpnt->ver;
+}
+
+static int ipu6_cpd_metadata_get_cmpnt_id(struct ipu_device *isp,
+ const void *metadata,
+ unsigned int metadata_size, u8 idx)
+{
+ const struct ipu6_cpd_metadata_cmpnt *cmpnt =
+ ipu6_cpd_metadata_get_cmpnt(isp, metadata,
+ metadata_size, idx);
+
+ if (IS_ERR(cmpnt))
+ return PTR_ERR(cmpnt);
+
+ return cmpnt->id;
+}
+
+static int ipu_cpd_parse_module_data(struct ipu_device *isp,
+ const void *module_data,
+ unsigned int module_data_size,
+ dma_addr_t dma_addr_module_data,
+ u64 *pkg_dir,
+ const void *metadata,
+ unsigned int metadata_size)
+{
+ const struct ipu_cpd_module_data_hdr *module_data_hdr;
+ const struct ipu_cpd_hdr *dir_hdr;
+ const struct ipu_cpd_ent *dir_ent;
+ int i;
+ u8 len;
+
+ if (!module_data)
+ return -EINVAL;
+
+ module_data_hdr = module_data;
+ dir_hdr = module_data + module_data_hdr->hdr_len;
+ len = dir_hdr->hdr_len;
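+	/* directory entries follow immediately after the CPD header */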
+ dir_ent = (struct ipu_cpd_ent *)(((u8 *)dir_hdr) + len);
+
+ pkg_dir[0] = PKG_DIR_HDR_MARK;
+ /* pkg_dir entry count = component count + pkg_dir header */
+ pkg_dir[1] = dir_hdr->ent_cnt + 1;
+
+ for (i = 0; i < dir_hdr->ent_cnt; i++, dir_ent++) {
+ u64 *p = &pkg_dir[PKG_DIR_ENT_LEN + i * PKG_DIR_ENT_LEN];
+ int ver, id;
+
+ *p++ = dma_addr_module_data + dir_ent->offset;
+
+ if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP)
+ id = ipu6_cpd_metadata_get_cmpnt_id(isp, metadata,
+ metadata_size, i);
+ else
+ id = ipu_cpd_metadata_get_cmpnt_id(isp, metadata,
+ metadata_size, i);
+
+ if (id < 0 || id > MAX_COMPONENT_ID) {
+ dev_err(&isp->pdev->dev,
+ "Failed to parse component id\n");
+ return -EINVAL;
+ }
+
+ if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP)
+ ver = ipu6_cpd_metadata_cmpnt_version(isp, metadata,
+ metadata_size, i);
+ else
+ ver = ipu_cpd_metadata_cmpnt_version(isp, metadata,
+ metadata_size, i);
+
+ if (ver < 0 || ver > MAX_COMPONENT_VERSION) {
+ dev_err(&isp->pdev->dev,
+ "Failed to parse component version\n");
+ return -EINVAL;
+ }
+
+ /*
+ * PKG_DIR Entry (type == id)
+ * 63:56 55 54:48 47:32 31:24 23:0
+ * Rsvd Rsvd Type Version Rsvd Size
+ */
+ *p = dir_ent->len | (u64)id << PKG_DIR_ID_SHIFT |
+ (u64)ver << PKG_DIR_VERSION_SHIFT;
+ }
+
+ return 0;
+}
+
+void *ipu_cpd_create_pkg_dir(struct ipu_bus_device *adev,
+ const void *src,
+ dma_addr_t dma_addr_src,
+ dma_addr_t *dma_addr, unsigned int *pkg_dir_size)
+{
+ struct ipu_device *isp = adev->isp;
+ const struct ipu_cpd_ent *ent, *man_ent, *met_ent;
+ u64 *pkg_dir;
+ unsigned int man_sz, met_sz;
+ void *pkg_dir_pos;
+ int ret;
+
+ man_ent = ipu_cpd_get_manifest(src);
+ man_sz = man_ent->len;
+
+ met_ent = ipu_cpd_get_metadata(src);
+ met_sz = met_ent->len;
+
+ *pkg_dir_size = PKG_DIR_SIZE + man_sz + met_sz;
+	pkg_dir = dma_alloc_attrs(&adev->dev, *pkg_dir_size, dma_addr,
+				  GFP_KERNEL, 0);
+	if (!pkg_dir)
+		return NULL;
+
+ /*
+ * pkg_dir entry/header:
+ * qword | 63:56 | 55 | 54:48 | 47:32 | 31:24 | 23:0
+ * N Address/Offset/"_IUPKDR_"
+ * N + 1 | rsvd | rsvd | type | ver | rsvd | size
+ *
+	 * We can ignore the fields other than size in the N + 1 qword,
+	 * as they are 0 anyway. Just set the size for now.
+ */
+
+ ent = ipu_cpd_get_moduledata(src);
+
+ ret = ipu_cpd_parse_module_data(isp, src + ent->offset,
+ ent->len,
+ dma_addr_src + ent->offset,
+ pkg_dir,
+ src + met_ent->offset, met_ent->len);
+ if (ret) {
+ dev_err(&isp->pdev->dev,
+ "Unable to parse module data section!\n");
+		/* free with the same device the buffer was allocated from */
+		dma_free_attrs(&adev->dev, *pkg_dir_size, pkg_dir,
+			       *dma_addr, 0);
+ return NULL;
+ }
+
+	/*
+	 * Copy the manifest after the pkg_dir table; pkg_dir is a u64
+	 * pointer, so this offset equals PKG_DIR_SIZE bytes.
+	 */
+	pkg_dir_pos = pkg_dir + PKG_DIR_ENT_LEN * MAX_PKG_DIR_ENT_CNT;
+ memcpy(pkg_dir_pos, src + man_ent->offset, man_sz);
+
+ /* Copy metadata after manifest */
+ pkg_dir_pos += man_sz;
+ memcpy(pkg_dir_pos, src + met_ent->offset, met_sz);
+
+ dma_sync_single_range_for_device(&adev->dev, *dma_addr,
+ 0, *pkg_dir_size, DMA_TO_DEVICE);
+
+ return pkg_dir;
+}
+EXPORT_SYMBOL_GPL(ipu_cpd_create_pkg_dir);
+
+void ipu_cpd_free_pkg_dir(struct ipu_bus_device *adev,
+ u64 *pkg_dir,
+ dma_addr_t dma_addr, unsigned int pkg_dir_size)
+{
+ dma_free_attrs(&adev->dev, pkg_dir_size, pkg_dir, dma_addr, 0);
+}
+EXPORT_SYMBOL_GPL(ipu_cpd_free_pkg_dir);
+
+static int ipu_cpd_validate_cpd(struct ipu_device *isp,
+ const void *cpd,
+ unsigned long cpd_size, unsigned long data_size)
+{
+ const struct ipu_cpd_hdr *cpd_hdr = cpd;
+ struct ipu_cpd_ent *ent;
+ unsigned int i;
+ u8 len;
+
+ len = cpd_hdr->hdr_len;
+
+ /* Ensure cpd hdr is within moduledata */
+ if (cpd_size < len) {
+ dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n");
+ return -EINVAL;
+ }
+
+ /* Sanity check for CPD header */
+ if ((cpd_size - len) / sizeof(*ent) < cpd_hdr->ent_cnt) {
+ dev_err(&isp->pdev->dev, "Invalid CPD header\n");
+ return -EINVAL;
+ }
+
+ /* Ensure that all entries are within moduledata */
+ ent = (struct ipu_cpd_ent *)(((u8 *)cpd_hdr) + len);
+ for (i = 0; i < cpd_hdr->ent_cnt; i++, ent++) {
+ if (data_size < ent->offset ||
+ data_size - ent->offset < ent->len) {
+ dev_err(&isp->pdev->dev, "Invalid CPD entry (%d)\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int ipu_cpd_validate_moduledata(struct ipu_device *isp,
+ const void *moduledata,
+ u32 moduledata_size)
+{
+ const struct ipu_cpd_module_data_hdr *mod_hdr = moduledata;
+ int rval;
+
+ /* Ensure moduledata hdr is within moduledata */
+ if (moduledata_size < sizeof(*mod_hdr) ||
+ moduledata_size < mod_hdr->hdr_len) {
+ dev_err(&isp->pdev->dev, "Invalid moduledata size\n");
+ return -EINVAL;
+ }
+
+ dev_info(&isp->pdev->dev, "FW version: %x\n", mod_hdr->fw_pkg_date);
+ rval = ipu_cpd_validate_cpd(isp, moduledata +
+ mod_hdr->hdr_len,
+ moduledata_size -
+ mod_hdr->hdr_len, moduledata_size);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "Invalid CPD in moduledata\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ipu_cpd_validate_metadata(struct ipu_device *isp,
+ const void *metadata, u32 meta_size)
+{
+ const struct ipu_cpd_metadata_extn *extn = metadata;
+ unsigned int size;
+
+ /* Sanity check for metadata size */
+ if (meta_size < sizeof(*extn) || meta_size > MAX_METADATA_SIZE) {
+ dev_err(&isp->pdev->dev, "%s: Invalid metadata\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Validate extension and image types */
+ if (extn->extn_type != IPU_CPD_METADATA_EXTN_TYPE_IUNIT ||
+ extn->img_type != IPU_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE) {
+ dev_err(&isp->pdev->dev,
+ "Invalid metadata descriptor img_type (%d)\n",
+ extn->img_type);
+ return -EINVAL;
+ }
+
+ /* Validate metadata size multiple of metadata components */
+ if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP)
+ size = sizeof(struct ipu6_cpd_metadata_cmpnt);
+ else
+ size = sizeof(struct ipu_cpd_metadata_cmpnt);
+
+ if ((meta_size - sizeof(*extn)) % size) {
+ dev_err(&isp->pdev->dev, "%s: Invalid metadata size\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int ipu_cpd_validate_cpd_file(struct ipu_device *isp,
+ const void *cpd_file, unsigned long cpd_file_size)
+{
+ const struct ipu_cpd_hdr *hdr = cpd_file;
+ struct ipu_cpd_ent *ent;
+ int rval;
+
+ rval = ipu_cpd_validate_cpd(isp, cpd_file,
+ cpd_file_size, cpd_file_size);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "Invalid CPD in file\n");
+ return -EINVAL;
+ }
+
+ /* Check for CPD file marker */
+ if (hdr->hdr_mark != CPD_HDR_MARK) {
+ dev_err(&isp->pdev->dev, "Invalid CPD header\n");
+ return -EINVAL;
+ }
+
+ /* Sanity check for manifest size */
+ ent = ipu_cpd_get_manifest(cpd_file);
+ if (ent->len > MAX_MANIFEST_SIZE) {
+ dev_err(&isp->pdev->dev, "Invalid manifest size\n");
+ return -EINVAL;
+ }
+
+ /* Validate metadata */
+ ent = ipu_cpd_get_metadata(cpd_file);
+ rval = ipu_cpd_validate_metadata(isp, cpd_file + ent->offset, ent->len);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "Invalid metadata\n");
+ return rval;
+ }
+
+ /* Validate moduledata */
+ ent = ipu_cpd_get_moduledata(cpd_file);
+ rval = ipu_cpd_validate_moduledata(isp, cpd_file + ent->offset,
+ ent->len);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "Invalid moduledata\n");
+ return rval;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_cpd_validate_cpd_file);
+
+unsigned int ipu_cpd_pkg_dir_get_address(const u64 *pkg_dir, int pkg_dir_idx)
+{
+	/* entry 0 is the pkg_dir header; component entries start at 1 */
+	return pkg_dir[(pkg_dir_idx + 1) * PKG_DIR_ENT_LEN];
+}
+EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_address);
+
+unsigned int ipu_cpd_pkg_dir_get_num_entries(const u64 *pkg_dir)
+{
+ return pkg_dir[1];
+}
+EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_num_entries);
+
+unsigned int ipu_cpd_pkg_dir_get_size(const u64 *pkg_dir, int pkg_dir_idx)
+{
+	return pkg_dir[(pkg_dir_idx + 1) * PKG_DIR_ENT_LEN + 1] &
+		PKG_DIR_SIZE_MASK;
+}
+EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_size);
+
+unsigned int ipu_cpd_pkg_dir_get_type(const u64 *pkg_dir, int pkg_dir_idx)
+{
+	return pkg_dir[(pkg_dir_idx + 1) * PKG_DIR_ENT_LEN + 1] >>
+		PKG_DIR_ID_SHIFT & PKG_DIR_ID_MASK;
+}
+EXPORT_SYMBOL_GPL(ipu_cpd_pkg_dir_get_type);
new file mode 100644
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2015 - 2020 Intel Corporation */
+
+#ifndef IPU_CPD_H
+#define IPU_CPD_H
+
+#define IPU_CPD_SIZE_OF_FW_ARCH_VERSION 7
+#define IPU_CPD_SIZE_OF_SYSTEM_VERSION 11
+#define IPU_CPD_SIZE_OF_COMPONENT_NAME 12
+
+#define IPU_CPD_METADATA_EXTN_TYPE_IUNIT 0x10
+
+#define IPU_CPD_METADATA_IMAGE_TYPE_RESERVED 0
+#define IPU_CPD_METADATA_IMAGE_TYPE_BOOTLOADER 1
+#define IPU_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE 2
+
+#define IPU_CPD_PKG_DIR_PSYS_SERVER_IDX 0
+#define IPU_CPD_PKG_DIR_ISYS_SERVER_IDX 1
+
+#define IPU_CPD_PKG_DIR_CLIENT_PG_TYPE 3
+
+#define IPU6_CPD_METADATA_HASH_KEY_SIZE 48
+#define IPU_CPD_METADATA_HASH_KEY_SIZE 32
+
+struct __packed ipu_cpd_module_data_hdr {
+ u32 hdr_len;
+ u32 endian;
+ u32 fw_pkg_date;
+ u32 hive_sdk_date;
+ u32 compiler_date;
+ u32 target_platform_type;
+ u8 sys_ver[IPU_CPD_SIZE_OF_SYSTEM_VERSION];
+ u8 fw_arch_ver[IPU_CPD_SIZE_OF_FW_ARCH_VERSION];
+ u8 rsvd[2];
+};
+
+/* The ipu_cpd_hdr structure is trimmed here because the chksum and
+ * sub_partition_name fields are unused on the host side.
+ * CSE layout version 1.6 for ipu6se (hdr_len = 0x10)
+ * CSE layout version 1.7 for ipu6 (hdr_len = 0x14)
+ */
+struct __packed ipu_cpd_hdr {
+ u32 hdr_mark;
+ u32 ent_cnt;
+ u8 hdr_ver;
+ u8 ent_ver;
+ u8 hdr_len;
+};
+
+struct __packed ipu_cpd_ent {
+ u8 name[IPU_CPD_SIZE_OF_COMPONENT_NAME];
+ u32 offset;
+ u32 len;
+ u8 rsvd[4];
+};
+
+struct __packed ipu_cpd_metadata_cmpnt {
+ u32 id;
+ u32 size;
+ u32 ver;
+ u8 sha2_hash[IPU_CPD_METADATA_HASH_KEY_SIZE];
+ u32 entry_point;
+ u32 icache_base_offs;
+ u8 attrs[16];
+};
+
+struct __packed ipu6_cpd_metadata_cmpnt {
+ u32 id;
+ u32 size;
+ u32 ver;
+ u8 sha2_hash[IPU6_CPD_METADATA_HASH_KEY_SIZE];
+ u32 entry_point;
+ u32 icache_base_offs;
+ u8 attrs[16];
+};
+
+struct __packed ipu_cpd_metadata_extn {
+ u32 extn_type;
+ u32 len;
+ u32 img_type;
+ u8 rsvd[16];
+};
+
+struct __packed ipu_cpd_client_pkg_hdr {
+ u32 prog_list_offs;
+ u32 prog_list_size;
+ u32 prog_desc_offs;
+ u32 prog_desc_size;
+ u32 pg_manifest_offs;
+ u32 pg_manifest_size;
+ u32 prog_bin_offs;
+ u32 prog_bin_size;
+};
+
+void *ipu_cpd_create_pkg_dir(struct ipu_bus_device *adev,
+ const void *src,
+ dma_addr_t dma_addr_src,
+ dma_addr_t *dma_addr, unsigned int *pkg_dir_size);
+void ipu_cpd_free_pkg_dir(struct ipu_bus_device *adev,
+ u64 *pkg_dir,
+ dma_addr_t dma_addr, unsigned int pkg_dir_size);
+int ipu_cpd_validate_cpd_file(struct ipu_device *isp,
+ const void *cpd_file,
+ unsigned long cpd_file_size);
+unsigned int ipu_cpd_pkg_dir_get_address(const u64 *pkg_dir, int pkg_dir_idx);
+unsigned int ipu_cpd_pkg_dir_get_num_entries(const u64 *pkg_dir);
+unsigned int ipu_cpd_pkg_dir_get_size(const u64 *pkg_dir, int pkg_dir_idx);
+unsigned int ipu_cpd_pkg_dir_get_type(const u64 *pkg_dir, int pkg_dir_idx);
+
+#endif /* IPU_CPD_H */
new file mode 100644
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2021 Intel Corporation
+
+#include <asm/cacheflush.h>
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/iova.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-map-ops.h>
+
+#include "ipu-dma.h"
+#include "ipu-bus.h"
+#include "ipu-mmu.h"
+
+struct vm_info {
+ struct list_head list;
+ struct page **pages;
+ void *vaddr;
+ unsigned long size;
+};
+
+static struct vm_info *get_vm_info(struct ipu_mmu *mmu, void *vaddr)
+{
+ struct vm_info *info, *save;
+
+ list_for_each_entry_safe(info, save, &mmu->vma_list, list) {
+ if (info->vaddr == vaddr)
+ return info;
+ }
+
+ return NULL;
+}
+
+/* Start of things adapted from arch/arm/mm/dma-mapping.c */
+static void __dma_clear_buffer(struct page *page, size_t size,
+ unsigned long attrs)
+{
+ /*
+ * Ensure that the allocated pages are zeroed, and that any data
+ * lurking in the kernel direct-mapped region is invalidated.
+ */
+ void *ptr = page_address(page);
+
+ memset(ptr, 0, size);
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ clflush_cache_range(ptr, size);
+}
+
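+/*
+ * Allocate the buffer as an array of 0-order pages: try the largest
+ * order that fits the remaining count, fall back to smaller orders on
+ * failure, and split each high-order allocation so that pages[] ends
+ * up referencing individual pages throughout.
+ */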
+static struct page **__dma_alloc_buffer(struct device *dev, size_t size,
+ gfp_t gfp,
+ unsigned long attrs)
+{
+ struct page **pages;
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i = 0;
+
+ pages = kvzalloc(array_size, GFP_KERNEL);
+ if (!pages)
+ return NULL;
+
+ gfp |= __GFP_NOWARN;
+
+ while (count) {
+ int j, order = __fls(count);
+
+ pages[i] = alloc_pages(gfp, order);
+ while (!pages[i] && order)
+ pages[i] = alloc_pages(gfp, --order);
+ if (!pages[i])
+ goto error;
+
+ if (order) {
+ split_page(pages[i], order);
+ j = 1 << order;
+ while (--j)
+ pages[i + j] = pages[i] + j;
+ }
+
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
+ i += 1 << order;
+ count -= 1 << order;
+ }
+
+ return pages;
+error:
+ while (i--)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ kvfree(pages);
+ return NULL;
+}
+
+static int __dma_free_buffer(struct device *dev, struct page **pages,
+ size_t size,
+ unsigned long attrs)
+{
+ int count = size >> PAGE_SHIFT;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (pages[i]) {
+ __dma_clear_buffer(pages[i], PAGE_SIZE, attrs);
+ __free_pages(pages[i], 0);
+ }
+ }
+
+ kvfree(pages);
+ return 0;
+}
+
+/* End of things adapted from arch/arm/mm/dma-mapping.c */
+
+static void ipu_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
+ unsigned long pa = ipu_mmu_iova_to_phys(mmu->dmap->mmu_info,
+ dma_handle);
+
+ clflush_cache_range(phys_to_virt(pa), size);
+}
+
+static void ipu_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i)
+ clflush_cache_range(page_to_virt(sg_page(sg)), sg->length);
+}
+
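+/*
+ * Allocate pages, map them into the IPU MMU at a newly allocated IOVA
+ * range and vmap() them to get a CPU virtual address; the returned
+ * dma_handle is the IOVA, not a physical address.
+ */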
+static void *ipu_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs)
+{
+ struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
+ struct page **pages;
+ struct iova *iova;
+ struct vm_info *info;
+ int i;
+ int rval;
+ unsigned long count;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ size = PAGE_ALIGN(size);
+ count = size >> PAGE_SHIFT;
+
+ iova = alloc_iova(&mmu->dmap->iovad, count,
+ dma_get_mask(dev) >> PAGE_SHIFT, 0);
+ if (!iova)
+ goto out_kfree;
+
+ pages = __dma_alloc_buffer(dev, size, gfp, attrs);
+ if (!pages)
+ goto out_free_iova;
+
+ for (i = 0; iova->pfn_lo + i <= iova->pfn_hi; i++) {
+ rval = ipu_mmu_map(mmu->dmap->mmu_info,
+ (iova->pfn_lo + i) << PAGE_SHIFT,
+ page_to_phys(pages[i]), PAGE_SIZE);
+ if (rval)
+ goto out_unmap;
+ }
+
+ info->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
+ if (!info->vaddr)
+ goto out_unmap;
+
+ *dma_handle = iova->pfn_lo << PAGE_SHIFT;
+
+ info->pages = pages;
+ info->size = size;
+ list_add(&info->list, &mmu->vma_list);
+
+ return info->vaddr;
+
+out_unmap:
+ for (i--; i >= 0; i--) {
+ ipu_mmu_unmap(mmu->dmap->mmu_info,
+ (iova->pfn_lo + i) << PAGE_SHIFT, PAGE_SIZE);
+ }
+ __dma_free_buffer(dev, pages, size, attrs);
+
+out_free_iova:
+ __free_iova(&mmu->dmap->iovad, iova);
+out_kfree:
+ kfree(info);
+
+ return NULL;
+}
+
+static void ipu_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle,
+ unsigned long attrs)
+{
+ struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
+ struct page **pages;
+ struct vm_info *info;
+ struct iova *iova = find_iova(&mmu->dmap->iovad,
+ dma_handle >> PAGE_SHIFT);
+
+ if (WARN_ON(!iova))
+ return;
+
+ info = get_vm_info(mmu, vaddr);
+ if (WARN_ON(!info))
+ return;
+
+ if (WARN_ON(!info->vaddr))
+ return;
+
+ if (WARN_ON(!info->pages))
+ return;
+
+ list_del(&info->list);
+
+ size = PAGE_ALIGN(size);
+
+ pages = info->pages;
+
+ vunmap(vaddr);
+
+ ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
+ (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);
+
+ __dma_free_buffer(dev, pages, size, attrs);
+
+ mmu->tlb_invalidate(mmu);
+
+ __free_iova(&mmu->dmap->iovad, iova);
+
+ kfree(info);
+}
+
+static int ipu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *addr, dma_addr_t iova, size_t size,
+ unsigned long attrs)
+{
+ struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
+ struct vm_info *info;
+ size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ size_t i;
+
+ info = get_vm_info(mmu, addr);
+ if (!info)
+ return -EFAULT;
+
+ if (!info->vaddr)
+ return -EFAULT;
+
+ if (vma->vm_start & ~PAGE_MASK)
+ return -EINVAL;
+
+ if (size > info->size)
+ return -EFAULT;
+
+ for (i = 0; i < count; i++) {
+ int ret = vm_insert_page(vma, vma->vm_start + (i << PAGE_SHIFT),
+ info->pages[i]);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ipu_dma_unmap_sg(struct device *dev,
+ struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
+ struct iova *iova = find_iova(&mmu->dmap->iovad,
+ sg_dma_address(sglist) >> PAGE_SHIFT);
+
+ if (!nents)
+ return;
+
+ if (WARN_ON(!iova))
+ return;
+
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);
+
+ ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
+ (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);
+
+ mmu->tlb_invalidate(mmu);
+
+ __free_iova(&mmu->dmap->iovad, iova);
+}
+
+static int ipu_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
+ struct scatterlist *sg;
+ struct iova *iova;
+ size_t size = 0;
+ u32 iova_addr;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i)
+ size += PAGE_ALIGN(sg->length) >> PAGE_SHIFT;
+
+ dev_dbg(dev, "dmamap: mapping sg %d entries, %zu pages\n", nents, size);
+
+ iova = alloc_iova(&mmu->dmap->iovad, size,
+ dma_get_mask(dev) >> PAGE_SHIFT, 0);
+ if (!iova)
+ return 0;
+
+ dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n", iova->pfn_lo,
+ iova->pfn_hi);
+
+ iova_addr = iova->pfn_lo;
+
+ for_each_sg(sglist, sg, nents, i) {
+ int rval;
+
+ dev_dbg(dev, "mapping entry %d: iova 0x%8.8x,phy 0x%16.16llx\n",
+ i, iova_addr << PAGE_SHIFT,
+ (unsigned long long)page_to_phys(sg_page(sg)));
+ rval = ipu_mmu_map(mmu->dmap->mmu_info, iova_addr << PAGE_SHIFT,
+ page_to_phys(sg_page(sg)),
+ PAGE_ALIGN(sg->length));
+ if (rval)
+ goto out_fail;
+ sg_dma_address(sg) = iova_addr << PAGE_SHIFT;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg_dma_len(sg) = sg->length;
+#endif /* CONFIG_NEED_SG_DMA_LENGTH */
+
+ iova_addr += PAGE_ALIGN(sg->length) >> PAGE_SHIFT;
+ }
+
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ ipu_dma_sync_sg_for_cpu(dev, sglist, nents, DMA_BIDIRECTIONAL);
+
+ mmu->tlb_invalidate(mmu);
+
+ return nents;
+
+out_fail:
+ ipu_dma_unmap_sg(dev, sglist, i, dir, attrs);
+
+ return 0;
+}
+
+/*
+ * Create a scatterlist for an already allocated DMA buffer
+ */
+static int ipu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ unsigned long attrs)
+{
+ struct ipu_mmu *mmu = to_ipu_bus_device(dev)->mmu;
+ struct vm_info *info;
+ int n_pages;
+ int ret = 0;
+
+ info = get_vm_info(mmu, cpu_addr);
+ if (!info)
+ return -EFAULT;
+
+ if (!info->vaddr)
+ return -EFAULT;
+
+ if (WARN_ON(!info->pages))
+ return -ENOMEM;
+
+ n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ ret = sg_alloc_table_from_pages(sgt, info->pages, n_pages, 0, size,
+ GFP_KERNEL);
+ if (ret)
+ dev_dbg(dev, "IPU get sgt table fail\n");
+
+ return ret;
+}
+
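+/*
+ * clflush writes back and invalidates cache lines, so the same sync
+ * helpers serve both the for_cpu and for_device directions.
+ */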
+const struct dma_map_ops ipu_dma_ops = {
+ .alloc = ipu_dma_alloc,
+ .free = ipu_dma_free,
+ .mmap = ipu_dma_mmap,
+ .map_sg = ipu_dma_map_sg,
+ .unmap_sg = ipu_dma_unmap_sg,
+ .sync_single_for_cpu = ipu_dma_sync_single_for_cpu,
+ .sync_single_for_device = ipu_dma_sync_single_for_cpu,
+ .sync_sg_for_cpu = ipu_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = ipu_dma_sync_sg_for_cpu,
+ .get_sgtable = ipu_dma_get_sgtable,
+};
new file mode 100644
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_DMA_H
+#define IPU_DMA_H
+
+#include <linux/iova.h>
+
+struct ipu_mmu_info;
+
+struct ipu_dma_mapping {
+ struct ipu_mmu_info *mmu_info;
+ struct iova_domain iovad;
+ struct kref ref;
+};
+
+extern const struct dma_map_ops ipu_dma_ops;
+
+#endif /* IPU_DMA_H */
new file mode 100644
@@ -0,0 +1,496 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <asm/cacheflush.h>
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include "ipu.h"
+#include "ipu-fw-com.h"
+#include "ipu-bus.h"
+
+/*
+ * The FWCOM layer is a resource shared between the FW and the driver.
+ * It consists of token queues in both the send and receive directions.
+ * A queue is simply an array of structures with read and write indexes
+ * into it. There are 1..n queues in each direction. The queues live in
+ * system RAM and are mapped through the ISP MMU so that both the CPU
+ * and the ISP see the same buffer. The indexes are located in ISP DMEM
+ * so that FW code can poll them with very low latency and cost. CPU
+ * access to the indexes is more costly, but it happens only when
+ * sending a message and in interrupt-triggered message handling; the
+ * CPU never needs to poll the indexes.
+ * wr_reg / rd_reg are offsets into those DMEM locations, not the
+ * indexes themselves.
+ */
+
+/* Shared structure between driver and FW - do not modify */
+struct ipu_fw_sys_queue {
+ u64 host_address;
+ u32 vied_address;
+ u32 size;
+ u32 token_size;
+ u32 wr_reg; /* reg no in subsystem's regmem */
+ u32 rd_reg;
+ u32 _align;
+};
+
+struct ipu_fw_sys_queue_res {
+ u64 host_address;
+ u32 vied_address;
+ u32 reg;
+};
+
+enum syscom_state {
+ /* Program load or explicit host setting should init to this */
+ SYSCOM_STATE_UNINIT = 0x57A7E000,
+ /* SP Syscom sets this when it is ready for use */
+ SYSCOM_STATE_READY = 0x57A7E001,
+ /* SP Syscom sets this when no more syscom accesses will happen */
+ SYSCOM_STATE_INACTIVE = 0x57A7E002
+};
+
+enum syscom_cmd {
+ /* Program load or explicit host setting should init to this */
+ SYSCOM_COMMAND_UNINIT = 0x57A7F000,
+ /* Host Syscom requests syscom to become inactive */
+ SYSCOM_COMMAND_INACTIVE = 0x57A7F001
+};
+
+/* Firmware config: data sent from the host to the SP via DDR */
+/* The cell copies the data into a context */
+
+struct ipu_fw_syscom_config {
+ u32 firmware_address;
+
+ u32 num_input_queues;
+ u32 num_output_queues;
+
+ /* ISP pointers to an array of ipu_fw_sys_queue structures */
+ u32 input_queue;
+ u32 output_queue;
+
+ /* ISYS / PSYS private data */
+ u32 specific_addr;
+ u32 specific_size;
+};
+
+/* End of shared structures / data */
+
+struct ipu_fw_com_context {
+ struct ipu_bus_device *adev;
+ void __iomem *dmem_addr;
+ int (*cell_ready)(struct ipu_bus_device *adev);
+ void (*cell_start)(struct ipu_bus_device *adev);
+
+ void *dma_buffer;
+ dma_addr_t dma_addr;
+ unsigned int dma_size;
+ unsigned long attrs;
+
+ unsigned int num_input_queues;
+ unsigned int num_output_queues;
+
+ struct ipu_fw_sys_queue *input_queue; /* array of host to SP queues */
+ struct ipu_fw_sys_queue *output_queue; /* array of SP to host */
+
+ void *config_host_addr;
+ void *specific_host_addr;
+ u64 ibuf_host_addr;
+ u64 obuf_host_addr;
+
+ u32 config_vied_addr;
+ u32 input_queue_vied_addr;
+ u32 output_queue_vied_addr;
+ u32 specific_vied_addr;
+ u32 ibuf_vied_addr;
+ u32 obuf_vied_addr;
+
+ unsigned int buttress_boot_offset;
+ void __iomem *base_addr;
+};
+
+#define FW_COM_WR_REG 0
+#define FW_COM_RD_REG 4
+
+#define REGMEM_OFFSET 0
+#define TUNIT_MAGIC_PATTERN 0x5a5a5a5a
+
+enum regmem_id {
+ /* pass pkg_dir address to SPC in non-secure mode */
+ PKG_DIR_ADDR_REG = 0,
+ /* Tunit CFG blob for secure - provided by host.*/
+ TUNIT_CFG_DWR_REG = 1,
+ /* syscom commands - modified by the host */
+ SYSCOM_COMMAND_REG = 2,
+ /* Store interrupt status - updated by SP */
+ SYSCOM_IRQ_REG = 3,
+ /* first syscom queue pointer register */
+ SYSCOM_QPR_BASE_REG = 4
+};
+
+enum message_direction {
+ DIR_RECV = 0,
+ DIR_SEND
+};
+
+#define BUTTRESS_FW_BOOT_PARAMS_0 0x4000
+#define BUTTRESS_FW_BOOT_PARAM_REG(base, offset, id) ((base) \
+ + BUTTRESS_FW_BOOT_PARAMS_0 + ((offset) + (id)) * 4)
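+/*
+ * Example: with the ISYS buttress boot offset of 0, the
+ * SYSCOM_STATE_ID (1) parameter register resolves to
+ * base + 0x4000 + 4.
+ */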
+
+enum buttress_syscom_id {
+ /* pass syscom configuration to SPC */
+ SYSCOM_CONFIG_ID = 0,
+ /* syscom state - modified by SP */
+ SYSCOM_STATE_ID = 1,
+ /* syscom vtl0 addr mask */
+ SYSCOM_VTL0_ADDR_MASK_ID = 2,
+ SYSCOM_ID_MAX
+};
+
+static unsigned int num_messages(unsigned int wr, unsigned int rd,
+ unsigned int size)
+{
+ if (wr < rd)
+ wr += size;
+ return wr - rd;
+}
+
+static unsigned int num_free(unsigned int wr, unsigned int rd,
+ unsigned int size)
+{
+ return size - num_messages(wr, rd, size);
+}
+
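+/*
+ * Classic ring-buffer index arithmetic: with size = 5, wr = 1 and
+ * rd = 4, the write index has wrapped, so num_messages() computes
+ * (1 + 5) - 4 = 2 pending tokens and num_free() the remaining 3.
+ * One slot is always left empty (queues are sized size + 1 in
+ * ipu_sys_queue_init()) so that a full queue can be distinguished
+ * from an empty one.
+ */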
+static unsigned int curr_index(void __iomem *q_dmem,
+ enum message_direction dir)
+{
+ return readl(q_dmem +
+ (dir == DIR_RECV ? FW_COM_RD_REG : FW_COM_WR_REG));
+}
+
+static unsigned int inc_index(void __iomem *q_dmem, struct ipu_fw_sys_queue *q,
+ enum message_direction dir)
+{
+ unsigned int index;
+
+ index = curr_index(q_dmem, dir) + 1;
+ return index >= q->size ? 0 : index;
+}
+
+static unsigned int ipu_sys_queue_buf_size(unsigned int size,
+ unsigned int token_size)
+{
+ return (size + 1) * token_size;
+}
+
+static void ipu_sys_queue_init(struct ipu_fw_sys_queue *q, unsigned int size,
+ unsigned int token_size,
+ struct ipu_fw_sys_queue_res *res)
+{
+ unsigned int buf_size;
+
+ q->size = size + 1;
+ q->token_size = token_size;
+ buf_size = ipu_sys_queue_buf_size(size, token_size);
+
+ /* acquire the shared buffer space */
+ q->host_address = res->host_address;
+ res->host_address += buf_size;
+ q->vied_address = res->vied_address;
+ res->vied_address += buf_size;
+
+ /* acquire the shared read and write pointers */
+ q->wr_reg = res->reg;
+ res->reg++;
+ q->rd_reg = res->reg;
+ res->reg++;
+}
+
+void *ipu_fw_com_prepare(struct ipu_fw_com_cfg *cfg,
+ struct ipu_bus_device *adev, void __iomem *base)
+{
+ struct ipu_fw_com_context *ctx;
+ struct ipu_fw_syscom_config *fw_cfg;
+ unsigned int i;
+ unsigned int sizeall, offset;
+ unsigned int sizeinput = 0, sizeoutput = 0;
+ unsigned long attrs = 0;
+ struct ipu_fw_sys_queue_res res;
+
+ /* error handling */
+ if (!cfg || !cfg->cell_start || !cfg->cell_ready)
+ return NULL;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+ ctx->dmem_addr = base + cfg->dmem_addr + REGMEM_OFFSET;
+ ctx->adev = adev;
+ ctx->cell_start = cfg->cell_start;
+ ctx->cell_ready = cfg->cell_ready;
+ ctx->buttress_boot_offset = cfg->buttress_boot_offset;
+ ctx->base_addr = base;
+
+ ctx->num_input_queues = cfg->num_input_queues;
+ ctx->num_output_queues = cfg->num_output_queues;
+
+ /*
+ * Allocate DMA mapped memory. Allocate one big chunk.
+ */
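+ /*
+ * The chunk is carved up in the order the FW parses it: syscom
+ * config, input queue descriptors, output queue descriptors,
+ * FW-specific data, then the input and output token buffers.
+ */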
+ sizeall =
+ /* Base cfg for FW */
+ roundup(sizeof(struct ipu_fw_syscom_config), 8) +
+ /* Descriptions of the queues */
+ cfg->num_input_queues * sizeof(struct ipu_fw_sys_queue) +
+ cfg->num_output_queues * sizeof(struct ipu_fw_sys_queue) +
+ /* FW specific information structure */
+ roundup(cfg->specific_size, 8);
+
+ for (i = 0; i < cfg->num_input_queues; i++)
+ sizeinput += ipu_sys_queue_buf_size(cfg->input[i].queue_size,
+ cfg->input[i].token_size);
+
+ for (i = 0; i < cfg->num_output_queues; i++)
+ sizeoutput += ipu_sys_queue_buf_size(cfg->output[i].queue_size,
+ cfg->output[i].token_size);
+
+ sizeall += sizeinput + sizeoutput;
+
+ ctx->dma_buffer = dma_alloc_attrs(&ctx->adev->dev, sizeall,
+ &ctx->dma_addr, GFP_KERNEL,
+ attrs);
+ ctx->attrs = attrs;
+ if (!ctx->dma_buffer) {
+ dev_err(&ctx->adev->dev, "failed to allocate dma memory\n");
+ kfree(ctx);
+ return NULL;
+ }
+
+ ctx->dma_size = sizeall;
+
+ /* This is the address where FW starts to parse allocations */
+ ctx->config_host_addr = ctx->dma_buffer;
+ ctx->config_vied_addr = ctx->dma_addr;
+ fw_cfg = (struct ipu_fw_syscom_config *)ctx->config_host_addr;
+ offset = roundup(sizeof(struct ipu_fw_syscom_config), 8);
+
+ ctx->input_queue = ctx->dma_buffer + offset;
+ ctx->input_queue_vied_addr = ctx->dma_addr + offset;
+ offset += cfg->num_input_queues * sizeof(struct ipu_fw_sys_queue);
+
+ ctx->output_queue = ctx->dma_buffer + offset;
+ ctx->output_queue_vied_addr = ctx->dma_addr + offset;
+ offset += cfg->num_output_queues * sizeof(struct ipu_fw_sys_queue);
+
+ ctx->specific_host_addr = ctx->dma_buffer + offset;
+ ctx->specific_vied_addr = ctx->dma_addr + offset;
+ offset += roundup(cfg->specific_size, 8);
+
+ ctx->ibuf_host_addr = (uintptr_t)(ctx->dma_buffer + offset);
+ ctx->ibuf_vied_addr = ctx->dma_addr + offset;
+ offset += sizeinput;
+
+ ctx->obuf_host_addr = (uintptr_t)(ctx->dma_buffer + offset);
+ ctx->obuf_vied_addr = ctx->dma_addr + offset;
+ offset += sizeoutput;
+
+ /* initialize input queues */
+ res.reg = SYSCOM_QPR_BASE_REG;
+ res.host_address = ctx->ibuf_host_addr;
+ res.vied_address = ctx->ibuf_vied_addr;
+ for (i = 0; i < cfg->num_input_queues; i++) {
+ ipu_sys_queue_init(ctx->input_queue + i,
+ cfg->input[i].queue_size,
+ cfg->input[i].token_size, &res);
+ }
+
+ /* initialize output queues */
+ res.host_address = ctx->obuf_host_addr;
+ res.vied_address = ctx->obuf_vied_addr;
+ for (i = 0; i < cfg->num_output_queues; i++) {
+ ipu_sys_queue_init(ctx->output_queue + i,
+ cfg->output[i].queue_size,
+ cfg->output[i].token_size, &res);
+ }
+
+ /* copy firmware specific data */
+ if (cfg->specific_addr && cfg->specific_size) {
+ memcpy((void *)ctx->specific_host_addr,
+ cfg->specific_addr, cfg->specific_size);
+ }
+
+ fw_cfg->num_input_queues = cfg->num_input_queues;
+ fw_cfg->num_output_queues = cfg->num_output_queues;
+ fw_cfg->input_queue = ctx->input_queue_vied_addr;
+ fw_cfg->output_queue = ctx->output_queue_vied_addr;
+ fw_cfg->specific_addr = ctx->specific_vied_addr;
+ fw_cfg->specific_size = cfg->specific_size;
+ return ctx;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_com_prepare);
+
+int ipu_fw_com_open(struct ipu_fw_com_context *ctx)
+{
+ /*
+ * Disable tunit configuration by FW.
+ * This feature is used to configure tunit in secure mode.
+ */
+ writel(TUNIT_MAGIC_PATTERN, ctx->dmem_addr + TUNIT_CFG_DWR_REG * 4);
+ /* Check if SP is in valid state */
+ if (!ctx->cell_ready(ctx->adev))
+ return -EIO;
+
+ /* store syscom uninitialized command */
+ writel(SYSCOM_COMMAND_UNINIT,
+ ctx->dmem_addr + SYSCOM_COMMAND_REG * 4);
+
+ /* store syscom uninitialized state */
+ writel(SYSCOM_STATE_UNINIT,
+ BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+ ctx->buttress_boot_offset,
+ SYSCOM_STATE_ID));
+
+ /* store firmware configuration address */
+ writel(ctx->config_vied_addr,
+ BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+ ctx->buttress_boot_offset,
+ SYSCOM_CONFIG_ID));
+ ctx->cell_start(ctx->adev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_com_open);
+
+int ipu_fw_com_close(struct ipu_fw_com_context *ctx)
+{
+ int state;
+
+ state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+ ctx->buttress_boot_offset,
+ SYSCOM_STATE_ID));
+ if (state != SYSCOM_STATE_READY)
+ return -EBUSY;
+
+ /* set close request flag */
+ writel(SYSCOM_COMMAND_INACTIVE, ctx->dmem_addr +
+ SYSCOM_COMMAND_REG * 4);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_com_close);
+
+int ipu_fw_com_release(struct ipu_fw_com_context *ctx, unsigned int force)
+{
+ /* check if release is forced, and verify the cell state if it is not */
+ if (!force && !ctx->cell_ready(ctx->adev))
+ return -EBUSY;
+
+ dma_free_attrs(&ctx->adev->dev, ctx->dma_size,
+ ctx->dma_buffer, ctx->dma_addr,
+ ctx->attrs);
+ kfree(ctx);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_com_release);
+
+int ipu_fw_com_ready(struct ipu_fw_com_context *ctx)
+{
+ int state;
+
+ state = readl(BUTTRESS_FW_BOOT_PARAM_REG(ctx->base_addr,
+ ctx->buttress_boot_offset,
+ SYSCOM_STATE_ID));
+ if (state != SYSCOM_STATE_READY)
+ return -EBUSY; /* SPC is not ready to handle messages yet */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_fw_com_ready);
+
+static bool is_index_valid(struct ipu_fw_sys_queue *q, unsigned int index)
+{
+ if (index >= q->size)
+ return false;
+ return true;
+}
+
+void *ipu_send_get_token(struct ipu_fw_com_context *ctx, int q_nbr)
+{
+ struct ipu_fw_sys_queue *q = &ctx->input_queue[q_nbr];
+ void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+ unsigned int wr, rd;
+ unsigned int packets;
+ unsigned int index;
+
+ wr = readl(q_dmem + FW_COM_WR_REG);
+ rd = readl(q_dmem + FW_COM_RD_REG);
+
+ /* Catch invalid indexes in dmem */
+ if (!is_index_valid(q, wr) || !is_index_valid(q, rd))
+ return NULL;
+
+ packets = num_free(wr + 1, rd, q->size);
+ if (!packets)
+ return NULL;
+
+ index = curr_index(q_dmem, DIR_SEND);
+
+ return (void *)(unsigned long)q->host_address + (index * q->token_size);
+}
+EXPORT_SYMBOL_GPL(ipu_send_get_token);
+
+void ipu_send_put_token(struct ipu_fw_com_context *ctx, int q_nbr)
+{
+ struct ipu_fw_sys_queue *q = &ctx->input_queue[q_nbr];
+ void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+ unsigned int index;
+
+ /* Increment the write index, wrapping at queue size */
+ index = inc_index(q_dmem, q, DIR_SEND);
+
+ writel(index, q_dmem + FW_COM_WR_REG);
+}
+EXPORT_SYMBOL_GPL(ipu_send_put_token);
+
+void *ipu_recv_get_token(struct ipu_fw_com_context *ctx, int q_nbr)
+{
+ struct ipu_fw_sys_queue *q = &ctx->output_queue[q_nbr];
+ void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+ unsigned int wr, rd;
+ unsigned int packets;
+ void *addr;
+
+ wr = readl(q_dmem + FW_COM_WR_REG);
+ rd = readl(q_dmem + FW_COM_RD_REG);
+
+ /* Catch invalid indexes in dmem */
+ if (!is_index_valid(q, wr) || !is_index_valid(q, rd))
+ return NULL;
+
+ packets = num_messages(wr, rd, q->size);
+ if (!packets)
+ return NULL;
+
+ addr = (void *)(unsigned long)q->host_address + (rd * q->token_size);
+
+ return addr;
+}
+EXPORT_SYMBOL_GPL(ipu_recv_get_token);
+
+void ipu_recv_put_token(struct ipu_fw_com_context *ctx, int q_nbr)
+{
+ struct ipu_fw_sys_queue *q = &ctx->output_queue[q_nbr];
+ void __iomem *q_dmem = ctx->dmem_addr + q->wr_reg * 4;
+ unsigned int rd = inc_index(q_dmem, q, DIR_RECV);
+
+ /* Release index */
+ writel(rd, q_dmem + FW_COM_RD_REG);
+}
+EXPORT_SYMBOL_GPL(ipu_recv_put_token);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ipu fw comm library");
new file mode 100644
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_FW_COM_H
+#define IPU_FW_COM_H
+
+struct ipu_fw_com_context;
+struct ipu_bus_device;
+
+struct ipu_fw_syscom_queue_config {
+ unsigned int queue_size; /* tokens per queue */
+ unsigned int token_size; /* bytes per token */
+};
+
+#define SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET 0
+#define SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET 7
+
+struct ipu_fw_com_cfg {
+ unsigned int num_input_queues;
+ unsigned int num_output_queues;
+ struct ipu_fw_syscom_queue_config *input;
+ struct ipu_fw_syscom_queue_config *output;
+
+ unsigned int dmem_addr;
+
+ /* firmware-specific configuration data */
+ void *specific_addr;
+ unsigned int specific_size;
+ int (*cell_ready)(struct ipu_bus_device *adev);
+ void (*cell_start)(struct ipu_bus_device *adev);
+
+ unsigned int buttress_boot_offset;
+};
+
+void *ipu_fw_com_prepare(struct ipu_fw_com_cfg *cfg,
+ struct ipu_bus_device *adev, void __iomem *base);
+
+int ipu_fw_com_open(struct ipu_fw_com_context *ctx);
+int ipu_fw_com_ready(struct ipu_fw_com_context *ctx);
+int ipu_fw_com_close(struct ipu_fw_com_context *ctx);
+int ipu_fw_com_release(struct ipu_fw_com_context *ctx, unsigned int force);
+
+void *ipu_recv_get_token(struct ipu_fw_com_context *ctx, int q_nbr);
+void ipu_recv_put_token(struct ipu_fw_com_context *ctx, int q_nbr);
+void *ipu_send_get_token(struct ipu_fw_com_context *ctx, int q_nbr);
+void ipu_send_put_token(struct ipu_fw_com_context *ctx, int q_nbr);
+
+#endif
new file mode 100644
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2021 Intel Corporation
+
+#include <asm/cacheflush.h>
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#include "ipu.h"
+#include "ipu-trace.h"
+#include "ipu-platform-regs.h"
+#include "ipu-platform.h"
+#include "ipu-fw-isys.h"
+#include "ipu-fw-com.h"
+#include "ipu-isys.h"
+
+#define IPU_FW_UNSUPPORTED_DATA_TYPE 0
+static const uint32_t
+extracted_bits_per_pixel_per_mipi_data_type[N_IPU_FW_ISYS_MIPI_DATA_TYPE] = {
+ 64, /* [0x00] IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_START_CODE */
+ 64, /* [0x01] IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_END_CODE */
+ 64, /* [0x02] IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_START_CODE */
+ 64, /* [0x03] IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_END_CODE */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x04] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x05] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x06] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x07] */
+ 64, /* [0x08] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT1 */
+ 64, /* [0x09] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT2 */
+ 64, /* [0x0A] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT3 */
+ 64, /* [0x0B] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT4 */
+ 64, /* [0x0C] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT5 */
+ 64, /* [0x0D] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT6 */
+ 64, /* [0x0E] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT7 */
+ 64, /* [0x0F] IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT8 */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x10] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x11] */
+ 8, /* [0x12] IPU_FW_ISYS_MIPI_DATA_TYPE_EMBEDDED */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x13] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x14] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x15] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x16] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x17] */
+ 12, /* [0x18] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8 */
+ 15, /* [0x19] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10 */
+ 12, /* [0x1A] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_LEGACY */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x1B] */
+ 12, /* [0x1C] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_SHIFT */
+ 15, /* [0x1D] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10_SHIFT */
+ 16, /* [0x1E] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_8 */
+ 20, /* [0x1F] IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_10 */
+ 16, /* [0x20] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_444 */
+ 16, /* [0x21] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_555 */
+ 16, /* [0x22] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_565 */
+ 18, /* [0x23] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_666 */
+ 24, /* [0x24] IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_888 */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x25] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x26] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x27] */
+ 6, /* [0x28] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_6 */
+ 7, /* [0x29] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_7 */
+ 8, /* [0x2A] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_8 */
+ 10, /* [0x2B] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_10 */
+ 12, /* [0x2C] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_12 */
+ 14, /* [0x2D] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_14 */
+ 16, /* [0x2E] IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_16 */
+ 8, /* [0x2F] IPU_FW_ISYS_MIPI_DATA_TYPE_BINARY_8 */
+ 8, /* [0x30] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF1 */
+ 8, /* [0x31] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF2 */
+ 8, /* [0x32] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF3 */
+ 8, /* [0x33] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF4 */
+ 8, /* [0x34] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF5 */
+ 8, /* [0x35] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF6 */
+ 8, /* [0x36] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF7 */
+ 8, /* [0x37] IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF8 */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x38] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x39] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3A] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3B] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3C] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3D] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE, /* [0x3E] */
+ IPU_FW_UNSUPPORTED_DATA_TYPE /* [0x3F] */
+};
+
+static const char send_msg_types[N_IPU_FW_ISYS_SEND_TYPE][32] = {
+ "STREAM_OPEN",
+ "STREAM_START",
+ "STREAM_START_AND_CAPTURE",
+ "STREAM_CAPTURE",
+ "STREAM_STOP",
+ "STREAM_FLUSH",
+ "STREAM_CLOSE"
+};
+
+static int handle_proxy_response(struct ipu_isys *isys, unsigned int req_id)
+{
+ struct ipu_fw_isys_proxy_resp_info_abi *resp;
+ int rval = -EIO;
+
+ resp = (struct ipu_fw_isys_proxy_resp_info_abi *)
+ ipu_recv_get_token(isys->fwcom, IPU_BASE_PROXY_RECV_QUEUES);
+ if (!resp)
+ return 1;
+
+ dev_dbg(&isys->adev->dev,
+ "Proxy response: id 0x%x, error %d, details %d\n",
+ resp->request_id, resp->error_info.error,
+ resp->error_info.error_details);
+
+ if (req_id == resp->request_id)
+ rval = 0;
+
+ ipu_recv_put_token(isys->fwcom, IPU_BASE_PROXY_RECV_QUEUES);
+ return rval;
+}
+
+/* Simple blocking proxy send function */
+int ipu_fw_isys_send_proxy_token(struct ipu_isys *isys,
+ unsigned int req_id,
+ unsigned int index,
+ unsigned int offset, u32 value)
+{
+ struct ipu_fw_com_context *ctx = isys->fwcom;
+ struct ipu_fw_proxy_send_queue_token *token;
+ unsigned int timeout = 1000;
+ int rval = -EBUSY;
+
+ dev_dbg(&isys->adev->dev,
+ "proxy send token: req_id 0x%x, index %d, offset 0x%x, value 0x%x\n",
+ req_id, index, offset, value);
+
+ token = ipu_send_get_token(ctx, IPU_BASE_PROXY_SEND_QUEUES);
+ if (!token)
+ goto leave;
+
+ token->request_id = req_id;
+ token->region_index = index;
+ token->offset = offset;
+ token->value = value;
+ ipu_send_put_token(ctx, IPU_BASE_PROXY_SEND_QUEUES);
+
+ /* The proxy does not yet support IRQ-based completion, so poll */
+ do {
+ usleep_range(100, 110);
+ rval = handle_proxy_response(isys, req_id);
+ if (!rval)
+ break;
+ if (rval == -EIO) {
+ dev_err(&isys->adev->dev,
+ "Proxy response received with unexpected id\n");
+ break;
+ }
+ timeout--;
+ } while (rval && timeout);
+
+ if (!timeout)
+ dev_err(&isys->adev->dev, "Proxy response timed out\n");
+leave:
+ return rval;
+}
+
+int
+ipu_fw_isys_complex_cmd(struct ipu_isys *isys,
+ const unsigned int stream_handle,
+ void *cpu_mapped_buf,
+ dma_addr_t dma_mapped_buf,
+ size_t size, enum ipu_fw_isys_send_type send_type)
+{
+ struct ipu_fw_com_context *ctx = isys->fwcom;
+ struct ipu_fw_send_queue_token *token;
+
+ if (send_type >= N_IPU_FW_ISYS_SEND_TYPE)
+ return -EINVAL;
+
+ dev_dbg(&isys->adev->dev, "send_token: %s\n",
+ send_msg_types[send_type]);
+
+ /*
+ * Flush the cache in case the message carries a payload;
+ * not all messages do.
+ */
+ if (cpu_mapped_buf)
+ clflush_cache_range(cpu_mapped_buf, size);
+
+ token = ipu_send_get_token(ctx,
+ stream_handle + IPU_BASE_MSG_SEND_QUEUES);
+ if (!token)
+ return -EBUSY;
+
+ token->payload = dma_mapped_buf;
+ token->buf_handle = (unsigned long)cpu_mapped_buf;
+ token->send_type = send_type;
+
+ ipu_send_put_token(ctx, stream_handle + IPU_BASE_MSG_SEND_QUEUES);
+
+ return 0;
+}
+
+int ipu_fw_isys_simple_cmd(struct ipu_isys *isys,
+ const unsigned int stream_handle,
+ enum ipu_fw_isys_send_type send_type)
+{
+ return ipu_fw_isys_complex_cmd(isys, stream_handle, NULL, 0, 0,
+ send_type);
+}
+
+int ipu_fw_isys_close(struct ipu_isys *isys)
+{
+ struct device *dev = &isys->adev->dev;
+ int timeout = IPU_ISYS_TURNOFF_TIMEOUT;
+ int rval;
+ unsigned long flags;
+ void *fwcom;
+
+ /*
+ * Stop the isys fw. The actual close takes some time, as the FW
+ * must stop its actions, including code fetches to the SP icache.
+ * Hold the spinlock to wait for the interrupt handler to finish.
+ */
+ spin_lock_irqsave(&isys->power_lock, flags);
+ rval = ipu_fw_com_close(isys->fwcom);
+ fwcom = isys->fwcom;
+ isys->fwcom = NULL;
+ spin_unlock_irqrestore(&isys->power_lock, flags);
+ if (rval)
+ dev_err(dev, "Device close failure: %d\n", rval);
+
+ /* Release will likely fail if the close failed, but try anyway */
+ do {
+ usleep_range(IPU_ISYS_TURNOFF_DELAY_US,
+ 2 * IPU_ISYS_TURNOFF_DELAY_US);
+ rval = ipu_fw_com_release(fwcom, 0);
+ timeout--;
+ } while (rval != 0 && timeout);
+
+ if (rval) {
+ dev_err(dev, "Device release time out %d\n", rval);
+ spin_lock_irqsave(&isys->power_lock, flags);
+ isys->fwcom = fwcom;
+ spin_unlock_irqrestore(&isys->power_lock, flags);
+ }
+
+ return rval;
+}
+
+void ipu_fw_isys_cleanup(struct ipu_isys *isys)
+{
+ int ret;
+
+ ret = ipu_fw_com_release(isys->fwcom, 1);
+ if (ret < 0)
+ dev_err(&isys->adev->dev,
+ "Device busy, fw_com release failed.");
+ isys->fwcom = NULL;
+}
+
+static void start_sp(struct ipu_bus_device *adev)
+{
+ struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+ void __iomem *spc_regs_base = isys->pdata->base +
+ isys->pdata->ipdata->hw_variant.spc_offset;
+ u32 val = 0;
+
+ val |= IPU_ISYS_SPC_STATUS_START |
+ IPU_ISYS_SPC_STATUS_RUN |
+ IPU_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
+ val |= isys->icache_prefetch ? IPU_ISYS_SPC_STATUS_ICACHE_PREFETCH : 0;
+
+ writel(val, spc_regs_base + IPU_ISYS_REG_SPC_STATUS_CTRL);
+}
+
+static int query_sp(struct ipu_bus_device *adev)
+{
+ struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+ void __iomem *spc_regs_base = isys->pdata->base +
+ isys->pdata->ipdata->hw_variant.spc_offset;
+ u32 val = readl(spc_regs_base + IPU_ISYS_REG_SPC_STATUS_CTRL);
+
+ /* return true when READY == 1, START == 0 */
+ val &= IPU_ISYS_SPC_STATUS_READY | IPU_ISYS_SPC_STATUS_START;
+
+ return val == IPU_ISYS_SPC_STATUS_READY;
+}
+
+static int ipu6_isys_fwcom_cfg_init(struct ipu_isys *isys,
+ struct ipu_fw_com_cfg *fwcom,
+ unsigned int num_streams)
+{
+ int i;
+ unsigned int size;
+ struct ipu_fw_syscom_queue_config *input_queue_cfg;
+ struct ipu_fw_syscom_queue_config *output_queue_cfg;
+ struct ipu6_fw_isys_fw_config *isys_fw_cfg;
+ int num_out_message_queues = 1;
+ int type_proxy = IPU_FW_ISYS_QUEUE_TYPE_PROXY;
+ int type_dev = IPU_FW_ISYS_QUEUE_TYPE_DEV;
+ int type_msg = IPU_FW_ISYS_QUEUE_TYPE_MSG;
+ int base_dev_send = IPU_BASE_DEV_SEND_QUEUES;
+ int base_msg_send = IPU_BASE_MSG_SEND_QUEUES;
+ int base_msg_recv = IPU_BASE_MSG_RECV_QUEUES;
+ int num_in_message_queues;
+ unsigned int max_streams;
+ unsigned int max_send_queues, max_sram_blocks, max_devq_size;
+
+ max_streams = IPU6_ISYS_NUM_STREAMS;
+ max_send_queues = IPU6_N_MAX_SEND_QUEUES;
+ max_sram_blocks = IPU6_NOF_SRAM_BLOCKS_MAX;
+ max_devq_size = IPU6_DEV_SEND_QUEUE_SIZE;
+ if (ipu_ver == IPU_VER_6SE) {
+ max_streams = IPU6SE_ISYS_NUM_STREAMS;
+ max_send_queues = IPU6SE_N_MAX_SEND_QUEUES;
+ max_sram_blocks = IPU6SE_NOF_SRAM_BLOCKS_MAX;
+ max_devq_size = IPU6SE_DEV_SEND_QUEUE_SIZE;
+ }
+
+ num_in_message_queues = clamp_t(unsigned int, num_streams, 1,
+ max_streams);
+ isys_fw_cfg = devm_kzalloc(&isys->adev->dev, sizeof(*isys_fw_cfg),
+ GFP_KERNEL);
+ if (!isys_fw_cfg)
+ return -ENOMEM;
+
+ isys_fw_cfg->num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_PROXY] =
+ IPU_N_MAX_PROXY_SEND_QUEUES;
+ isys_fw_cfg->num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_DEV] =
+ IPU_N_MAX_DEV_SEND_QUEUES;
+ isys_fw_cfg->num_send_queues[IPU_FW_ISYS_QUEUE_TYPE_MSG] =
+ num_in_message_queues;
+ isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_PROXY] =
+ IPU_N_MAX_PROXY_RECV_QUEUES;
+ /* Common msg/dev return queue */
+ isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_DEV] = 0;
+ isys_fw_cfg->num_recv_queues[IPU_FW_ISYS_QUEUE_TYPE_MSG] =
+ num_out_message_queues;
+
+ size = sizeof(*input_queue_cfg) * max_send_queues;
+ input_queue_cfg = devm_kzalloc(&isys->adev->dev, size, GFP_KERNEL);
+ if (!input_queue_cfg)
+ return -ENOMEM;
+
+ size = sizeof(*output_queue_cfg) * IPU_N_MAX_RECV_QUEUES;
+ output_queue_cfg = devm_kzalloc(&isys->adev->dev, size, GFP_KERNEL);
+ if (!output_queue_cfg)
+ return -ENOMEM;
+
+ fwcom->input = input_queue_cfg;
+ fwcom->output = output_queue_cfg;
+
+ fwcom->num_input_queues =
+ isys_fw_cfg->num_send_queues[type_proxy] +
+ isys_fw_cfg->num_send_queues[type_dev] +
+ isys_fw_cfg->num_send_queues[type_msg];
+
+ fwcom->num_output_queues =
+ isys_fw_cfg->num_recv_queues[type_proxy] +
+ isys_fw_cfg->num_recv_queues[type_dev] +
+ isys_fw_cfg->num_recv_queues[type_msg];
+
+ /* SRAM partitioning: share the GDA pages equally among the input message queues */
+ for (i = 0; i < max_sram_blocks; i++) {
+ if (i < num_in_message_queues)
+ isys_fw_cfg->buffer_partition.num_gda_pages[i] =
+ (IPU_DEVICE_GDA_NR_PAGES *
+ IPU_DEVICE_GDA_VIRT_FACTOR) /
+ num_in_message_queues;
+ else
+ isys_fw_cfg->buffer_partition.num_gda_pages[i] = 0;
+ }
+
+ /* FW assumes proxy interface at fwcom queue 0 */
+ for (i = 0; i < isys_fw_cfg->num_send_queues[type_proxy]; i++) {
+ input_queue_cfg[i].token_size =
+ sizeof(struct ipu_fw_proxy_send_queue_token);
+ input_queue_cfg[i].queue_size = IPU_ISYS_SIZE_PROXY_SEND_QUEUE;
+ }
+
+ for (i = 0; i < isys_fw_cfg->num_send_queues[type_dev]; i++) {
+ input_queue_cfg[base_dev_send + i].token_size =
+ sizeof(struct ipu_fw_send_queue_token);
+ input_queue_cfg[base_dev_send + i].queue_size = max_devq_size;
+ }
+
+ for (i = 0; i < isys_fw_cfg->num_send_queues[type_msg]; i++) {
+ input_queue_cfg[base_msg_send + i].token_size =
+ sizeof(struct ipu_fw_send_queue_token);
+ input_queue_cfg[base_msg_send + i].queue_size =
+ IPU_ISYS_SIZE_SEND_QUEUE;
+ }
+
+ for (i = 0; i < isys_fw_cfg->num_recv_queues[type_proxy]; i++) {
+ output_queue_cfg[i].token_size =
+ sizeof(struct ipu_fw_proxy_resp_queue_token);
+ output_queue_cfg[i].queue_size = IPU_ISYS_SIZE_PROXY_RECV_QUEUE;
+ }
+ /* There is no recv DEV queue */
+ for (i = 0; i < isys_fw_cfg->num_recv_queues[type_msg]; i++) {
+ output_queue_cfg[base_msg_recv + i].token_size =
+ sizeof(struct ipu_fw_resp_queue_token);
+ output_queue_cfg[base_msg_recv + i].queue_size =
+ IPU_ISYS_SIZE_RECV_QUEUE;
+ }
+
+ fwcom->dmem_addr = isys->pdata->ipdata->hw_variant.dmem_offset;
+ fwcom->specific_addr = isys_fw_cfg;
+ fwcom->specific_size = sizeof(*isys_fw_cfg);
+
+ return 0;
+}
+
+int ipu_fw_isys_init(struct ipu_isys *isys, unsigned int num_streams)
+{
+ int retry = IPU_ISYS_OPEN_RETRY;
+
+ struct ipu_fw_com_cfg fwcom = {
+ .cell_start = start_sp,
+ .cell_ready = query_sp,
+ .buttress_boot_offset = SYSCOM_BUTTRESS_FW_PARAMS_ISYS_OFFSET,
+ };
+
+ struct device *dev = &isys->adev->dev;
+ int rval;
+
+ rval = ipu6_isys_fwcom_cfg_init(isys, &fwcom, num_streams);
+ if (rval)
+ return rval;
+
+ isys->fwcom = ipu_fw_com_prepare(&fwcom, isys->adev, isys->pdata->base);
+ if (!isys->fwcom) {
+ dev_err(dev, "isys fw com prepare failed\n");
+ return -EIO;
+ }
+
+ rval = ipu_fw_com_open(isys->fwcom);
+ if (rval) {
+ dev_err(dev, "isys fw com open failed %d\n", rval);
+ return rval;
+ }
+
+ do {
+ usleep_range(IPU_ISYS_OPEN_TIMEOUT_US,
+ IPU_ISYS_OPEN_TIMEOUT_US + 10);
+ rval = ipu_fw_com_ready(isys->fwcom);
+ if (!rval)
+ break;
+ retry--;
+ } while (retry > 0);
+
+ if (!retry && rval) {
+ dev_err(dev, "isys port open ready failed %d\n", rval);
+ ipu_fw_isys_close(isys);
+ }
+
+ return rval;
+}
+
+struct ipu_fw_isys_resp_info_abi *
+ipu_fw_isys_get_resp(void *context, unsigned int queue,
+ struct ipu_fw_isys_resp_info_abi *response)
+{
+ return (struct ipu_fw_isys_resp_info_abi *)
+ ipu_recv_get_token(context, queue);
+}
+
+void ipu_fw_isys_put_resp(void *context, unsigned int queue)
+{
+ ipu_recv_put_token(context, queue);
+}
+
+void ipu_fw_isys_set_params(struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg)
+{
+ unsigned int i;
+ unsigned int idx;
+
+ for (i = 0; i < stream_cfg->nof_input_pins; i++) {
+ idx = stream_cfg->input_pins[i].dt;
+ stream_cfg->input_pins[i].bits_per_pix =
+ extracted_bits_per_pixel_per_mipi_data_type[idx];
+ stream_cfg->input_pins[i].mapped_dt =
+ N_IPU_FW_ISYS_MIPI_DATA_TYPE;
+ stream_cfg->input_pins[i].mipi_decompression =
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_NO_COMPRESSION;
+ /*
+ * CSI BE can be used to crop and change bayer order.
+ * NOTE: currently it only crops first and last lines in height.
+ */
+ if (stream_cfg->crop.top_offset & 1)
+ stream_cfg->input_pins[i].crop_first_and_last_lines = 1;
+ stream_cfg->input_pins[i].capture_mode =
+ IPU_FW_ISYS_CAPTURE_MODE_REGULAR;
+ }
+}
+
+void
+ipu_fw_isys_dump_stream_cfg(struct device *dev,
+ struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg)
+{
+ unsigned int i;
+
+ dev_dbg(dev, "---------------------------\n");
+ dev_dbg(dev, "IPU_FW_ISYS_STREAM_CFG_DATA\n");
+ dev_dbg(dev, "---------------------------\n");
+
+ dev_dbg(dev, "Source %d\n", stream_cfg->src);
+ dev_dbg(dev, "VC %d\n", stream_cfg->vc);
+ dev_dbg(dev, "Nof input pins %d\n", stream_cfg->nof_input_pins);
+ dev_dbg(dev, "Nof output pins %d\n", stream_cfg->nof_output_pins);
+
+ for (i = 0; i < stream_cfg->nof_input_pins; i++) {
+ dev_dbg(dev, "Input pin %d\n", i);
+ dev_dbg(dev, "Mipi data type 0x%0x\n",
+ stream_cfg->input_pins[i].dt);
+ dev_dbg(dev, "Mipi store mode %d\n",
+ stream_cfg->input_pins[i].mipi_store_mode);
+ dev_dbg(dev, "Bits per pixel %d\n",
+ stream_cfg->input_pins[i].bits_per_pix);
+ dev_dbg(dev, "Mapped data type 0x%0x\n",
+ stream_cfg->input_pins[i].mapped_dt);
+ dev_dbg(dev, "Input res width %d\n",
+ stream_cfg->input_pins[i].input_res.width);
+ dev_dbg(dev, "Input res height %d\n",
+ stream_cfg->input_pins[i].input_res.height);
+ dev_dbg(dev, "mipi decompression %d\n",
+ stream_cfg->input_pins[i].mipi_decompression);
+ dev_dbg(dev, "capture_mode %d\n",
+ stream_cfg->input_pins[i].capture_mode);
+ }
+
+ dev_dbg(dev, "Crop info\n");
+ dev_dbg(dev, "Crop.top_offset %d\n", stream_cfg->crop.top_offset);
+ dev_dbg(dev, "Crop.left_offset %d\n", stream_cfg->crop.left_offset);
+ dev_dbg(dev, "Crop.bottom_offset %d\n",
+ stream_cfg->crop.bottom_offset);
+ dev_dbg(dev, "Crop.right_offset %d\n", stream_cfg->crop.right_offset);
+ dev_dbg(dev, "----------------\n");
+
+ for (i = 0; i < stream_cfg->nof_output_pins; i++) {
+ dev_dbg(dev, "Output pin %d\n", i);
+ dev_dbg(dev, "Output input pin id %d\n",
+ stream_cfg->output_pins[i].input_pin_id);
+ dev_dbg(dev, "Output res width %d\n",
+ stream_cfg->output_pins[i].output_res.width);
+ dev_dbg(dev, "Output res height %d\n",
+ stream_cfg->output_pins[i].output_res.height);
+ dev_dbg(dev, "Stride %d\n", stream_cfg->output_pins[i].stride);
+ dev_dbg(dev, "Pin type %d\n", stream_cfg->output_pins[i].pt);
+ dev_dbg(dev, "Payload %d\n",
+ stream_cfg->output_pins[i].payload_buf_size);
+ dev_dbg(dev, "Ft %d\n", stream_cfg->output_pins[i].ft);
+ dev_dbg(dev, "Watermar in lines %d\n",
+ stream_cfg->output_pins[i].watermark_in_lines);
+ dev_dbg(dev, "Send irq %d\n",
+ stream_cfg->output_pins[i].send_irq);
+ dev_dbg(dev, "Reserve compression %d\n",
+ stream_cfg->output_pins[i].reserve_compression);
+ dev_dbg(dev, "snoopable %d\n",
+ stream_cfg->output_pins[i].snoopable);
+ dev_dbg(dev, "error_handling_enable %d\n",
+ stream_cfg->output_pins[i].error_handling_enable);
+ dev_dbg(dev, "sensor type %d\n",
+ stream_cfg->output_pins[i].sensor_type);
+ dev_dbg(dev, "----------------\n");
+ }
+
+ dev_dbg(dev, "Isl_use %d\n", stream_cfg->isl_use);
+ dev_dbg(dev, "stream sensor_type %d\n", stream_cfg->sensor_type);
+}
+
+void ipu_fw_isys_dump_frame_buff_set(struct device *dev,
+ struct ipu_fw_isys_frame_buff_set_abi *buf,
+ unsigned int outputs)
+{
+ unsigned int i;
+
+ dev_dbg(dev, "--------------------------\n");
+ dev_dbg(dev, "IPU_FW_ISYS_FRAME_BUFF_SET\n");
+ dev_dbg(dev, "--------------------------\n");
+
+ for (i = 0; i < outputs; i++) {
+ dev_dbg(dev, "Output pin %d\n", i);
+ dev_dbg(dev, "out_buf_id %llu\n",
+ buf->output_pins[i].out_buf_id);
+ dev_dbg(dev, "addr 0x%x\n", buf->output_pins[i].addr);
+ dev_dbg(dev, "compress %u\n", buf->output_pins[i].compress);
+
+ dev_dbg(dev, "----------------\n");
+ }
+
+ dev_dbg(dev, "send_irq_sof 0x%x\n", buf->send_irq_sof);
+ dev_dbg(dev, "send_irq_eof 0x%x\n", buf->send_irq_eof);
+ dev_dbg(dev, "send_resp_sof 0x%x\n", buf->send_resp_sof);
+ dev_dbg(dev, "send_resp_eof 0x%x\n", buf->send_resp_eof);
+ dev_dbg(dev, "send_irq_capture_ack 0x%x\n", buf->send_irq_capture_ack);
+ dev_dbg(dev, "send_irq_capture_done 0x%x\n",
+ buf->send_irq_capture_done);
+ dev_dbg(dev, "send_resp_capture_ack 0x%x\n",
+ buf->send_resp_capture_ack);
+ dev_dbg(dev, "send_resp_capture_done 0x%x\n",
+ buf->send_resp_capture_done);
+}
new file mode 100644
@@ -0,0 +1,816 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_FW_ISYS_H
+#define IPU_FW_ISYS_H
+
+#include "ipu-fw-com.h"
+
+/* Max number of Input/Output Pins */
+#define IPU_MAX_IPINS 4
+
+#define IPU_MAX_OPINS ((IPU_MAX_IPINS) + 1)
+
+#define IPU6_STREAM_ID_MAX 16
+#define IPU6_NONSECURE_STREAM_ID_MAX 12
+#define IPU6_DEV_SEND_QUEUE_SIZE (IPU6_STREAM_ID_MAX)
+#define IPU6_NOF_SRAM_BLOCKS_MAX (IPU6_STREAM_ID_MAX)
+#define IPU6_N_MAX_MSG_SEND_QUEUES (IPU6_STREAM_ID_MAX)
+#define IPU6SE_STREAM_ID_MAX 8
+#define IPU6SE_NONSECURE_STREAM_ID_MAX 4
+#define IPU6SE_DEV_SEND_QUEUE_SIZE (IPU6SE_STREAM_ID_MAX)
+#define IPU6SE_NOF_SRAM_BLOCKS_MAX (IPU6SE_STREAM_ID_MAX)
+#define IPU6SE_N_MAX_MSG_SEND_QUEUES (IPU6SE_STREAM_ID_MAX)
+
+/* Single return queue for all streams/commands type */
+#define IPU_N_MAX_MSG_RECV_QUEUES 1
+/* Single device queue for high priority commands (bypass in-order queue) */
+#define IPU_N_MAX_DEV_SEND_QUEUES 1
+/* Single dedicated send queue for proxy interface */
+#define IPU_N_MAX_PROXY_SEND_QUEUES 1
+/* Single dedicated recv queue for proxy interface */
+#define IPU_N_MAX_PROXY_RECV_QUEUES 1
+/* Send queues layout */
+#define IPU_BASE_PROXY_SEND_QUEUES 0
+#define IPU_BASE_DEV_SEND_QUEUES \
+ (IPU_BASE_PROXY_SEND_QUEUES + IPU_N_MAX_PROXY_SEND_QUEUES)
+#define IPU_BASE_MSG_SEND_QUEUES \
+ (IPU_BASE_DEV_SEND_QUEUES + IPU_N_MAX_DEV_SEND_QUEUES)
+/* Recv queues layout */
+#define IPU_BASE_PROXY_RECV_QUEUES 0
+#define IPU_BASE_MSG_RECV_QUEUES \
+ (IPU_BASE_PROXY_RECV_QUEUES + IPU_N_MAX_PROXY_RECV_QUEUES)
+#define IPU_N_MAX_RECV_QUEUES \
+ (IPU_BASE_MSG_RECV_QUEUES + IPU_N_MAX_MSG_RECV_QUEUES)
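+/*
+ * With the maxima above this yields send queue 0 for the proxy,
+ * queue 1 for high-priority device commands and queues 2..n for the
+ * per-stream message queues; on the receive side, queue 0 is the
+ * proxy response queue and queue 1 the common message return queue.
+ */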
+
+#define IPU6_N_MAX_SEND_QUEUES \
+ (IPU_BASE_MSG_SEND_QUEUES + IPU6_N_MAX_MSG_SEND_QUEUES)
+#define IPU6SE_N_MAX_SEND_QUEUES \
+ (IPU_BASE_MSG_SEND_QUEUES + IPU6SE_N_MAX_MSG_SEND_QUEUES)
+
+/* Max number of supported input pins routed in ISL */
+#define IPU_MAX_IPINS_IN_ISL 2
+
+/* Max number of planes for frame formats supported by the FW */
+#define IPU_PIN_PLANES_MAX 4
+
+/**
+ * enum ipu_fw_isys_resp_type
+ */
+enum ipu_fw_isys_resp_type {
+ IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE = 0,
+ IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK,
+ IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK,
+ IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK,
+ IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK,
+ IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK,
+ IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK,
+ IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY,
+ IPU_FW_ISYS_RESP_TYPE_PIN_DATA_WATERMARK,
+ IPU_FW_ISYS_RESP_TYPE_FRAME_SOF,
+ IPU_FW_ISYS_RESP_TYPE_FRAME_EOF,
+ IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE,
+ IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE,
+ IPU_FW_ISYS_RESP_TYPE_PIN_DATA_SKIPPED,
+ IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_SKIPPED,
+ IPU_FW_ISYS_RESP_TYPE_FRAME_SOF_DISCARDED,
+ IPU_FW_ISYS_RESP_TYPE_FRAME_EOF_DISCARDED,
+ IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY,
+ N_IPU_FW_ISYS_RESP_TYPE
+};
+
+/**
+ * enum ipu_fw_isys_send_type
+ */
+enum ipu_fw_isys_send_type {
+ IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN = 0,
+ IPU_FW_ISYS_SEND_TYPE_STREAM_START,
+ IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE,
+ IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE,
+ IPU_FW_ISYS_SEND_TYPE_STREAM_STOP,
+ IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH,
+ IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE,
+ N_IPU_FW_ISYS_SEND_TYPE
+};
+
+/**
+ * enum ipu_fw_isys_queue_type
+ */
+enum ipu_fw_isys_queue_type {
+ IPU_FW_ISYS_QUEUE_TYPE_PROXY = 0,
+ IPU_FW_ISYS_QUEUE_TYPE_DEV,
+ IPU_FW_ISYS_QUEUE_TYPE_MSG,
+ N_IPU_FW_ISYS_QUEUE_TYPE
+};
+
+/**
+ * enum ipu_fw_isys_stream_source: Specifies a source for a stream
+ */
+enum ipu_fw_isys_stream_source {
+ IPU_FW_ISYS_STREAM_SRC_PORT_0 = 0,
+ IPU_FW_ISYS_STREAM_SRC_PORT_1,
+ IPU_FW_ISYS_STREAM_SRC_PORT_2,
+ IPU_FW_ISYS_STREAM_SRC_PORT_3,
+ IPU_FW_ISYS_STREAM_SRC_PORT_4,
+ IPU_FW_ISYS_STREAM_SRC_PORT_5,
+ IPU_FW_ISYS_STREAM_SRC_PORT_6,
+ IPU_FW_ISYS_STREAM_SRC_PORT_7,
+ IPU_FW_ISYS_STREAM_SRC_PORT_8,
+ IPU_FW_ISYS_STREAM_SRC_PORT_9,
+ IPU_FW_ISYS_STREAM_SRC_PORT_10,
+ IPU_FW_ISYS_STREAM_SRC_PORT_11,
+ IPU_FW_ISYS_STREAM_SRC_PORT_12,
+ IPU_FW_ISYS_STREAM_SRC_PORT_13,
+ IPU_FW_ISYS_STREAM_SRC_PORT_14,
+ IPU_FW_ISYS_STREAM_SRC_PORT_15,
+ IPU_FW_ISYS_STREAM_SRC_MIPIGEN_0,
+ IPU_FW_ISYS_STREAM_SRC_MIPIGEN_1,
+ IPU_FW_ISYS_STREAM_SRC_MIPIGEN_2,
+ IPU_FW_ISYS_STREAM_SRC_MIPIGEN_3,
+ IPU_FW_ISYS_STREAM_SRC_MIPIGEN_4,
+ IPU_FW_ISYS_STREAM_SRC_MIPIGEN_5,
+ IPU_FW_ISYS_STREAM_SRC_MIPIGEN_6,
+ IPU_FW_ISYS_STREAM_SRC_MIPIGEN_7,
+ IPU_FW_ISYS_STREAM_SRC_MIPIGEN_8,
+ IPU_FW_ISYS_STREAM_SRC_MIPIGEN_9,
+ N_IPU_FW_ISYS_STREAM_SRC
+};
+
+enum ipu_fw_isys_sensor_type {
+ /* non-snoopable to PSYS */
+ IPU_FW_ISYS_VC1_SENSOR_DATA = 0,
+ /* non-snoopable for PDAF */
+ IPU_FW_ISYS_VC1_SENSOR_PDAF,
+ /* snoopable to CPU */
+ IPU_FW_ISYS_VC0_SENSOR_METADATA,
+ /* snoopable to CPU */
+ IPU_FW_ISYS_VC0_SENSOR_DATA,
+ N_IPU_FW_ISYS_SENSOR_TYPE
+};
+
+enum ipu6se_fw_isys_sensor_info {
+ /* VC1 */
+ IPU6SE_FW_ISYS_SENSOR_DATA_1 = 1,
+ IPU6SE_FW_ISYS_SENSOR_DATA_2 = 2,
+ IPU6SE_FW_ISYS_SENSOR_DATA_3 = 3,
+ IPU6SE_FW_ISYS_SENSOR_PDAF_1 = 4,
+ IPU6SE_FW_ISYS_SENSOR_PDAF_2 = 4,
+ /* VC0 */
+ IPU6SE_FW_ISYS_SENSOR_METADATA = 5,
+ IPU6SE_FW_ISYS_SENSOR_DATA_4 = 6,
+ IPU6SE_FW_ISYS_SENSOR_DATA_5 = 7,
+ IPU6SE_FW_ISYS_SENSOR_DATA_6 = 8,
+ IPU6SE_FW_ISYS_SENSOR_DATA_7 = 9,
+ IPU6SE_FW_ISYS_SENSOR_DATA_8 = 10,
+ IPU6SE_FW_ISYS_SENSOR_DATA_9 = 11,
+ N_IPU6SE_FW_ISYS_SENSOR_INFO,
+ IPU6SE_FW_ISYS_VC1_SENSOR_DATA_START = IPU6SE_FW_ISYS_SENSOR_DATA_1,
+ IPU6SE_FW_ISYS_VC1_SENSOR_DATA_END = IPU6SE_FW_ISYS_SENSOR_DATA_3,
+ IPU6SE_FW_ISYS_VC0_SENSOR_DATA_START = IPU6SE_FW_ISYS_SENSOR_DATA_4,
+ IPU6SE_FW_ISYS_VC0_SENSOR_DATA_END = IPU6SE_FW_ISYS_SENSOR_DATA_9,
+ IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_START = IPU6SE_FW_ISYS_SENSOR_PDAF_1,
+ IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_END = IPU6SE_FW_ISYS_SENSOR_PDAF_2,
+};
+
+enum ipu6_fw_isys_sensor_info {
+ /* VC1 */
+ IPU6_FW_ISYS_SENSOR_DATA_1 = 1,
+ IPU6_FW_ISYS_SENSOR_DATA_2 = 2,
+ IPU6_FW_ISYS_SENSOR_DATA_3 = 3,
+ IPU6_FW_ISYS_SENSOR_DATA_4 = 4,
+ IPU6_FW_ISYS_SENSOR_DATA_5 = 5,
+ IPU6_FW_ISYS_SENSOR_DATA_6 = 6,
+ IPU6_FW_ISYS_SENSOR_DATA_7 = 7,
+ IPU6_FW_ISYS_SENSOR_DATA_8 = 8,
+ IPU6_FW_ISYS_SENSOR_DATA_9 = 9,
+ IPU6_FW_ISYS_SENSOR_DATA_10 = 10,
+ IPU6_FW_ISYS_SENSOR_PDAF_1 = 11,
+ IPU6_FW_ISYS_SENSOR_PDAF_2 = 12,
+ /* VC0 */
+ IPU6_FW_ISYS_SENSOR_METADATA = 13,
+ IPU6_FW_ISYS_SENSOR_DATA_11 = 14,
+ IPU6_FW_ISYS_SENSOR_DATA_12 = 15,
+ IPU6_FW_ISYS_SENSOR_DATA_13 = 16,
+ IPU6_FW_ISYS_SENSOR_DATA_14 = 17,
+ IPU6_FW_ISYS_SENSOR_DATA_15 = 18,
+ IPU6_FW_ISYS_SENSOR_DATA_16 = 19,
+ N_IPU6_FW_ISYS_SENSOR_INFO,
+ IPU6_FW_ISYS_VC1_SENSOR_DATA_START = IPU6_FW_ISYS_SENSOR_DATA_1,
+ IPU6_FW_ISYS_VC1_SENSOR_DATA_END = IPU6_FW_ISYS_SENSOR_DATA_10,
+ IPU6_FW_ISYS_VC0_SENSOR_DATA_START = IPU6_FW_ISYS_SENSOR_DATA_11,
+ IPU6_FW_ISYS_VC0_SENSOR_DATA_END = IPU6_FW_ISYS_SENSOR_DATA_16,
+ IPU6_FW_ISYS_VC1_SENSOR_PDAF_START = IPU6_FW_ISYS_SENSOR_PDAF_1,
+ IPU6_FW_ISYS_VC1_SENSOR_PDAF_END = IPU6_FW_ISYS_SENSOR_PDAF_2,
+};
+
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT0 IPU_FW_ISYS_STREAM_SRC_PORT_0
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT1 IPU_FW_ISYS_STREAM_SRC_PORT_1
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT2 IPU_FW_ISYS_STREAM_SRC_PORT_2
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_PORT3 IPU_FW_ISYS_STREAM_SRC_PORT_3
+
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTA IPU_FW_ISYS_STREAM_SRC_PORT_4
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_PORTB IPU_FW_ISYS_STREAM_SRC_PORT_5
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT0 IPU_FW_ISYS_STREAM_SRC_PORT_6
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT1 IPU_FW_ISYS_STREAM_SRC_PORT_7
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT2 IPU_FW_ISYS_STREAM_SRC_PORT_8
+#define IPU_FW_ISYS_STREAM_SRC_CSI2_3PH_CPHY_PORT3 IPU_FW_ISYS_STREAM_SRC_PORT_9
+
+#define IPU_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 IPU_FW_ISYS_STREAM_SRC_MIPIGEN_0
+#define IPU_FW_ISYS_STREAM_SRC_MIPIGEN_PORT1 IPU_FW_ISYS_STREAM_SRC_MIPIGEN_1
+
+/**
+ * enum ipu_fw_isys_mipi_vc: the MIPI CSI-2 spec
+ * supports up to 4 virtual channels per physical channel
+ */
+enum ipu_fw_isys_mipi_vc {
+ IPU_FW_ISYS_MIPI_VC_0 = 0,
+ IPU_FW_ISYS_MIPI_VC_1,
+ IPU_FW_ISYS_MIPI_VC_2,
+ IPU_FW_ISYS_MIPI_VC_3,
+ N_IPU_FW_ISYS_MIPI_VC
+};
+
+/**
+ * Supported Pixel Frame formats. Expandable if needed
+ */
+enum ipu_fw_isys_frame_format_type {
+ IPU_FW_ISYS_FRAME_FORMAT_NV11 = 0, /* 12 bit YUV 411, Y, UV plane */
+ IPU_FW_ISYS_FRAME_FORMAT_NV12, /* 12 bit YUV 420, Y, UV plane */
+ IPU_FW_ISYS_FRAME_FORMAT_NV12_16, /* 16 bit YUV 420, Y, UV plane */
+ IPU_FW_ISYS_FRAME_FORMAT_NV12_TILEY, /* 12 bit YUV 420,
+ * Intel proprietary tiled format,
+ * TileY
+ */
+ IPU_FW_ISYS_FRAME_FORMAT_NV16, /* 16 bit YUV 422, Y, UV plane */
+ IPU_FW_ISYS_FRAME_FORMAT_NV21, /* 12 bit YUV 420, Y, VU plane */
+ IPU_FW_ISYS_FRAME_FORMAT_NV61, /* 16 bit YUV 422, Y, VU plane */
+ IPU_FW_ISYS_FRAME_FORMAT_YV12, /* 12 bit YUV 420, Y, V, U plane */
+ IPU_FW_ISYS_FRAME_FORMAT_YV16, /* 16 bit YUV 422, Y, V, U plane */
+ IPU_FW_ISYS_FRAME_FORMAT_YUV420, /* 12 bit YUV 420, Y, U, V plane */
+ IPU_FW_ISYS_FRAME_FORMAT_YUV420_10, /* yuv420, 10 bits per subpixel */
+ IPU_FW_ISYS_FRAME_FORMAT_YUV420_12, /* yuv420, 12 bits per subpixel */
+ IPU_FW_ISYS_FRAME_FORMAT_YUV420_14, /* yuv420, 14 bits per subpixel */
+ IPU_FW_ISYS_FRAME_FORMAT_YUV420_16, /* yuv420, 16 bits per subpixel */
+ IPU_FW_ISYS_FRAME_FORMAT_YUV422, /* 16 bit YUV 422, Y, U, V plane */
+ IPU_FW_ISYS_FRAME_FORMAT_YUV422_16, /* yuv422, 16 bits per subpixel */
+ IPU_FW_ISYS_FRAME_FORMAT_UYVY, /* 16 bit YUV 422, UYVY interleaved */
+ IPU_FW_ISYS_FRAME_FORMAT_YUYV, /* 16 bit YUV 422, YUYV interleaved */
+ IPU_FW_ISYS_FRAME_FORMAT_YUV444, /* 24 bit YUV 444, Y, U, V plane */
+ IPU_FW_ISYS_FRAME_FORMAT_YUV_LINE, /* Internal format, 2 y lines
+ * followed by a uvinterleaved line
+ */
+ IPU_FW_ISYS_FRAME_FORMAT_RAW8, /* RAW8, 1 plane */
+ IPU_FW_ISYS_FRAME_FORMAT_RAW10, /* RAW10, 1 plane */
+ IPU_FW_ISYS_FRAME_FORMAT_RAW12, /* RAW12, 1 plane */
+ IPU_FW_ISYS_FRAME_FORMAT_RAW14, /* RAW14, 1 plane */
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16, /* RAW16, 1 plane */
+ IPU_FW_ISYS_FRAME_FORMAT_RGB565, /* 16 bit RGB, 1 plane. Each 3 sub
+ * pixels are packed into one 16 bit
+ * value, 5 bits for R, 6 bits
+ * for G and 5 bits for B.
+ */
+
+ IPU_FW_ISYS_FRAME_FORMAT_PLANAR_RGB888, /* 24 bit RGB, 3 planes */
+ IPU_FW_ISYS_FRAME_FORMAT_RGBA888, /* 32 bit RGBA, 1 plane,
+ * A=Alpha (alpha is unused)
+ */
+ IPU_FW_ISYS_FRAME_FORMAT_QPLANE6, /* Internal, for advanced ISP */
+ IPU_FW_ISYS_FRAME_FORMAT_BINARY_8, /* byte stream, used for jpeg. */
+ N_IPU_FW_ISYS_FRAME_FORMAT
+};
+
+/* Temporary for driver compatibility */
+#define IPU_FW_ISYS_FRAME_FORMAT_RAW (IPU_FW_ISYS_FRAME_FORMAT_RAW16)
+
+enum ipu_fw_isys_mipi_compression_type {
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_NO_COMPRESSION = 0,
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_8_10_TYPE1,
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_8_10_TYPE2,
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_7_10_TYPE1,
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_7_10_TYPE2,
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_6_10_TYPE1,
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_10_6_10_TYPE2,
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_8_12_TYPE1,
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_8_12_TYPE2,
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_7_12_TYPE1,
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_7_12_TYPE2,
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_6_12_TYPE1,
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_6_12_TYPE2,
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_10_12_TYPE1,
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_12_10_12_TYPE2,
+ N_IPU_FW_ISYS_MIPI_COMPRESSION_TYPE,
+};
+
+/**
+ * Supported MIPI data types. Keep in sync with the array in
+ * ipu_fw_isys_private.c
+ */
+enum ipu_fw_isys_mipi_data_type {
+ /** SYNCHRONIZATION SHORT PACKET DATA TYPES */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_START_CODE = 0x00,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_FRAME_END_CODE = 0x01,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_START_CODE = 0x02, /* Optional */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_LINE_END_CODE = 0x03, /* Optional */
+ /** Reserved 0x04-0x07 */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x04 = 0x04,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x05 = 0x05,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x06 = 0x06,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x07 = 0x07,
+ /** GENERIC SHORT PACKET DATA TYPES */
+ /** They are used to keep the timing information for
+ * the opening/closing of shutters,
+ * triggering of flashes, etc.
+ */
+ /* Generic Short Packet Codes 1 - 8 */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT1 = 0x08,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT2 = 0x09,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT3 = 0x0A,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT4 = 0x0B,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT5 = 0x0C,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT6 = 0x0D,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT7 = 0x0E,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_GENERIC_SHORT8 = 0x0F,
+ /** GENERIC LONG PACKET DATA TYPES */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_NULL = 0x10,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_BLANKING_DATA = 0x11,
+ /* Embedded 8-bit non Image Data */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_EMBEDDED = 0x12,
+ /** Reserved 0x13-0x17 */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x13 = 0x13,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x14 = 0x14,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x15 = 0x15,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x16 = 0x16,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x17 = 0x17,
+ /** YUV DATA TYPES */
+ /* 8 bits per subpixel */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8 = 0x18,
+ /* 10 bits per subpixel */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10 = 0x19,
+ /* 8 bits per subpixel */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_LEGACY = 0x1A,
+ /** Reserved 0x1B */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x1B = 0x1B,
+ /* YUV420 8-bit (Chroma Shifted Pixel Sampling) */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_8_SHIFT = 0x1C,
+ /* YUV420 10-bit (Chroma Shifted Pixel Sampling) */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_YUV420_10_SHIFT = 0x1D,
+ /* UYVY..UVYV, 8 bits per subpixel */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_8 = 0x1E,
+ /* UYVY..UVYV, 10 bits per subpixel */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_YUV422_10 = 0x1F,
+ /** RGB DATA TYPES */
+ /* BGR..BGR, 4 bits per subpixel */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_444 = 0x20,
+ /* BGR..BGR, 5 bits per subpixel */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_555 = 0x21,
+ /* BGR..BGR, 5 bits B and R, 6 bits G */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_565 = 0x22,
+ /* BGR..BGR, 6 bits per subpixel */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_666 = 0x23,
+ /* BGR..BGR, 8 bits per subpixel */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RGB_888 = 0x24,
+ /** Reserved 0x25-0x27 */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x25 = 0x25,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x26 = 0x26,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x27 = 0x27,
+ /** RAW DATA TYPES */
+ /* RAW data, 6 - 14 bits per pixel */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_6 = 0x28,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_7 = 0x29,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_8 = 0x2A,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_10 = 0x2B,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_12 = 0x2C,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_14 = 0x2D,
+ /** Reserved 0x2E-0x2F are used here with assigned meanings */
+ /* RAW data, 16 bits per pixel, not specified in CSI-MIPI standard */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_16 = 0x2E,
+ /* Binary byte stream, which is target at JPEG,
+ * not specified in CSI-MIPI standard
+ */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_BINARY_8 = 0x2F,
+
+ /** USER DEFINED 8-BIT DATA TYPES */
+ /** For example, the data transmitter (e.g. the SoC sensor)
+ * can keep the JPEG data as
+ * the User Defined Data Type 4 and the MPEG data as the
+ * User Defined Data Type 7.
+ */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF1 = 0x30,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF2 = 0x31,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF3 = 0x32,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF4 = 0x33,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF5 = 0x34,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF6 = 0x35,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF7 = 0x36,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_USER_DEF8 = 0x37,
+ /** Reserved 0x38-0x3F */
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x38 = 0x38,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x39 = 0x39,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3A = 0x3A,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3B = 0x3B,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3C = 0x3C,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3D = 0x3D,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3E = 0x3E,
+ IPU_FW_ISYS_MIPI_DATA_TYPE_RESERVED_0x3F = 0x3F,
+
+ /* Keep always last and max value */
+ N_IPU_FW_ISYS_MIPI_DATA_TYPE = 0x40
+};
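+
+/*
+ * Illustrative helper (the name is an assumption, not firmware API): per
+ * the grouping above, data types 0x00-0x0f are MIPI short packets and
+ * everything from IPU_FW_ISYS_MIPI_DATA_TYPE_NULL (0x10) upwards is a
+ * long packet.
+ */
+static inline bool ipu_fw_isys_mipi_dt_is_short_packet(u32 dt)
+{
+ return dt < IPU_FW_ISYS_MIPI_DATA_TYPE_NULL;
+}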
+
+/** enum ipu_fw_isys_pin_type: output pin buffer types.
+ * Buffers can be queued and de-queued to hand them over between IA and ISYS
+ */
+enum ipu_fw_isys_pin_type {
+ /* Captured as MIPI packets */
+ IPU_FW_ISYS_PIN_TYPE_MIPI = 0,
+ /* Captured through the RAW path */
+ IPU_FW_ISYS_PIN_TYPE_RAW_NS = 1,
+ /* Captured through the SoC path */
+ IPU_FW_ISYS_PIN_TYPE_RAW_SOC = 3,
+ /* Reserved for future use, maybe short packets */
+ IPU_FW_ISYS_PIN_TYPE_METADATA_0 = 4,
+ /* Reserved for future use */
+ IPU_FW_ISYS_PIN_TYPE_METADATA_1 = 5,
+ /* Keep always last and max value */
+ N_IPU_FW_ISYS_PIN_TYPE
+};
+
+/**
+ * enum ipu_fw_isys_mipi_store_mode. Describes whether long MIPI packets
+ * reach MIPI SRAM with the long packet header; if the header is
+ * discarded, the only option is to capture the data with pin type MIPI.
+ */
+enum ipu_fw_isys_mipi_store_mode {
+ IPU_FW_ISYS_MIPI_STORE_MODE_NORMAL = 0,
+ IPU_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER,
+ N_IPU_FW_ISYS_MIPI_STORE_MODE
+};
+
+/**
+ * ISYS capture mode and sensor enums
+ * Used for the Tobii sensor; if in doubt, use the default value 0
+ */
+
+enum ipu_fw_isys_capture_mode {
+ IPU_FW_ISYS_CAPTURE_MODE_REGULAR = 0,
+ IPU_FW_ISYS_CAPTURE_MODE_BURST,
+ N_IPU_FW_ISYS_CAPTURE_MODE,
+};
+
+enum ipu_fw_isys_sensor_mode {
+ IPU_FW_ISYS_SENSOR_MODE_NORMAL = 0,
+ IPU_FW_ISYS_SENSOR_MODE_TOBII,
+ N_IPU_FW_ISYS_SENSOR_MODE,
+};
+
+/**
+ * enum ipu_fw_isys_error. Describes the error type detected by the FW
+ */
+enum ipu_fw_isys_error {
+ IPU_FW_ISYS_ERROR_NONE = 0, /* No details */
+ IPU_FW_ISYS_ERROR_FW_INTERNAL_CONSISTENCY, /* enum */
+ IPU_FW_ISYS_ERROR_HW_CONSISTENCY, /* enum */
+ IPU_FW_ISYS_ERROR_DRIVER_INVALID_COMMAND_SEQUENCE, /* enum */
+ IPU_FW_ISYS_ERROR_DRIVER_INVALID_DEVICE_CONFIGURATION, /* enum */
+ IPU_FW_ISYS_ERROR_DRIVER_INVALID_STREAM_CONFIGURATION, /* enum */
+ IPU_FW_ISYS_ERROR_DRIVER_INVALID_FRAME_CONFIGURATION, /* enum */
+ IPU_FW_ISYS_ERROR_INSUFFICIENT_RESOURCES, /* enum */
+ IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO, /* HW code */
+ IPU_FW_ISYS_ERROR_HW_REPORTED_SIG2CIO, /* HW code */
+ IPU_FW_ISYS_ERROR_SENSOR_FW_SYNC, /* enum */
+ IPU_FW_ISYS_ERROR_STREAM_IN_SUSPENSION, /* FW code */
+ IPU_FW_ISYS_ERROR_RESPONSE_QUEUE_FULL, /* FW code */
+ N_IPU_FW_ISYS_ERROR
+};
+
+/**
+ * enum ipu_fw_proxy_error. Describes the error type for
+ * the proxy detected by the FW
+ */
+enum ipu_fw_proxy_error {
+ IPU_FW_PROXY_ERROR_NONE = 0,
+ IPU_FW_PROXY_ERROR_INVALID_WRITE_REGION,
+ IPU_FW_PROXY_ERROR_INVALID_WRITE_OFFSET,
+ N_IPU_FW_PROXY_ERROR
+};
+
+struct ipu_isys;
+
+struct ipu6_fw_isys_buffer_partition_abi {
+ u32 num_gda_pages[IPU6_STREAM_ID_MAX];
+};
+
+struct ipu6_fw_isys_fw_config {
+ struct ipu6_fw_isys_buffer_partition_abi buffer_partition;
+ u32 num_send_queues[N_IPU_FW_ISYS_QUEUE_TYPE];
+ u32 num_recv_queues[N_IPU_FW_ISYS_QUEUE_TYPE];
+};
+
+/**
+ * struct ipu_fw_isys_resolution_abi: Generic resolution structure.
+ * @width: width in pixels
+ * @height: height in lines
+ */
+struct ipu_fw_isys_resolution_abi {
+ u32 width;
+ u32 height;
+};
+
+/**
+ * struct ipu_fw_isys_output_pin_payload_abi
+ * @out_buf_id: Points to output pin buffer - buffer identifier
+ * @addr: Points to output pin buffer - CSS Virtual Address
+ * @compress: Request frame compression (1), or not (0)
+ */
+struct ipu_fw_isys_output_pin_payload_abi {
+ u64 out_buf_id;
+ u32 addr;
+ u32 compress;
+};
+
+/**
+ * struct ipu_fw_isys_output_pin_info_abi
+ * @output_res: output pin resolution
+ * @stride: output stride in Bytes (not valid for statistics)
+ * @watermark_in_lines: pin watermark level in lines
+ * @payload_buf_size: minimum size in Bytes of all buffers that will be
+ * supplied for capture on this pin
+ * @send_irq: assert if pin event should trigger irq
+ * @pt: pin type, real format is "enum ipu_fw_isys_pin_type"
+ * @ft: frame format type, real format is "enum ipu_fw_isys_frame_format_type"
+ * @input_pin_id: related input pin id
+ * @reserve_compression: reserve compression resources for pin
+ */
+struct ipu_fw_isys_output_pin_info_abi {
+ struct ipu_fw_isys_resolution_abi output_res;
+ u32 stride;
+ u32 watermark_in_lines;
+ u32 payload_buf_size;
+ u32 ts_offsets[IPU_PIN_PLANES_MAX];
+ u32 s2m_pixel_soc_pixel_remapping;
+ u32 csi_be_soc_pixel_remapping;
+ u8 send_irq;
+ u8 input_pin_id;
+ u8 pt;
+ u8 ft;
+ u8 reserved;
+ u8 reserve_compression;
+ u8 snoopable;
+ u8 error_handling_enable;
+ u32 sensor_type;
+};
+
+/**
+ * struct ipu_fw_isys_param_pin_abi
+ * @param_buf_id: Points to param port buffer - buffer identifier
+ * @addr: Points to param pin buffer - CSS Virtual Address
+ */
+struct ipu_fw_isys_param_pin_abi {
+ u64 param_buf_id;
+ u32 addr;
+};
+
+/**
+ * struct ipu_fw_isys_input_pin_info_abi
+ * @input_res: input resolution
+ * @dt: mipi data type (enum ipu_fw_isys_mipi_data_type)
+ * @mipi_store_mode: defines if the legacy long packet header will be stored
+ * or discarded; if discarded, the output pin type for this
+ * input pin can only be MIPI
+ * (enum ipu_fw_isys_mipi_store_mode)
+ * @bits_per_pix: native bits per pixel
+ * @mapped_dt: actual data type from sensor
+ * @mipi_decompression: defines which decompression will be applied in the
+ * mipi backend
+ * @crop_first_and_last_lines: control whether to crop the first and last
+ * lines of the input image; the crop is done by
+ * the HW device
+ * @capture_mode: mode of capture, regular or burst, default value is regular
+ */
+struct ipu_fw_isys_input_pin_info_abi {
+ struct ipu_fw_isys_resolution_abi input_res;
+ u8 dt;
+ u8 mipi_store_mode;
+ u8 bits_per_pix;
+ u8 mapped_dt;
+ u8 mipi_decompression;
+ u8 crop_first_and_last_lines;
+ u8 capture_mode;
+};
+
+/**
+ * struct ipu_fw_isys_cropping_abi - cropping coordinates
+ */
+struct ipu_fw_isys_cropping_abi {
+ s32 top_offset;
+ s32 left_offset;
+ s32 bottom_offset;
+ s32 right_offset;
+};
+
+/**
+ * struct ipu_fw_isys_stream_cfg_data_abi
+ * ISYS stream configuration data structure
+ * @crop: defines cropping resolution for the
+ * maximum number of input pins which can be cropped,
+ * it is directly mapped to the HW devices
+ * @input_pins: input pin descriptors
+ * @output_pins: output pin descriptors
+ * @compfmt: de-compression setting for User Defined Data
+ * @nof_input_pins: number of input pins
+ * @nof_output_pins: number of output pins
+ * @send_irq_sof_discarded: send irq on discarded frame sof response
+ * - if '1' it will override the send_resp_sof_discarded
+ * and send the response
+ * - if '0' the send_resp_sof_discarded will determine
+ * whether to send the response
+ * @send_irq_eof_discarded: send irq on discarded frame eof response
+ * - if '1' it will override the send_resp_eof_discarded
+ * and send the response
+ * - if '0' the send_resp_eof_discarded will determine
+ * whether to send the response
+ * @send_resp_sof_discarded: send response for discarded frame sof detected,
+ * used only when send_irq_sof_discarded is '0'
+ * @send_resp_eof_discarded: send response for discarded frame eof detected,
+ * used only when send_irq_eof_discarded is '0'
+ * @src: Stream source index e.g. MIPI_generator_0, CSI2-rx_1
+ * @vc: MIPI virtual channel (up to four virtual channels per physical channel)
+ * @isl_use: indicates whether stream requires ISL and how
+ * @sensor_type: type of connected sensor, tobii or others, default is 0
+ */
+struct ipu_fw_isys_stream_cfg_data_abi {
+ struct ipu_fw_isys_cropping_abi crop;
+ struct ipu_fw_isys_input_pin_info_abi input_pins[IPU_MAX_IPINS];
+ struct ipu_fw_isys_output_pin_info_abi output_pins[IPU_MAX_OPINS];
+ u32 compfmt;
+ u8 nof_input_pins;
+ u8 nof_output_pins;
+ u8 send_irq_sof_discarded;
+ u8 send_irq_eof_discarded;
+ u8 send_resp_sof_discarded;
+ u8 send_resp_eof_discarded;
+ u8 src;
+ u8 vc;
+ u8 isl_use;
+ u8 sensor_type;
+};
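+
+/*
+ * Illustrative sketch only (helper name and values are examples, and
+ * <linux/string.h> is assumed for memset()): a minimal population of a
+ * stream configuration with one RAW10 input pin routed to one RAW_SOC
+ * output pin. The real configuration is built by the ISYS video code.
+ */
+static inline void
+ipu_fw_isys_stream_cfg_fill_example(struct ipu_fw_isys_stream_cfg_data_abi *cfg)
+{
+ memset(cfg, 0, sizeof(*cfg));
+ cfg->src = IPU_FW_ISYS_STREAM_SRC_CSI2_PORT0;
+ cfg->vc = IPU_FW_ISYS_MIPI_VC_0;
+ cfg->nof_input_pins = 1;
+ cfg->nof_output_pins = 1;
+ cfg->input_pins[0].dt = IPU_FW_ISYS_MIPI_DATA_TYPE_RAW_10;
+ cfg->input_pins[0].bits_per_pix = 10;
+ cfg->output_pins[0].input_pin_id = 0;
+ cfg->output_pins[0].pt = IPU_FW_ISYS_PIN_TYPE_RAW_SOC;
+ cfg->output_pins[0].ft = IPU_FW_ISYS_FRAME_FORMAT_RAW16;
+}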
+
+/**
+ * struct ipu_fw_isys_frame_buff_set_abi - frame buffer set
+ * @output_pins: output pin addresses
+ * @send_irq_sof: send irq on frame sof response
+ * - if '1' it will override the send_resp_sof and
+ * send the response
+ * - if '0' the send_resp_sof will determine whether to
+ * send the response
+ * @send_irq_eof: send irq on frame eof response
+ * - if '1' it will override the send_resp_eof and
+ * send the response
+ * - if '0' the send_resp_eof will determine whether to
+ * send the response
+ * @send_irq_capture_ack: send irq on capture ack response
+ * @send_irq_capture_done: send irq on capture done response
+ * @send_resp_sof: send response for frame sof detected,
+ * used only when send_irq_sof is '0'
+ * @send_resp_eof: send response for frame eof detected,
+ * used only when send_irq_eof is '0'
+ * @send_resp_capture_ack: send response for capture ack event
+ * @send_resp_capture_done: send response for capture done event
+ */
+struct ipu_fw_isys_frame_buff_set_abi {
+ struct ipu_fw_isys_output_pin_payload_abi output_pins[IPU_MAX_OPINS];
+ u8 send_irq_sof;
+ u8 send_irq_eof;
+ u8 send_irq_capture_ack;
+ u8 send_irq_capture_done;
+ u8 send_resp_sof;
+ u8 send_resp_eof;
+ u8 send_resp_capture_ack;
+ u8 send_resp_capture_done;
+ u8 reserved;
+};
+
+/**
+ * struct ipu_fw_isys_error_info_abi
+ * @error: error code if something went wrong
+ * @error_details: depending on error code, it may contain additional error info
+ */
+struct ipu_fw_isys_error_info_abi {
+ enum ipu_fw_isys_error error;
+ u32 error_details;
+};
+
+/**
+ * struct ipu_fw_isys_resp_info_abi
+ * @buf_id: buffer identifier the response refers to
+ * @pin: this var is only valid for pin event related responses,
+ * contains pin addresses
+ * @error_info: error information from the FW
+ * @timestamp: Time information for event if available
+ * @stream_handle: stream id the response corresponds to
+ * @type: response type (enum ipu_fw_isys_resp_type)
+ * @pin_id: pin id that the pin payload corresponds to
+ */
+struct ipu_fw_isys_resp_info_abi {
+ u64 buf_id;
+ struct ipu_fw_isys_output_pin_payload_abi pin;
+ struct ipu_fw_isys_error_info_abi error_info;
+ u32 timestamp[2];
+ u8 stream_handle;
+ u8 type;
+ u8 pin_id;
+ u16 reserved;
+};
+
+/**
+ * struct ipu_fw_isys_proxy_error_info_abi
+ * @error: error code if something went wrong
+ * @error_details: depending on error code, it may contain additional
+ * error info
+ */
+struct ipu_fw_isys_proxy_error_info_abi {
+ enum ipu_fw_proxy_error error;
+ u32 error_details;
+};
+
+struct ipu_fw_isys_proxy_resp_info_abi {
+ u32 request_id;
+ struct ipu_fw_isys_proxy_error_info_abi error_info;
+};
+
+/**
+ * struct ipu_fw_proxy_write_queue_token
+ * @request_id: update id for the specific proxy write request
+ * @region_index: Region id for the proxy write request
+ * @offset: Offset of the write request according to the base address
+ * of the region
+ * @value: Value that is requested to be written with the proxy write request
+ */
+struct ipu_fw_proxy_write_queue_token {
+ u32 request_id;
+ u32 region_index;
+ u32 offset;
+ u32 value;
+};
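+
+/*
+ * One of these tokens is filled for each proxy write queued through
+ * ipu_fw_isys_send_proxy_token() below, which takes the same request id,
+ * region index, offset and value.
+ */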
+
+/* From here on, type definitions that do not come from the ISYSAPI interface */
+
+/**
+ * struct ipu_fw_resp_queue_token
+ */
+struct ipu_fw_resp_queue_token {
+ struct ipu_fw_isys_resp_info_abi resp_info;
+};
+
+/**
+ * struct ipu_fw_send_queue_token
+ */
+struct ipu_fw_send_queue_token {
+ u64 buf_handle;
+ u32 payload;
+ u16 send_type;
+ u16 stream_id;
+};
+
+/**
+ * struct ipu_fw_proxy_resp_queue_token
+ */
+struct ipu_fw_proxy_resp_queue_token {
+ struct ipu_fw_isys_proxy_resp_info_abi proxy_resp_info;
+};
+
+/**
+ * struct ipu_fw_proxy_send_queue_token
+ */
+struct ipu_fw_proxy_send_queue_token {
+ u32 request_id;
+ u32 region_index;
+ u32 offset;
+ u32 value;
+};
+
+void ipu_fw_isys_set_params(struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg);
+
+void ipu_fw_isys_dump_stream_cfg(struct device *dev,
+ struct ipu_fw_isys_stream_cfg_data_abi
+ *stream_cfg);
+void ipu_fw_isys_dump_frame_buff_set(struct device *dev,
+ struct ipu_fw_isys_frame_buff_set_abi *buf,
+ unsigned int outputs);
+int ipu_fw_isys_init(struct ipu_isys *isys, unsigned int num_streams);
+int ipu_fw_isys_close(struct ipu_isys *isys);
+int ipu_fw_isys_simple_cmd(struct ipu_isys *isys,
+ const unsigned int stream_handle,
+ enum ipu_fw_isys_send_type send_type);
+int ipu_fw_isys_complex_cmd(struct ipu_isys *isys,
+ const unsigned int stream_handle,
+ void *cpu_mapped_buf,
+ dma_addr_t dma_mapped_buf,
+ size_t size, enum ipu_fw_isys_send_type send_type);
+int ipu_fw_isys_send_proxy_token(struct ipu_isys *isys,
+ unsigned int req_id,
+ unsigned int index,
+ unsigned int offset, u32 value);
+void ipu_fw_isys_cleanup(struct ipu_isys *isys);
+struct ipu_fw_isys_resp_info_abi *
+ipu_fw_isys_get_resp(void *context, unsigned int queue,
+ struct ipu_fw_isys_resp_info_abi *response);
+void ipu_fw_isys_put_resp(void *context, unsigned int queue);
+#endif
new file mode 100644
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2016 - 2020 Intel Corporation
+
+#include <linux/delay.h>
+
+#include <uapi/linux/ipu-psys.h>
+
+#include "ipu-fw-com.h"
+#include "ipu-fw-psys.h"
+#include "ipu-psys.h"
+
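+/*
+ * Mark the process group as started on the host side. The actual START
+ * command is queued to the firmware by ipu_fw_psys_pg_disown() below.
+ */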
+int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd)
+{
+ kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_STARTED;
+ return 0;
+}
+
+int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd)
+{
+ struct ipu_fw_psys_cmd *psys_cmd;
+ int ret = 0;
+
+ psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 0);
+ if (!psys_cmd) {
+ dev_err(&kcmd->fh->psys->adev->dev,
+ "%s failed to get token!\n", __func__);
+ kcmd->pg_user = NULL;
+ ret = -ENODATA;
+ goto out;
+ }
+ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_START;
+ psys_cmd->msg = 0;
+ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address;
+ ipu_send_put_token(kcmd->fh->psys->fwcom, 0);
+
+out:
+ return ret;
+}
+
+int ipu_fw_psys_ppg_suspend(struct ipu_psys_kcmd *kcmd)
+{
+ struct ipu_fw_psys_cmd *psys_cmd;
+ int ret = 0;
+
+ /* ppg suspend cmd uses QUEUE_DEVICE_ID instead of QUEUE_COMMAND_ID */
+ psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 1);
+ if (!psys_cmd) {
+ dev_err(&kcmd->fh->psys->adev->dev,
+ "%s failed to get token!\n", __func__);
+ kcmd->pg_user = NULL;
+ ret = -ENODATA;
+ goto out;
+ }
+ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND;
+ psys_cmd->msg = 0;
+ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address;
+ ipu_send_put_token(kcmd->fh->psys->fwcom, 1);
+
+out:
+ return ret;
+}
+
+int ipu_fw_psys_ppg_resume(struct ipu_psys_kcmd *kcmd)
+{
+ struct ipu_fw_psys_cmd *psys_cmd;
+ int ret = 0;
+
+ psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 0);
+ if (!psys_cmd) {
+ dev_err(&kcmd->fh->psys->adev->dev,
+ "%s failed to get token!\n", __func__);
+ kcmd->pg_user = NULL;
+ ret = -ENODATA;
+ goto out;
+ }
+ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME;
+ psys_cmd->msg = 0;
+ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address;
+ ipu_send_put_token(kcmd->fh->psys->fwcom, 0);
+
+out:
+ return ret;
+}
+
+int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd)
+{
+ struct ipu_fw_psys_cmd *psys_cmd;
+ int ret = 0;
+
+ psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, 0);
+ if (!psys_cmd) {
+ dev_err(&kcmd->fh->psys->adev->dev,
+ "%s failed to get token!\n", __func__);
+ kcmd->pg_user = NULL;
+ ret = -ENODATA;
+ goto out;
+ }
+ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP;
+ psys_cmd->msg = 0;
+ psys_cmd->context_handle = kcmd->kpg->pg->ipu_virtual_address;
+ ipu_send_put_token(kcmd->fh->psys->fwcom, 0);
+
+out:
+ return ret;
+}
+
+int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd)
+{
+ kcmd->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_BLOCKED;
+ return 0;
+}
+
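+/*
+ * Non-blocking event receive: returns 1 and fills @event when a firmware
+ * event was dequeued, 0 when the response queue was empty.
+ */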
+int ipu_fw_psys_rcv_event(struct ipu_psys *psys,
+ struct ipu_fw_psys_event *event)
+{
+ void *rcv;
+
+ rcv = ipu_recv_get_token(psys->fwcom, 0);
+ if (!rcv)
+ return 0;
+
+ memcpy(event, rcv, sizeof(*event));
+ ipu_recv_put_token(psys->fwcom, 0);
+ return 1;
+}
+
+int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal,
+ int terminal_idx,
+ struct ipu_psys_kcmd *kcmd,
+ u32 buffer, unsigned int size)
+{
+ u32 type;
+ u32 buffer_state;
+
+ type = terminal->terminal_type;
+
+ switch (type) {
+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN:
+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT:
+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN:
+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT:
+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN:
+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT:
+ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM:
+ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT:
+ buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED;
+ break;
+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM:
+ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN:
+ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN:
+ buffer_state = IPU_FW_PSYS_BUFFER_FULL;
+ break;
+ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT:
+ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT:
+ buffer_state = IPU_FW_PSYS_BUFFER_EMPTY;
+ break;
+ default:
+ dev_err(&kcmd->fh->psys->adev->dev,
+ "unknown terminal type: 0x%x\n", type);
+ return -EAGAIN;
+ }
+
+ if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN ||
+ type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) {
+ struct ipu_fw_psys_data_terminal *dterminal =
+ (struct ipu_fw_psys_data_terminal *)terminal;
+ dterminal->connection_type = IPU_FW_PSYS_CONNECTION_MEMORY;
+ dterminal->frame.data_bytes = size;
+ if (!ipu_fw_psys_pg_get_protocol(kcmd))
+ dterminal->frame.data = buffer;
+ else
+ dterminal->frame.data_index = terminal_idx;
+ dterminal->frame.buffer_state = buffer_state;
+ } else {
+ struct ipu_fw_psys_param_terminal *pterminal =
+ (struct ipu_fw_psys_param_terminal *)terminal;
+ if (!ipu_fw_psys_pg_get_protocol(kcmd))
+ pterminal->param_payload.buffer = buffer;
+ else
+ pterminal->param_payload.terminal_index = terminal_idx;
+ }
+ return 0;
+}
+
+void ipu_fw_psys_pg_dump(struct ipu_psys *psys,
+ struct ipu_psys_kcmd *kcmd, const char *note)
+{
+ ipu6_fw_psys_pg_dump(psys, kcmd, note);
+}
+
+int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd)
+{
+ return kcmd->kpg->pg->ID;
+}
+
+int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd)
+{
+ return kcmd->kpg->pg->terminal_count;
+}
+
+int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd)
+{
+ return kcmd->kpg->pg->size;
+}
+
+int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd,
+ dma_addr_t vaddress)
+{
+ kcmd->kpg->pg->ipu_virtual_address = vaddress;
+ return 0;
+}
+
+struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd
+ *kcmd, int index)
+{
+ struct ipu_fw_psys_terminal *terminal;
+ u16 *terminal_offset_table;
+
+ terminal_offset_table =
+ (u16 *)((char *)kcmd->kpg->pg +
+ kcmd->kpg->pg->terminals_offset);
+ terminal = (struct ipu_fw_psys_terminal *)
+ ((char *)kcmd->kpg->pg + terminal_offset_table[index]);
+ return terminal;
+}
+
+void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token)
+{
+ kcmd->kpg->pg->token = token;
+}
+
+u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd)
+{
+ return kcmd->kpg->pg->token;
+}
+
+int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd)
+{
+ return kcmd->kpg->pg->protocol_version;
+}
+
+int ipu_fw_psys_ppg_set_buffer_set(struct ipu_psys_kcmd *kcmd,
+ struct ipu_fw_psys_terminal *terminal,
+ int terminal_idx, u32 buffer)
+{
+ u32 type;
+ u32 buffer_state;
+ u32 *buffer_ptr;
+ struct ipu_fw_psys_buffer_set *buf_set = kcmd->kbuf_set->buf_set;
+
+ type = terminal->terminal_type;
+
+ switch (type) {
+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN:
+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT:
+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN:
+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT:
+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN:
+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT:
+ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM:
+ case IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT:
+ buffer_state = IPU_FW_PSYS_BUFFER_UNDEFINED;
+ break;
+ case IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM:
+ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN:
+ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN:
+ buffer_state = IPU_FW_PSYS_BUFFER_FULL;
+ break;
+ case IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT:
+ case IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT:
+ buffer_state = IPU_FW_PSYS_BUFFER_EMPTY;
+ break;
+ default:
+ dev_err(&kcmd->fh->psys->adev->dev,
+ "unknown terminal type: 0x%x\n", type);
+ return -EAGAIN;
+ }
+
+ buffer_ptr = (u32 *)((char *)buf_set + sizeof(*buf_set) +
+ terminal_idx * sizeof(*buffer_ptr));
+
+ *buffer_ptr = buffer;
+
+ if (type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN ||
+ type == IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT) {
+ struct ipu_fw_psys_data_terminal *dterminal =
+ (struct ipu_fw_psys_data_terminal *)terminal;
+ dterminal->frame.buffer_state = buffer_state;
+ }
+
+ return 0;
+}
+
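+/*
+ * A buffer set is the fixed header immediately followed by one 32-bit
+ * buffer address per terminal; this is the layout that
+ * ipu_fw_psys_ppg_set_buffer_set() above indexes into.
+ */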
+size_t ipu_fw_psys_ppg_get_buffer_set_size(struct ipu_psys_kcmd *kcmd)
+{
+ return (sizeof(struct ipu_fw_psys_buffer_set) +
+ kcmd->kpg->pg->terminal_count * sizeof(u32));
+}
+
+int
+ipu_fw_psys_ppg_buffer_set_vaddress(struct ipu_fw_psys_buffer_set *buf_set,
+ u32 vaddress)
+{
+ buf_set->ipu_virtual_address = vaddress;
+ return 0;
+}
+
+int ipu_fw_psys_ppg_buffer_set_set_kernel_enable_bitmap(
+ struct ipu_fw_psys_buffer_set *buf_set,
+ u32 *kernel_enable_bitmap)
+{
+ memcpy(buf_set->kernel_enable_bitmap, (u8 *)kernel_enable_bitmap,
+ sizeof(buf_set->kernel_enable_bitmap));
+ return 0;
+}
+
+struct ipu_fw_psys_buffer_set *
+ipu_fw_psys_ppg_create_buffer_set(struct ipu_psys_kcmd *kcmd,
+ void *kaddr, u32 frame_counter)
+{
+ struct ipu_fw_psys_buffer_set *buffer_set = NULL;
+ unsigned int i;
+
+ buffer_set = (struct ipu_fw_psys_buffer_set *)kaddr;
+
+ /*
+ * Set base struct members
+ */
+ buffer_set->ipu_virtual_address = 0;
+ buffer_set->process_group_handle = kcmd->kpg->pg->ipu_virtual_address;
+ buffer_set->frame_counter = frame_counter;
+ buffer_set->terminal_count = kcmd->kpg->pg->terminal_count;
+
+ /*
+ * Initialize adjacent buffer addresses
+ */
+ for (i = 0; i < buffer_set->terminal_count; i++) {
+ u32 *buffer =
+ (u32 *)((char *)buffer_set +
+ sizeof(*buffer_set) + sizeof(u32) * i);
+
+ *buffer = 0;
+ }
+
+ return buffer_set;
+}
+
+int ipu_fw_psys_ppg_enqueue_bufs(struct ipu_psys_kcmd *kcmd)
+{
+ struct ipu_fw_psys_cmd *psys_cmd;
+ unsigned int queue_id;
+ int ret = 0;
+ unsigned int size;
+
+ if (ipu_ver == IPU_VER_6SE)
+ size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+ else
+ size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+ queue_id = kcmd->kpg->pg->base_queue_id;
+
+ if (queue_id >= size)
+ return -EINVAL;
+
+ psys_cmd = ipu_send_get_token(kcmd->fh->psys->fwcom, queue_id);
+ if (!psys_cmd) {
+ dev_err(&kcmd->fh->psys->adev->dev,
+ "%s failed to get token!\n", __func__);
+ kcmd->pg_user = NULL;
+ return -ENODATA;
+ }
+
+ psys_cmd->command = IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN;
+ psys_cmd->msg = 0;
+ psys_cmd->context_handle = kcmd->kbuf_set->buf_set->ipu_virtual_address;
+
+ ipu_send_put_token(kcmd->fh->psys->fwcom, queue_id);
+
+ return ret;
+}
+
+u8 ipu_fw_psys_ppg_get_base_queue_id(struct ipu_psys_kcmd *kcmd)
+{
+ return kcmd->kpg->pg->base_queue_id;
+}
+
+void ipu_fw_psys_ppg_set_base_queue_id(struct ipu_psys_kcmd *kcmd, u8 queue_id)
+{
+ kcmd->kpg->pg->base_queue_id = queue_id;
+}
+
+int ipu_fw_psys_open(struct ipu_psys *psys)
+{
+ int retry = IPU_PSYS_OPEN_RETRY, retval;
+
+ retval = ipu_fw_com_open(psys->fwcom);
+ if (retval) {
+ dev_err(&psys->adev->dev, "fw com open failed.\n");
+ return retval;
+ }
+
+ do {
+ usleep_range(IPU_PSYS_OPEN_TIMEOUT_US,
+ IPU_PSYS_OPEN_TIMEOUT_US + 10);
+ retval = ipu_fw_com_ready(psys->fwcom);
+ if (!retval) {
+ dev_dbg(&psys->adev->dev, "psys port open ready!\n");
+ break;
+ }
+ } while (retry-- > 0);
+
+ /* ipu_fw_com_ready() never succeeded if retval is still non-zero */
+ if (retval) {
+ dev_err(&psys->adev->dev, "psys port open ready failed %d\n",
+ retval);
+ ipu_fw_com_close(psys->fwcom);
+ return retval;
+ }
+ return 0;
+}
+
+int ipu_fw_psys_close(struct ipu_psys *psys)
+{
+ int retval;
+
+ retval = ipu_fw_com_close(psys->fwcom);
+ if (retval) {
+ dev_err(&psys->adev->dev, "fw com close failed.\n");
+ return retval;
+ }
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,382 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2016 - 2020 Intel Corporation */
+
+#ifndef IPU_FW_PSYS_H
+#define IPU_FW_PSYS_H
+
+#include "ipu6-platform-resources.h"
+#include "ipu6se-platform-resources.h"
+#include "ipu6ep-platform-resources.h"
+
+#define IPU_FW_PSYS_CMD_QUEUE_SIZE 0x20
+#define IPU_FW_PSYS_EVENT_QUEUE_SIZE 0x40
+
+#define IPU_FW_PSYS_CMD_BITS 64
+#define IPU_FW_PSYS_EVENT_BITS 128
+
+enum {
+ IPU_FW_PSYS_EVENT_TYPE_SUCCESS = 0,
+ IPU_FW_PSYS_EVENT_TYPE_UNKNOWN_ERROR = 1,
+ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NOT_FOUND = 2,
+ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_TOO_BIG = 3,
+ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_DDR_TRANS_ERR = 4,
+ IPU_FW_PSYS_EVENT_TYPE_RET_REM_OBJ_NULL_PKG_DIR_ADDR = 5,
+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAME_ERR = 6,
+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_LOAD_FRAGMENT_ERR = 7,
+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_COUNT_ZERO = 8,
+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_PROCESS_INIT_ERR = 9,
+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_ABORT = 10,
+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_NULL = 11,
+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_VALIDATION_ERR = 12,
+ IPU_FW_PSYS_EVENT_TYPE_PROC_GRP_INVALID_FRAME = 13
+};
+
+enum {
+ IPU_FW_PSYS_EVENT_QUEUE_MAIN_ID,
+ IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID
+};
+
+enum {
+ IPU_FW_PSYS_PROCESS_GROUP_ERROR = 0,
+ IPU_FW_PSYS_PROCESS_GROUP_CREATED,
+ IPU_FW_PSYS_PROCESS_GROUP_READY,
+ IPU_FW_PSYS_PROCESS_GROUP_BLOCKED,
+ IPU_FW_PSYS_PROCESS_GROUP_STARTED,
+ IPU_FW_PSYS_PROCESS_GROUP_RUNNING,
+ IPU_FW_PSYS_PROCESS_GROUP_STALLED,
+ IPU_FW_PSYS_PROCESS_GROUP_STOPPED,
+ IPU_FW_PSYS_N_PROCESS_GROUP_STATES
+};
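+
+/*
+ * Host-side note: ipu_fw_psys_pg_submit() moves a group to BLOCKED and
+ * ipu_fw_psys_pg_start() to STARTED; the other states are observed from
+ * firmware responses.
+ */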
+
+enum {
+ IPU_FW_PSYS_CONNECTION_MEMORY = 0,
+ IPU_FW_PSYS_CONNECTION_MEMORY_STREAM,
+ IPU_FW_PSYS_CONNECTION_STREAM,
+ IPU_FW_PSYS_N_CONNECTION_TYPES
+};
+
+enum {
+ IPU_FW_PSYS_BUFFER_NULL = 0,
+ IPU_FW_PSYS_BUFFER_UNDEFINED,
+ IPU_FW_PSYS_BUFFER_EMPTY,
+ IPU_FW_PSYS_BUFFER_NONEMPTY,
+ IPU_FW_PSYS_BUFFER_FULL,
+ IPU_FW_PSYS_N_BUFFER_STATES
+};
+
+enum {
+ IPU_FW_PSYS_TERMINAL_TYPE_DATA_IN = 0,
+ IPU_FW_PSYS_TERMINAL_TYPE_DATA_OUT,
+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_STREAM,
+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_IN,
+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_CACHED_OUT,
+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_IN,
+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SPATIAL_OUT,
+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_IN,
+ IPU_FW_PSYS_TERMINAL_TYPE_PARAM_SLICED_OUT,
+ IPU_FW_PSYS_TERMINAL_TYPE_STATE_IN,
+ IPU_FW_PSYS_TERMINAL_TYPE_STATE_OUT,
+ IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM,
+ IPU_FW_PSYS_TERMINAL_TYPE_PROGRAM_CONTROL_INIT,
+ IPU_FW_PSYS_N_TERMINAL_TYPES
+};
+
+enum {
+ IPU_FW_PSYS_COL_DIMENSION = 0,
+ IPU_FW_PSYS_ROW_DIMENSION = 1,
+ IPU_FW_PSYS_N_DATA_DIMENSION = 2
+};
+
+enum {
+ IPU_FW_PSYS_PROCESS_GROUP_CMD_NOP = 0,
+ IPU_FW_PSYS_PROCESS_GROUP_CMD_SUBMIT,
+ IPU_FW_PSYS_PROCESS_GROUP_CMD_ATTACH,
+ IPU_FW_PSYS_PROCESS_GROUP_CMD_DETACH,
+ IPU_FW_PSYS_PROCESS_GROUP_CMD_START,
+ IPU_FW_PSYS_PROCESS_GROUP_CMD_DISOWN,
+ IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN,
+ IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP,
+ IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND,
+ IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME,
+ IPU_FW_PSYS_PROCESS_GROUP_CMD_ABORT,
+ IPU_FW_PSYS_PROCESS_GROUP_CMD_RESET,
+ IPU_FW_PSYS_N_PROCESS_GROUP_CMDS
+};
+
+enum {
+ IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_LEGACY = 0,
+ IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_PPG,
+ IPU_FW_PSYS_PROCESS_GROUP_N_PROTOCOLS
+};
+
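+/* Shared with the firmware: the layout is fixed ABI, hence __packed. */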
+struct __packed ipu_fw_psys_process_group {
+ u64 token;
+ u64 private_token;
+ u32 routing_bitmap[IPU_FW_PSYS_RBM_NOF_ELEMS];
+ u32 kernel_bitmap[IPU_FW_PSYS_KBM_NOF_ELEMS];
+ u32 size;
+ u32 psys_server_init_cycles;
+ u32 pg_load_start_ts;
+ u32 pg_load_cycles;
+ u32 pg_init_cycles;
+ u32 pg_processing_cycles;
+ u32 pg_next_frame_init_cycles;
+ u32 pg_complete_cycles;
+ u32 ID;
+ u32 state;
+ u32 ipu_virtual_address;
+ u32 resource_bitmap;
+ u16 fragment_count;
+ u16 fragment_state;
+ u16 fragment_limit;
+ u16 processes_offset;
+ u16 terminals_offset;
+ u8 process_count;
+ u8 terminal_count;
+ u8 subgraph_count;
+ u8 protocol_version;
+ u8 base_queue_id;
+ u8 num_queues;
+ u8 mask_irq;
+ u8 error_handling_enable;
+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT];
+};
+
+struct ipu_fw_psys_srv_init {
+ void *host_ddr_pkg_dir;
+ u32 ddr_pkg_dir_address;
+ u32 pkg_dir_size;
+
+ u32 icache_prefetch_sp;
+ u32 icache_prefetch_isp;
+};
+
+struct __packed ipu_fw_psys_cmd {
+ u16 command;
+ u16 msg;
+ u32 context_handle;
+};
+
+struct __packed ipu_fw_psys_event {
+ u16 status;
+ u16 command;
+ u32 context_handle;
+ u64 token;
+};
+
+struct ipu_fw_psys_terminal {
+ u32 terminal_type;
+ s16 parent_offset;
+ u16 size;
+ u16 tm_index;
+ u8 ID;
+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT];
+};
+
+struct ipu_fw_psys_param_payload {
+ u64 host_buffer;
+ u32 buffer;
+ u32 terminal_index;
+};
+
+struct ipu_fw_psys_param_terminal {
+ struct ipu_fw_psys_terminal base;
+ struct ipu_fw_psys_param_payload param_payload;
+ u16 param_section_desc_offset;
+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT];
+};
+
+struct ipu_fw_psys_frame {
+ u32 buffer_state;
+ u32 access_type;
+ u32 pointer_state;
+ u32 access_scope;
+ u32 data;
+ u32 data_index;
+ u32 data_bytes;
+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT];
+};
+
+struct ipu_fw_psys_frame_descriptor {
+ u32 frame_format_type;
+ u32 plane_count;
+ u32 plane_offsets[IPU_FW_PSYS_N_FRAME_PLANES];
+ u32 stride[1];
+ u32 ts_offsets[IPU_FW_PSYS_N_FRAME_PLANES];
+ u16 dimension[2];
+ u16 size;
+ u8 bpp;
+ u8 bpe;
+ u8 is_compressed;
+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT];
+};
+
+struct ipu_fw_psys_stream {
+ u64 dummy;
+};
+
+struct ipu_fw_psys_data_terminal {
+ struct ipu_fw_psys_terminal base;
+ struct ipu_fw_psys_frame_descriptor frame_descriptor;
+ struct ipu_fw_psys_frame frame;
+ struct ipu_fw_psys_stream stream;
+ u32 reserved;
+ u32 connection_type;
+ u16 fragment_descriptor_offset;
+ u8 kernel_id;
+ u8 subgraph_id;
+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT];
+};
+
+struct ipu_fw_psys_buffer_set {
+ u64 token;
+ u32 kernel_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS];
+ u32 terminal_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS];
+ u32 routing_enable_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS];
+ u32 rbm[IPU_FW_PSYS_RBM_NOF_ELEMS];
+ u32 ipu_virtual_address;
+ u32 process_group_handle;
+ u16 terminal_count;
+ u8 frame_counter;
+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT];
+};
+
+struct ipu_fw_psys_program_group_manifest {
+ u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS];
+ u32 ID;
+ u16 program_manifest_offset;
+ u16 terminal_manifest_offset;
+ u16 private_data_offset;
+ u16 rbm_manifest_offset;
+ u16 size;
+ u8 alignment;
+ u8 kernel_count;
+ u8 program_count;
+ u8 terminal_count;
+ u8 subgraph_count;
+ u8 reserved[5];
+};
+
+struct ipu_fw_generic_program_manifest {
+ u16 *dev_chn_size;
+ u16 *dev_chn_offset;
+ u16 *ext_mem_size;
+ u16 *ext_mem_offset;
+ u8 cell_id;
+ u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS];
+ u8 cell_type_id;
+ u8 *is_dfm_relocatable;
+ u32 *dfm_port_bitmap;
+ u32 *dfm_active_port_bitmap;
+};
+
+struct ipu_fw_resource_definitions {
+ u32 num_cells;
+ u32 num_cells_type;
+ const u8 *cells;
+ u32 num_dev_channels;
+ const u16 *dev_channels;
+
+ u32 num_ext_mem_types;
+ u32 num_ext_mem_ids;
+ const u16 *ext_mem_ids;
+
+ u32 num_dfm_ids;
+ const u16 *dfms;
+
+ u32 cell_mem_row;
+ const u8 *cell_mem;
+};
+
+struct ipu6_psys_hw_res_variant {
+ unsigned int queue_num;
+ unsigned int cell_num;
+ int (*set_proc_dev_chn)(struct ipu_fw_psys_process *ptr, u16 offset,
+ u16 value);
+ int (*set_proc_dfm_bitmap)(struct ipu_fw_psys_process *ptr,
+ u16 id, u32 bitmap, u32 active_bitmap);
+ int (*set_proc_ext_mem)(struct ipu_fw_psys_process *ptr,
+ u16 type_id, u16 mem_id, u16 offset);
+ int (*get_pgm_by_proc)(struct ipu_fw_generic_program_manifest *gen_pm,
+ const struct ipu_fw_psys_program_group_manifest
+ *pg_manifest,
+ struct ipu_fw_psys_process *process);
+};
+
+struct ipu_psys_kcmd;
+struct ipu_psys;
+int ipu_fw_psys_pg_start(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_disown(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_abort(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_submit(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_ppg_suspend(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_ppg_resume(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_load_cycles(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_init_cycles(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_processing_cycles(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_server_init_cycles(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_next_frame_init_cycles(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_complete_cycles(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_rcv_event(struct ipu_psys *psys,
+ struct ipu_fw_psys_event *event);
+int ipu_fw_psys_terminal_set(struct ipu_fw_psys_terminal *terminal,
+ int terminal_idx,
+ struct ipu_psys_kcmd *kcmd,
+ u32 buffer, unsigned int size);
+void ipu_fw_psys_pg_dump(struct ipu_psys *psys,
+ struct ipu_psys_kcmd *kcmd, const char *note);
+int ipu_fw_psys_pg_get_id(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_get_terminal_count(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_get_size(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_pg_set_ipu_vaddress(struct ipu_psys_kcmd *kcmd,
+ dma_addr_t vaddress);
+struct ipu_fw_psys_terminal *ipu_fw_psys_pg_get_terminal(struct ipu_psys_kcmd
+ *kcmd, int index);
+void ipu_fw_psys_pg_set_token(struct ipu_psys_kcmd *kcmd, u64 token);
+u64 ipu_fw_psys_pg_get_token(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_ppg_set_buffer_set(struct ipu_psys_kcmd *kcmd,
+ struct ipu_fw_psys_terminal *terminal,
+ int terminal_idx, u32 buffer);
+size_t ipu_fw_psys_ppg_get_buffer_set_size(struct ipu_psys_kcmd *kcmd);
+int
+ipu_fw_psys_ppg_buffer_set_vaddress(struct ipu_fw_psys_buffer_set *buf_set,
+ u32 vaddress);
+int ipu_fw_psys_ppg_buffer_set_set_kernel_enable_bitmap(
+ struct ipu_fw_psys_buffer_set *buf_set, u32 *kernel_enable_bitmap);
+struct ipu_fw_psys_buffer_set *
+ipu_fw_psys_ppg_create_buffer_set(struct ipu_psys_kcmd *kcmd,
+ void *kaddr, u32 frame_counter);
+int ipu_fw_psys_ppg_enqueue_bufs(struct ipu_psys_kcmd *kcmd);
+u8 ipu_fw_psys_ppg_get_base_queue_id(struct ipu_psys_kcmd *kcmd);
+void ipu_fw_psys_ppg_set_base_queue_id(struct ipu_psys_kcmd *kcmd, u8 queue_id);
+int ipu_fw_psys_pg_get_protocol(struct ipu_psys_kcmd *kcmd);
+int ipu_fw_psys_open(struct ipu_psys *psys);
+int ipu_fw_psys_close(struct ipu_psys *psys);
+
+/* common resource interface for both abi and api mode */
+int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index,
+ u8 value);
+u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index);
+int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr);
+int ipu_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset,
+ u16 value);
+int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr,
+ u16 type_id, u16 mem_id, u16 offset);
+int ipu_fw_psys_get_program_manifest_by_process(
+ struct ipu_fw_generic_program_manifest *gen_pm,
+ const struct ipu_fw_psys_program_group_manifest *pg_manifest,
+ struct ipu_fw_psys_process *process);
+int ipu6_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset,
+ u16 value);
+int ipu6_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr,
+ u16 id, u32 bitmap,
+ u32 active_bitmap);
+int ipu6_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr,
+ u16 type_id, u16 mem_id, u16 offset);
+int ipu6_fw_psys_get_program_manifest_by_process(
+ struct ipu_fw_generic_program_manifest *gen_pm,
+ const struct ipu_fw_psys_program_group_manifest *pg_manifest,
+ struct ipu_fw_psys_process *process);
+void ipu6_fw_psys_pg_dump(struct ipu_psys *psys,
+ struct ipu_psys_kcmd *kcmd, const char *note);
+void ipu6_psys_hw_res_variant_init(void);
+#endif /* IPU_FW_PSYS_H */
new file mode 100644
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2014 - 2021 Intel Corporation
+
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <media/ipu-isys.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-isys.h"
+#include "ipu-isys-csi2-be.h"
+#include "ipu-isys-subdev.h"
+#include "ipu-isys-video.h"
+
+/*
+ * Raw bayer format pixel order MUST BE MAINTAINED in groups of four codes.
+ * Otherwise pixel order calculation below WILL BREAK!
+ */
+static const u32 csi2_be_soc_supported_codes_pad[] = {
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_RGB565_1X16,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ 0,
+};
+
+/*
+ * Raw bayer format pixel order MUST BE MAINTAINED in groups of four codes.
+ * Otherwise pixel order calculation below WILL BREAK!
+ */
+static const u32 csi2_be_soc_supported_raw_bayer_codes_pad[] = {
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ 0,
+};
+
+static int get_supported_code_index(u32 code)
+{
+ int i;
+
+ for (i = 0; csi2_be_soc_supported_raw_bayer_codes_pad[i]; i++) {
+ if (csi2_be_soc_supported_raw_bayer_codes_pad[i] == code)
+ return i;
+ }
+ return -EINVAL;
+}
+
+static const u32 *csi2_be_soc_supported_codes[NR_OF_CSI2_BE_SOC_PADS];
+
+static struct v4l2_subdev_internal_ops csi2_be_soc_sd_internal_ops = {
+ .open = ipu_isys_subdev_open,
+ .close = ipu_isys_subdev_close,
+};
+
+static const struct v4l2_subdev_core_ops csi2_be_soc_sd_core_ops = {
+};
+
+static const struct v4l2_ctrl_config compression_ctrl_cfg = {
+ .ops = NULL,
+ .id = V4L2_CID_IPU_ISYS_COMPRESSION,
+ .name = "ISYS BE-SOC compression",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+static int set_stream(struct v4l2_subdev *sd, int enable)
+{
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops csi2_be_soc_sd_video_ops = {
+ .s_stream = set_stream,
+};
+
+static int
+__subdev_link_validate(struct v4l2_subdev *sd, struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
+ struct ipu_isys_pipeline,
+ pipe);
+
+ ip->csi2_be_soc = to_ipu_isys_csi2_be_soc(sd);
+ return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt);
+}
+
+static int
+ipu_isys_csi2_be_soc_set_sel(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+ struct media_pad *pad = &asd->sd.entity.pads[sel->pad];
+
+ if (sel->target == V4L2_SEL_TGT_CROP &&
+ pad->flags & MEDIA_PAD_FL_SOURCE &&
+ asd->valid_tgts[sel->pad].crop) {
+ enum isys_subdev_prop_tgt tgt =
+ IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP;
+ struct v4l2_mbus_framefmt *ffmt =
+ __ipu_isys_get_ffmt(sd, sd_state, sel->pad, sel->which);
+
+ if (get_supported_code_index(ffmt->code) < 0) {
+ /* Non-bayer formats can't be cropped by an odd number of lines */
+ sel->r.left &= ~1;
+ sel->r.top &= ~1;
+ }
+
+ sel->r.width = clamp(sel->r.width, IPU_ISYS_MIN_WIDTH,
+ IPU_ISYS_MAX_WIDTH);
+
+ sel->r.height = clamp(sel->r.height, IPU_ISYS_MIN_HEIGHT,
+ IPU_ISYS_MAX_HEIGHT);
+
+ *__ipu_isys_get_selection(sd, sd_state, sel->target, sel->pad,
+ sel->which) = sel->r;
+ ipu_isys_subdev_fmt_propagate(sd, sd_state, NULL, &sel->r,
+ tgt, sel->pad, sel->which);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static const struct v4l2_subdev_pad_ops csi2_be_soc_sd_pad_ops = {
+ .link_validate = __subdev_link_validate,
+ .get_fmt = ipu_isys_subdev_get_ffmt,
+ .set_fmt = ipu_isys_subdev_set_ffmt,
+ .get_selection = ipu_isys_subdev_get_sel,
+ .set_selection = ipu_isys_csi2_be_soc_set_sel,
+ .enum_mbus_code = ipu_isys_subdev_enum_mbus_code,
+};
+
+static struct v4l2_subdev_ops csi2_be_soc_sd_ops = {
+ .core = &csi2_be_soc_sd_core_ops,
+ .video = &csi2_be_soc_sd_video_ops,
+ .pad = &csi2_be_soc_sd_pad_ops,
+};
+
+static struct media_entity_operations csi2_be_soc_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static void csi2_be_soc_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *ffmt =
+ __ipu_isys_get_ffmt(sd, sd_state, fmt->pad,
+ fmt->which);
+
+ if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SINK) {
+ if (fmt->format.field != V4L2_FIELD_ALTERNATE)
+ fmt->format.field = V4L2_FIELD_NONE;
+ *ffmt = fmt->format;
+
+ ipu_isys_subdev_fmt_propagate(sd, sd_state, &fmt->format,
+ NULL,
+ IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT,
+ fmt->pad, fmt->which);
+ } else if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SOURCE) {
+ struct v4l2_mbus_framefmt *sink_ffmt;
+ struct v4l2_rect *r = __ipu_isys_get_selection(sd, sd_state,
+ V4L2_SEL_TGT_CROP, fmt->pad, fmt->which);
+ struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+ u32 code;
+ int idx;
+
+ sink_ffmt = __ipu_isys_get_ffmt(sd, sd_state, 0, fmt->which);
+ code = sink_ffmt->code;
+ idx = get_supported_code_index(code);
+
+ if (asd->valid_tgts[fmt->pad].crop && idx >= 0) {
+ int crop_info = 0;
+
+ /* Only cropping odd lines at the top side is handled here. */
+ if (r->top & 1)
+ crop_info |= CSI2_BE_CROP_VER;
+
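+ /*
+ * The supported bayer codes are listed in groups of four
+ * sharing a bit depth; XORing the low index bits with the
+ * crop flags selects the pixel-order variant matching the
+ * cropped frame start.
+ */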
+ code = csi2_be_soc_supported_raw_bayer_codes_pad
+ [((idx & CSI2_BE_CROP_MASK) ^ crop_info)
+ + (idx & ~CSI2_BE_CROP_MASK)];
+
+ }
+ ffmt->code = code;
+ ffmt->width = r->width;
+ ffmt->height = r->height;
+ ffmt->field = sink_ffmt->field;
+
+ }
+}
+
+void ipu_isys_csi2_be_soc_cleanup(struct ipu_isys_csi2_be_soc *csi2_be_soc)
+{
+ v4l2_device_unregister_subdev(&csi2_be_soc->asd.sd);
+ ipu_isys_subdev_cleanup(&csi2_be_soc->asd);
+ v4l2_ctrl_handler_free(&csi2_be_soc->av.ctrl_handler);
+ ipu_isys_video_cleanup(&csi2_be_soc->av);
+}
+
+int ipu_isys_csi2_be_soc_init(struct ipu_isys_csi2_be_soc *csi2_be_soc,
+ struct ipu_isys *isys, int index)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = CSI2_BE_SOC_PAD_SINK,
+ .format = {
+ .width = 4096,
+ .height = 3072,
+ },
+ };
+ int rval, i;
+
+ csi2_be_soc->asd.sd.entity.ops = &csi2_be_soc_entity_ops;
+ csi2_be_soc->asd.isys = isys;
+
+ rval = ipu_isys_subdev_init(&csi2_be_soc->asd,
+ &csi2_be_soc_sd_ops, 0,
+ NR_OF_CSI2_BE_SOC_PADS,
+ NR_OF_CSI2_BE_SOC_SOURCE_PADS,
+ NR_OF_CSI2_BE_SOC_SINK_PADS, 0);
+ if (rval)
+ goto fail;
+
+ csi2_be_soc->asd.pad[CSI2_BE_SOC_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ csi2_be_soc->asd.pad[CSI2_BE_SOC_PAD_SOURCE].flags =
+ MEDIA_PAD_FL_SOURCE;
+ csi2_be_soc->asd.valid_tgts[CSI2_BE_SOC_PAD_SOURCE].crop = true;
+
+ for (i = 0; i < NR_OF_CSI2_BE_SOC_PADS; i++)
+ csi2_be_soc_supported_codes[i] =
+ csi2_be_soc_supported_codes_pad;
+ csi2_be_soc->asd.supported_codes = csi2_be_soc_supported_codes;
+ csi2_be_soc->asd.be_mode = IPU_BE_SOC;
+ csi2_be_soc->asd.isl_mode = IPU_ISL_OFF;
+ csi2_be_soc->asd.set_ffmt = csi2_be_soc_set_ffmt;
+
+ fmt.pad = CSI2_BE_SOC_PAD_SINK;
+ ipu_isys_subdev_set_ffmt(&csi2_be_soc->asd.sd, NULL, &fmt);
+ fmt.pad = CSI2_BE_SOC_PAD_SOURCE;
+ ipu_isys_subdev_set_ffmt(&csi2_be_soc->asd.sd, NULL, &fmt);
+ csi2_be_soc->asd.sd.internal_ops = &csi2_be_soc_sd_internal_ops;
+
+ snprintf(csi2_be_soc->asd.sd.name, sizeof(csi2_be_soc->asd.sd.name),
+ IPU_ISYS_ENTITY_PREFIX " CSI2 BE SOC %d", index);
+ v4l2_set_subdevdata(&csi2_be_soc->asd.sd, &csi2_be_soc->asd);
+
+ rval = v4l2_device_register_subdev(&isys->v4l2_dev,
+ &csi2_be_soc->asd.sd);
+ if (rval) {
+ dev_info(&isys->adev->dev, "can't register v4l2 subdev\n");
+ goto fail;
+ }
+
+ snprintf(csi2_be_soc->av.vdev.name, sizeof(csi2_be_soc->av.vdev.name),
+ IPU_ISYS_ENTITY_PREFIX " BE SOC capture %d", index);
+
+ /*
+ * The pin type may be overwritten for the YUV422-to-I420 case at the
+ * set_format phase.
+ */
+ csi2_be_soc->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_RAW_SOC;
+ csi2_be_soc->av.isys = isys;
+ csi2_be_soc->av.pfmts = ipu_isys_pfmts_be_soc;
+ csi2_be_soc->av.try_fmt_vid_mplane =
+ ipu_isys_video_try_fmt_vid_mplane_default;
+ csi2_be_soc->av.prepare_fw_stream =
+ ipu_isys_prepare_fw_cfg_default;
+ csi2_be_soc->av.aq.buf_prepare = ipu_isys_buf_prepare;
+ csi2_be_soc->av.aq.fill_frame_buff_set_pin =
+ ipu_isys_buffer_to_fw_frame_buff_pin;
+ csi2_be_soc->av.aq.link_fmt_validate = ipu_isys_link_fmt_validate;
+ csi2_be_soc->av.aq.vbq.buf_struct_size =
+ sizeof(struct ipu_isys_video_buffer);
+
+ /* create v4l2 ctrl for be-soc video node */
+ rval = v4l2_ctrl_handler_init(&csi2_be_soc->av.ctrl_handler, 0);
+ if (rval) {
+ dev_err(&isys->adev->dev,
+ "failed to init v4l2 ctrl handler for be_soc\n");
+ goto fail;
+ }
+
+ csi2_be_soc->av.compression_ctrl =
+ v4l2_ctrl_new_custom(&csi2_be_soc->av.ctrl_handler,
+ &compression_ctrl_cfg, NULL);
+ if (!csi2_be_soc->av.compression_ctrl) {
+ dev_err(&isys->adev->dev,
+ "failed to create BE-SOC cmprs ctrl\n");
+ goto fail;
+ }
+ csi2_be_soc->av.compression = 0;
+ csi2_be_soc->av.vdev.ctrl_handler =
+ &csi2_be_soc->av.ctrl_handler;
+
+ rval = ipu_isys_video_init(&csi2_be_soc->av,
+ &csi2_be_soc->asd.sd.entity,
+ CSI2_BE_SOC_PAD_SOURCE,
+ MEDIA_PAD_FL_SINK,
+ MEDIA_LNK_FL_DYNAMIC);
+ if (rval) {
+ dev_info(&isys->adev->dev, "can't init video node\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ ipu_isys_csi2_be_soc_cleanup(csi2_be_soc);
+
+ return rval;
+}
new file mode 100644
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2014 - 2020 Intel Corporation
+
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <media/ipu-isys.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-isys.h"
+#include "ipu-isys-csi2-be.h"
+#include "ipu-isys-subdev.h"
+#include "ipu-isys-video.h"
+
+/*
+ * Raw bayer format pixel order MUST BE MAINTAINED in groups of four codes.
+ * Otherwise pixel order calculation below WILL BREAK!
+ */
+static const u32 csi2_be_supported_codes_pad[] = {
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ 0,
+};
+
+static const u32 *csi2_be_supported_codes[] = {
+ csi2_be_supported_codes_pad,
+ csi2_be_supported_codes_pad,
+};
+
+static struct v4l2_subdev_internal_ops csi2_be_sd_internal_ops = {
+ .open = ipu_isys_subdev_open,
+ .close = ipu_isys_subdev_close,
+};
+
+static const struct v4l2_subdev_core_ops csi2_be_sd_core_ops = {
+};
+
+static const struct v4l2_ctrl_config compression_ctrl_cfg = {
+ .ops = NULL,
+ .id = V4L2_CID_IPU_ISYS_COMPRESSION,
+ .name = "ISYS CSI-BE compression",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 0,
+};
+
+static int set_stream(struct v4l2_subdev *sd, int enable)
+{
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops csi2_be_sd_video_ops = {
+ .s_stream = set_stream,
+};
+
+static int __subdev_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
+ struct ipu_isys_pipeline,
+ pipe);
+
+ ip->csi2_be = to_ipu_isys_csi2_be(sd);
+ return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt);
+}
+
+static int get_supported_code_index(u32 code)
+{
+ int i;
+
+ for (i = 0; csi2_be_supported_codes_pad[i]; i++) {
+ if (csi2_be_supported_codes_pad[i] == code)
+ return i;
+ }
+ return -EINVAL;
+}
+
+static int ipu_isys_csi2_be_set_sel(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+ struct media_pad *pad = &asd->sd.entity.pads[sel->pad];
+
+ if (sel->target == V4L2_SEL_TGT_CROP &&
+ pad->flags & MEDIA_PAD_FL_SOURCE &&
+ asd->valid_tgts[CSI2_BE_PAD_SOURCE].crop) {
+ struct v4l2_mbus_framefmt *ffmt =
+ __ipu_isys_get_ffmt(sd, sd_state, sel->pad, sel->which);
+ struct v4l2_rect *r = __ipu_isys_get_selection
+ (sd, sd_state, sel->target, CSI2_BE_PAD_SINK, sel->which);
+
+ if (get_supported_code_index(ffmt->code) < 0) {
+ /* Non-bayer formats can't be cropped at odd line offsets */
+ sel->r.left &= ~1;
+ sel->r.top &= ~1;
+
+ /* Non-bayer formats can't be padded at all */
+ sel->r.width = clamp(sel->r.width,
+ IPU_ISYS_MIN_WIDTH, r->width);
+ } else {
+ sel->r.width = clamp(sel->r.width,
+ IPU_ISYS_MIN_WIDTH,
+ IPU_ISYS_MAX_WIDTH);
+ }
+
+ /*
+ * Vertical padding is not supported, height is
+ * restricted by sink pad resolution.
+ */
+ sel->r.height = clamp(sel->r.height, IPU_ISYS_MIN_HEIGHT,
+ r->height);
+ *__ipu_isys_get_selection(sd, sd_state, sel->target,
+ sel->pad, sel->which) = sel->r;
+ ipu_isys_subdev_fmt_propagate
+ (sd, sd_state, NULL, &sel->r,
+ IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP,
+ sel->pad, sel->which);
+ return 0;
+ }
+ return ipu_isys_subdev_set_sel(sd, sd_state, sel);
+}
+
+static const struct v4l2_subdev_pad_ops csi2_be_sd_pad_ops = {
+ .link_validate = __subdev_link_validate,
+ .get_fmt = ipu_isys_subdev_get_ffmt,
+ .set_fmt = ipu_isys_subdev_set_ffmt,
+ .get_selection = ipu_isys_subdev_get_sel,
+ .set_selection = ipu_isys_csi2_be_set_sel,
+ .enum_mbus_code = ipu_isys_subdev_enum_mbus_code,
+};
+
+static struct v4l2_subdev_ops csi2_be_sd_ops = {
+ .core = &csi2_be_sd_core_ops,
+ .video = &csi2_be_sd_video_ops,
+ .pad = &csi2_be_sd_pad_ops,
+};
+
+static struct media_entity_operations csi2_be_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static void csi2_be_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+ struct v4l2_mbus_framefmt *ffmt =
+ __ipu_isys_get_ffmt(sd, sd_state, fmt->pad, fmt->which);
+
+ switch (fmt->pad) {
+ case CSI2_BE_PAD_SINK:
+ if (fmt->format.field != V4L2_FIELD_ALTERNATE)
+ fmt->format.field = V4L2_FIELD_NONE;
+ *ffmt = fmt->format;
+
+ ipu_isys_subdev_fmt_propagate
+ (sd, sd_state, &fmt->format, NULL,
+ IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT, fmt->pad, fmt->which);
+ return;
+ case CSI2_BE_PAD_SOURCE: {
+ struct v4l2_mbus_framefmt *sink_ffmt =
+ __ipu_isys_get_ffmt(sd, sd_state, CSI2_BE_PAD_SINK,
+ fmt->which);
+ struct v4l2_rect *r =
+ __ipu_isys_get_selection(sd, sd_state, V4L2_SEL_TGT_CROP,
+ CSI2_BE_PAD_SOURCE,
+ fmt->which);
+ u32 code = sink_ffmt->code;
+ int idx = get_supported_code_index(code);
+
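+ /*
+ * The supported-code table is laid out so that, within each
+ * bit depth, bit 0 of the index selects the horizontal bayer
+ * phase and bit 1 the vertical one. Cropping by an odd
+ * left/top offset flips the corresponding phase, which is
+ * what the XOR with crop_info computes.
+ */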
+ if (asd->valid_tgts[CSI2_BE_PAD_SOURCE].crop && idx >= 0) {
+ int crop_info = 0;
+
+ if (r->top & 1)
+ crop_info |= CSI2_BE_CROP_VER;
+ if (r->left & 1)
+ crop_info |= CSI2_BE_CROP_HOR;
+ code = csi2_be_supported_codes_pad
+ [((idx & CSI2_BE_CROP_MASK) ^ crop_info)
+ + (idx & ~CSI2_BE_CROP_MASK)];
+ }
+ ffmt->width = r->width;
+ ffmt->height = r->height;
+ ffmt->code = code;
+ ffmt->field = sink_ffmt->field;
+ return;
+ }
+ default:
+ dev_err(&asd->isys->adev->dev, "Unknown pad type\n");
+ WARN_ON(1);
+ }
+}
+
+void ipu_isys_csi2_be_cleanup(struct ipu_isys_csi2_be *csi2_be)
+{
+ v4l2_ctrl_handler_free(&csi2_be->av.ctrl_handler);
+ v4l2_device_unregister_subdev(&csi2_be->asd.sd);
+ ipu_isys_subdev_cleanup(&csi2_be->asd);
+ ipu_isys_video_cleanup(&csi2_be->av);
+}
+
+int ipu_isys_csi2_be_init(struct ipu_isys_csi2_be *csi2_be,
+ struct ipu_isys *isys)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = CSI2_BE_PAD_SINK,
+ .format = {
+ .width = 4096,
+ .height = 3072,
+ },
+ };
+ struct v4l2_subdev_selection sel = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = CSI2_BE_PAD_SOURCE,
+ .target = V4L2_SEL_TGT_CROP,
+ .r = {
+ .width = fmt.format.width,
+ .height = fmt.format.height,
+ },
+ };
+ int rval;
+
+ csi2_be->asd.sd.entity.ops = &csi2_be_entity_ops;
+ csi2_be->asd.isys = isys;
+
+ rval = ipu_isys_subdev_init(&csi2_be->asd, &csi2_be_sd_ops, 0,
+ NR_OF_CSI2_BE_PADS,
+ NR_OF_CSI2_BE_SOURCE_PADS,
+ NR_OF_CSI2_BE_SINK_PADS, 0);
+ if (rval)
+ goto fail;
+
+ csi2_be->asd.pad[CSI2_BE_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
+ csi2_be->asd.pad[CSI2_BE_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ csi2_be->asd.valid_tgts[CSI2_BE_PAD_SOURCE].crop = true;
+ csi2_be->asd.set_ffmt = csi2_be_set_ffmt;
+
+ BUILD_BUG_ON(ARRAY_SIZE(csi2_be_supported_codes) != NR_OF_CSI2_BE_PADS);
+ csi2_be->asd.supported_codes = csi2_be_supported_codes;
+ csi2_be->asd.be_mode = IPU_BE_RAW;
+ csi2_be->asd.isl_mode = IPU_ISL_CSI2_BE;
+
+ ipu_isys_subdev_set_ffmt(&csi2_be->asd.sd, NULL, &fmt);
+ ipu_isys_csi2_be_set_sel(&csi2_be->asd.sd, NULL, &sel);
+
+ csi2_be->asd.sd.internal_ops = &csi2_be_sd_internal_ops;
+ snprintf(csi2_be->asd.sd.name, sizeof(csi2_be->asd.sd.name),
+ IPU_ISYS_ENTITY_PREFIX " CSI2 BE");
+ snprintf(csi2_be->av.vdev.name, sizeof(csi2_be->av.vdev.name),
+ IPU_ISYS_ENTITY_PREFIX " CSI2 BE capture");
+ csi2_be->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_RAW_NS;
+ v4l2_set_subdevdata(&csi2_be->asd.sd, &csi2_be->asd);
+ rval = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2_be->asd.sd);
+ if (rval) {
+ dev_info(&isys->adev->dev, "can't register v4l2 subdev\n");
+ goto fail;
+ }
+
+ csi2_be->av.isys = isys;
+ csi2_be->av.pfmts = ipu_isys_pfmts;
+ csi2_be->av.try_fmt_vid_mplane =
+ ipu_isys_video_try_fmt_vid_mplane_default;
+ csi2_be->av.prepare_fw_stream =
+ ipu_isys_prepare_fw_cfg_default;
+ csi2_be->av.aq.buf_prepare = ipu_isys_buf_prepare;
+ csi2_be->av.aq.fill_frame_buff_set_pin =
+ ipu_isys_buffer_to_fw_frame_buff_pin;
+ csi2_be->av.aq.link_fmt_validate = ipu_isys_link_fmt_validate;
+ csi2_be->av.aq.vbq.buf_struct_size =
+ sizeof(struct ipu_isys_video_buffer);
+
+ /* create v4l2 ctrl for csi-be video node */
+ rval = v4l2_ctrl_handler_init(&csi2_be->av.ctrl_handler, 0);
+ if (rval) {
+ dev_err(&isys->adev->dev,
+ "failed to init v4l2 ctrl handler for csi2_be\n");
+ goto fail;
+ }
+
+ csi2_be->av.compression_ctrl =
+ v4l2_ctrl_new_custom(&csi2_be->av.ctrl_handler,
+ &compression_ctrl_cfg, NULL);
+ if (!csi2_be->av.compression_ctrl) {
+ dev_err(&isys->adev->dev,
+ "failed to create CSI-BE cmprs ctrl\n");
+ goto fail;
+ }
+ csi2_be->av.compression = 0;
+ csi2_be->av.vdev.ctrl_handler = &csi2_be->av.ctrl_handler;
+
+ rval = ipu_isys_video_init(&csi2_be->av, &csi2_be->asd.sd.entity,
+ CSI2_BE_PAD_SOURCE, MEDIA_PAD_FL_SINK, 0);
+ if (rval) {
+ dev_info(&isys->adev->dev, "can't init video node\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ ipu_isys_csi2_be_cleanup(csi2_be);
+
+ return rval;
+}
new file mode 100644
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2014 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_CSI2_BE_H
+#define IPU_ISYS_CSI2_BE_H
+
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+
+#include "ipu-isys-queue.h"
+#include "ipu-isys-subdev.h"
+#include "ipu-isys-video.h"
+#include "ipu-platform-isys.h"
+
+struct ipu_isys_csi2_be_pdata;
+struct ipu_isys;
+
+#define CSI2_BE_PAD_SINK 0
+#define CSI2_BE_PAD_SOURCE 1
+
+#define NR_OF_CSI2_BE_PADS 2
+#define NR_OF_CSI2_BE_SOURCE_PADS 1
+#define NR_OF_CSI2_BE_SINK_PADS 1
+
+#define NR_OF_CSI2_BE_SOC_SOURCE_PADS 1
+#define NR_OF_CSI2_BE_SOC_SINK_PADS 1
+#define CSI2_BE_SOC_PAD_SINK 0
+#define CSI2_BE_SOC_PAD_SOURCE 1
+#define NR_OF_CSI2_BE_SOC_PADS \
+ (NR_OF_CSI2_BE_SOC_SOURCE_PADS + NR_OF_CSI2_BE_SOC_SINK_PADS)
+
+#define CSI2_BE_CROP_HOR BIT(0)
+#define CSI2_BE_CROP_VER BIT(1)
+#define CSI2_BE_CROP_MASK (CSI2_BE_CROP_VER | CSI2_BE_CROP_HOR)
+
+/*
+ * struct ipu_isys_csi2_be
+ */
+struct ipu_isys_csi2_be {
+ struct ipu_isys_csi2_be_pdata *pdata;
+ struct ipu_isys_subdev asd;
+ struct ipu_isys_video av;
+};
+
+struct ipu_isys_csi2_be_soc {
+ struct ipu_isys_csi2_be_pdata *pdata;
+ struct ipu_isys_subdev asd;
+ struct ipu_isys_video av;
+};
+
+#define to_ipu_isys_csi2_be(sd) \
+ container_of(to_ipu_isys_subdev(sd), \
+ struct ipu_isys_csi2_be, asd)
+
+#define to_ipu_isys_csi2_be_soc(sd) \
+ container_of(to_ipu_isys_subdev(sd), \
+ struct ipu_isys_csi2_be_soc, asd)
+
+int ipu_isys_csi2_be_soc_init(struct ipu_isys_csi2_be_soc *csi2_be_soc,
+ struct ipu_isys *isys, int index);
+void ipu_isys_csi2_be_soc_cleanup(struct ipu_isys_csi2_be_soc *csi2_be);
+
+#endif /* IPU_ISYS_CSI2_BE_H */
new file mode 100644
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2021 Intel Corporation
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/vsc.h>
+
+#include <media/ipu-isys.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-buttress.h"
+#include "ipu-isys.h"
+#include "ipu-isys-subdev.h"
+#include "ipu-isys-video.h"
+#include "ipu-platform-regs.h"
+
+static const u32 csi2_supported_codes_pad_sink[] = {
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_RGB565_1X16,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ 0,
+};
+
+static const u32 csi2_supported_codes_pad_source[] = {
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_RGB565_1X16,
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_UYVY8_1X16,
+ MEDIA_BUS_FMT_YUYV8_1X16,
+ MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SBGGR12_1X12,
+ MEDIA_BUS_FMT_SGBRG12_1X12,
+ MEDIA_BUS_FMT_SGRBG12_1X12,
+ MEDIA_BUS_FMT_SRGGB12_1X12,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ 0,
+};
+
+static const u32 *csi2_supported_codes[NR_OF_CSI2_PADS];
+
+static struct v4l2_subdev_internal_ops csi2_sd_internal_ops = {
+ .open = ipu_isys_subdev_open,
+ .close = ipu_isys_subdev_close,
+};
+
+int ipu_isys_csi2_get_link_freq(struct ipu_isys_csi2 *csi2, __s64 *link_freq)
+{
+ struct ipu_isys_pipeline *pipe = container_of(csi2->asd.sd.entity.pipe,
+ struct ipu_isys_pipeline,
+ pipe);
+ struct v4l2_subdev *ext_sd =
+ media_entity_to_v4l2_subdev(pipe->external->entity);
+ struct v4l2_ext_control c = {.id = V4L2_CID_LINK_FREQ, };
+ struct v4l2_ext_controls cs = {.count = 1,
+ .controls = &c,
+ };
+ struct v4l2_querymenu qm = {.id = c.id, };
+ int rval;
+
+ if (!ext_sd) {
+ WARN_ON(1);
+ return -ENODEV;
+ }
+ rval = v4l2_g_ext_ctrls(ext_sd->ctrl_handler,
+ ext_sd->devnode,
+ ext_sd->v4l2_dev->mdev,
+ &cs);
+ if (rval) {
+ dev_info(&csi2->isys->adev->dev, "can't get link frequency\n");
+ return rval;
+ }
+
+ qm.index = c.value;
+
+ rval = v4l2_querymenu(ext_sd->ctrl_handler, &qm);
+ if (rval) {
+ dev_info(&csi2->isys->adev->dev, "can't get menu item\n");
+ return rval;
+ }
+
+ dev_dbg(&csi2->isys->adev->dev, "%s: link frequency %lld\n", __func__,
+ qm.value);
+
+ if (!qm.value)
+ return -EINVAL;
+ *link_freq = qm.value;
+ return 0;
+}
+
+static int subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);
+
+ dev_dbg(&csi2->isys->adev->dev, "subscribe event(type %u id %u)\n",
+ sub->type, sub->id);
+
+ switch (sub->type) {
+ case V4L2_EVENT_FRAME_SYNC:
+ return v4l2_event_subscribe(fh, sub, 10, NULL);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_subdev_core_ops csi2_sd_core_ops = {
+ .subscribe_event = subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+/*
+ * The input system CSI2+ receiver has several
+ * parameters affecting the receiver timings. These depend
+ * on the MIPI bus frequency F in Hz (sensor transmitter rate)
+ * as follows:
+ * register value = (A/1e9 + B * UI) / COUNT_ACC
+ * where
+ * UI = 1 / (2 * F) in seconds
+ * COUNT_ACC = counter accuracy in seconds
+ * For legacy IPU, COUNT_ACC = 0.125 ns
+ *
+ * A and B are coefficients from the table below,
+ * depending on whether the register minimum or maximum value is
+ * calculated.
+ * Minimum Maximum
+ * Clock lane A B A B
+ * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0
+ * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16
+ * Data lanes
+ * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6
+ * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4
+ * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6
+ *
+ * We use the minimum values of both A and B.
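+ *
+ * Worked example (illustrative numbers only): with F = 400 MHz,
+ * UI = 1 / (2 * F) = 1.25 ns, the minimum clock lane settle value is
+ * (95 / 1e9 + (-8) * 1.25e-9) / 0.125e-9 = 680, which matches
+ * calc_timing() below with accinv = 1 / COUNT_ACC = 8:
+ * 8 * 95 + 8 * (-8) * 500000000 / 400000000 = 760 - 80 = 680.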
+ */
+
+#define DIV_SHIFT 8
+
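+/*
+ * In calc_timing(), 500000000 is 1e9 / 2: it folds the unit interval
+ * UI = 1 / (2 * F) and the seconds-to-nanoseconds conversion into one
+ * constant. Both it and link_freq are pre-shifted by DIV_SHIFT so the
+ * intermediate product stays within 32 bits.
+ */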
+static u32 calc_timing(s32 a, s32 b, s64 link_freq, s32 accinv)
+{
+ return accinv * a + (accinv * b * (500000000 >> DIV_SHIFT)
+ / (int32_t)(link_freq >> DIV_SHIFT));
+}
+
+static int
+ipu_isys_csi2_calc_timing(struct ipu_isys_csi2 *csi2,
+ struct ipu_isys_csi2_timing *timing, u32 accinv)
+{
+ __s64 link_freq;
+ int rval;
+
+ rval = ipu_isys_csi2_get_link_freq(csi2, &link_freq);
+ if (rval)
+ return rval;
+
+ timing->ctermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A,
+ CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B,
+ link_freq, accinv);
+ timing->csettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A,
+ CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B,
+ link_freq, accinv);
+ dev_dbg(&csi2->isys->adev->dev, "ctermen %u\n", timing->ctermen);
+ dev_dbg(&csi2->isys->adev->dev, "csettle %u\n", timing->csettle);
+
+ timing->dtermen = calc_timing(CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A,
+ CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B,
+ link_freq, accinv);
+ timing->dsettle = calc_timing(CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A,
+ CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B,
+ link_freq, accinv);
+ dev_dbg(&csi2->isys->adev->dev, "dtermen %u\n", timing->dtermen);
+ dev_dbg(&csi2->isys->adev->dev, "dsettle %u\n", timing->dsettle);
+
+ return 0;
+}
+
+#define CSI2_ACCINV 8
+
+static int set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);
+ struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
+ struct ipu_isys_pipeline,
+ pipe);
+ struct ipu_isys_csi2_config *cfg;
+ struct v4l2_subdev *ext_sd;
+ struct ipu_isys_csi2_timing timing = {0};
+ unsigned int nlanes;
+ int rval;
+ struct vsc_mipi_config conf;
+ struct vsc_camera_status status;
+ s64 link_freq;
+
+ dev_dbg(&csi2->isys->adev->dev, "csi2 s_stream %d\n", enable);
+
+ if (!ip->external->entity) {
+ WARN_ON(1);
+ return -ENODEV;
+ }
+ ext_sd = media_entity_to_v4l2_subdev(ip->external->entity);
+ cfg = v4l2_get_subdev_hostdata(ext_sd);
+
+ if (!enable) {
+ ipu_isys_csi2_set_stream(sd, timing, 0, enable);
+ rval = vsc_release_camera_sensor(&status);
+ if (rval && rval != -EAGAIN) {
+ dev_err(&csi2->isys->adev->dev,
+ "Release VSC failed.\n");
+ return rval;
+ }
+ return 0;
+ }
+
+ ip->has_sof = true;
+
+ nlanes = cfg->nlanes;
+
+ dev_dbg(&csi2->isys->adev->dev, "lane nr %d.\n", nlanes);
+
+ rval = ipu_isys_csi2_calc_timing(csi2, &timing, CSI2_ACCINV);
+ if (rval)
+ return rval;
+
+ rval = ipu_isys_csi2_set_stream(sd, timing, nlanes, enable);
+ if (rval)
+ return rval;
+
+ rval = ipu_isys_csi2_get_link_freq(csi2, &link_freq);
+ if (rval)
+ return rval;
+
+ conf.lane_num = nlanes;
+ /* frequency unit 100k */
+ conf.freq = link_freq / 100000;
+ rval = vsc_acquire_camera_sensor(&conf, NULL, NULL, &status);
+ if (rval && rval != -EAGAIN) {
+ dev_err(&csi2->isys->adev->dev, "Acquire VSC failed.\n");
+ return rval;
+ }
+
+ return 0;
+}
+
+static void csi2_capture_done(struct ipu_isys_pipeline *ip,
+ struct ipu_fw_isys_resp_info_abi *info)
+{
+ if (ip->interlaced && ip->isys->short_packet_source ==
+ IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) {
+ struct ipu_isys_buffer *ib;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
+ if (!list_empty(&ip->short_packet_active)) {
+ ib = list_last_entry(&ip->short_packet_active,
+ struct ipu_isys_buffer, head);
+ list_move(&ib->head, &ip->short_packet_incoming);
+ }
+ spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags);
+ }
+}
+
+static int csi2_link_validate(struct media_link *link)
+{
+ struct ipu_isys_csi2 *csi2;
+ struct ipu_isys_pipeline *ip;
+ int rval;
+
+ if (!link->sink->entity ||
+ !link->sink->entity->pipe || !link->source->entity)
+ return -EINVAL;
+ csi2 =
+ to_ipu_isys_csi2(media_entity_to_v4l2_subdev(link->sink->entity));
+ ip = to_ipu_isys_pipeline(link->sink->entity->pipe);
+ csi2->receiver_errors = 0;
+ ip->csi2 = csi2;
+ ipu_isys_video_add_capture_done(to_ipu_isys_pipeline
+ (link->sink->entity->pipe),
+ csi2_capture_done);
+
+ rval = v4l2_subdev_link_validate(link);
+ if (rval)
+ return rval;
+
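+ /*
+ * When CSI-2 headers are not stored, a downstream subdev (the
+ * CSI2 BE) cannot parse the stream, so only a directly connected
+ * capture video node is allowed in that case.
+ */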
+ if (!v4l2_ctrl_g_ctrl(csi2->store_csi2_header)) {
+ struct media_pad *remote_pad =
+ media_entity_remote_pad(&csi2->asd.pad[CSI2_PAD_SOURCE]);
+
+ if (remote_pad &&
+ is_media_entity_v4l2_subdev(remote_pad->entity)) {
+ dev_err(&csi2->isys->adev->dev,
+ "CSI2 BE requires CSI2 headers.\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops csi2_sd_video_ops = {
+ .s_stream = set_stream,
+};
+
+static int ipu_isys_csi2_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ return ipu_isys_subdev_get_ffmt(sd, sd_state, fmt);
+}
+
+static int ipu_isys_csi2_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ return ipu_isys_subdev_set_ffmt(sd, sd_state, fmt);
+}
+
+static int __subdev_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
+ struct ipu_isys_pipeline,
+ pipe);
+
+ if (source_fmt->format.field == V4L2_FIELD_ALTERNATE)
+ ip->interlaced = true;
+
+ return ipu_isys_subdev_link_validate(sd, link, source_fmt, sink_fmt);
+}
+
+static const struct v4l2_subdev_pad_ops csi2_sd_pad_ops = {
+ .link_validate = __subdev_link_validate,
+ .get_fmt = ipu_isys_csi2_get_fmt,
+ .set_fmt = ipu_isys_csi2_set_fmt,
+ .enum_mbus_code = ipu_isys_subdev_enum_mbus_code,
+};
+
+static struct v4l2_subdev_ops csi2_sd_ops = {
+ .core = &csi2_sd_core_ops,
+ .video = &csi2_sd_video_ops,
+ .pad = &csi2_sd_pad_ops,
+};
+
+static struct media_entity_operations csi2_entity_ops = {
+ .link_validate = csi2_link_validate,
+};
+
+static void csi2_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ enum isys_subdev_prop_tgt tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT;
+ struct v4l2_mbus_framefmt *ffmt =
+ __ipu_isys_get_ffmt(sd, sd_state, fmt->pad,
+ fmt->which);
+
+ if (fmt->format.field != V4L2_FIELD_ALTERNATE)
+ fmt->format.field = V4L2_FIELD_NONE;
+
+ if (fmt->pad == CSI2_PAD_SINK) {
+ *ffmt = fmt->format;
+ ipu_isys_subdev_fmt_propagate(sd, sd_state, &fmt->format, NULL,
+ tgt, fmt->pad, fmt->which);
+ return;
+ }
+
+ if (sd->entity.pads[fmt->pad].flags & MEDIA_PAD_FL_SOURCE) {
+ ffmt->width = fmt->format.width;
+ ffmt->height = fmt->format.height;
+ ffmt->field = fmt->format.field;
+ ffmt->code =
+ ipu_isys_subdev_code_to_uncompressed(fmt->format.code);
+ return;
+ }
+
+ WARN_ON(1);
+}
+
+static const struct ipu_isys_pixelformat *
+csi2_try_fmt(struct ipu_isys_video *av,
+ struct v4l2_pix_format_mplane *mpix)
+{
+ struct media_link *link = list_first_entry(&av->vdev.entity.links,
+ struct media_link, list);
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(link->source->entity);
+ struct ipu_isys_csi2 *csi2;
+
+ if (!sd)
+ return NULL;
+
+ csi2 = to_ipu_isys_csi2(sd);
+
+ return ipu_isys_video_try_fmt_vid_mplane(av, mpix,
+ v4l2_ctrl_g_ctrl(csi2->store_csi2_header));
+}
+
+void ipu_isys_csi2_cleanup(struct ipu_isys_csi2 *csi2)
+{
+ if (!csi2->isys)
+ return;
+
+ v4l2_device_unregister_subdev(&csi2->asd.sd);
+ ipu_isys_subdev_cleanup(&csi2->asd);
+ csi2->isys = NULL;
+}
+
+static void csi_ctrl_init(struct v4l2_subdev *sd)
+{
+ struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);
+
+ static const struct v4l2_ctrl_config cfg = {
+ .id = V4L2_CID_IPU_STORE_CSI2_HEADER,
+ .name = "Store CSI-2 Headers",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+ };
+
+ csi2->store_csi2_header = v4l2_ctrl_new_custom(&csi2->asd.ctrl_handler,
+ &cfg, NULL);
+}
+
+int ipu_isys_csi2_init(struct ipu_isys_csi2 *csi2,
+ struct ipu_isys *isys,
+ void __iomem *base, unsigned int index)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = CSI2_PAD_SINK,
+ .format = {
+ .width = 4096,
+ .height = 3072,
+ },
+ };
+ int i, rval;
+
+ dev_dbg(&isys->adev->dev, "csi-%d base = 0x%lx\n", index,
+ (unsigned long)base);
+ csi2->isys = isys;
+ csi2->base = base;
+ csi2->index = index;
+
+ csi2->asd.sd.entity.ops = &csi2_entity_ops;
+ csi2->asd.ctrl_init = csi_ctrl_init;
+ csi2->asd.isys = isys;
+ init_completion(&csi2->eof_completion);
+ rval = ipu_isys_subdev_init(&csi2->asd, &csi2_sd_ops, 0,
+ NR_OF_CSI2_PADS,
+ NR_OF_CSI2_SOURCE_PADS,
+ NR_OF_CSI2_SINK_PADS,
+ 0);
+ if (rval)
+ goto fail;
+
+ csi2->asd.pad[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
+ csi2->asd.pad[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ csi2->asd.source = IPU_FW_ISYS_STREAM_SRC_CSI2_PORT0 + index;
+ csi2_supported_codes[CSI2_PAD_SINK] = csi2_supported_codes_pad_sink;
+
+ for (i = 0; i < NR_OF_CSI2_SOURCE_PADS; i++)
+ csi2_supported_codes[i + 1] = csi2_supported_codes_pad_source;
+ csi2->asd.supported_codes = csi2_supported_codes;
+ csi2->asd.set_ffmt = csi2_set_ffmt;
+
+ csi2->asd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+ csi2->asd.sd.internal_ops = &csi2_sd_internal_ops;
+ snprintf(csi2->asd.sd.name, sizeof(csi2->asd.sd.name),
+ IPU_ISYS_ENTITY_PREFIX " CSI-2 %u", index);
+ v4l2_set_subdevdata(&csi2->asd.sd, &csi2->asd);
+
+ rval = v4l2_device_register_subdev(&isys->v4l2_dev, &csi2->asd.sd);
+ if (rval) {
+ dev_info(&isys->adev->dev, "can't register v4l2 subdev\n");
+ goto fail;
+ }
+
+ mutex_lock(&csi2->asd.mutex);
+ __ipu_isys_subdev_set_ffmt(&csi2->asd.sd, NULL, &fmt);
+ mutex_unlock(&csi2->asd.mutex);
+
+ return 0;
+
+fail:
+ ipu_isys_csi2_cleanup(csi2);
+
+ return rval;
+}
+
+void ipu_isys_csi2_sof_event(struct ipu_isys_csi2 *csi2)
+{
+ struct ipu_isys_pipeline *ip = NULL;
+ struct v4l2_event ev = {
+ .type = V4L2_EVENT_FRAME_SYNC,
+ };
+ struct video_device *vdev = csi2->asd.sd.devnode;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&csi2->isys->lock, flags);
+ csi2->in_frame = true;
+
+ for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) {
+ if (csi2->isys->pipes[i] &&
+ csi2->isys->pipes[i]->csi2 == csi2) {
+ ip = csi2->isys->pipes[i];
+ break;
+ }
+ }
+
+ /* Pipe already vanished */
+ if (!ip) {
+ spin_unlock_irqrestore(&csi2->isys->lock, flags);
+ return;
+ }
+
+ ev.u.frame_sync.frame_sequence = atomic_inc_return(&ip->sequence) - 1;
+ spin_unlock_irqrestore(&csi2->isys->lock, flags);
+
+ v4l2_event_queue(vdev, &ev);
+ dev_dbg(&csi2->isys->adev->dev,
+ "sof_event::csi2-%i sequence: %i\n",
+ csi2->index, ev.u.frame_sync.frame_sequence);
+}
+
+void ipu_isys_csi2_eof_event(struct ipu_isys_csi2 *csi2)
+{
+ struct ipu_isys_pipeline *ip = NULL;
+ unsigned long flags;
+ unsigned int i;
+ u32 frame_sequence;
+
+ spin_lock_irqsave(&csi2->isys->lock, flags);
+ csi2->in_frame = false;
+ if (csi2->wait_for_sync)
+ complete(&csi2->eof_completion);
+
+ for (i = 0; i < IPU_ISYS_MAX_STREAMS; i++) {
+ if (csi2->isys->pipes[i] &&
+ csi2->isys->pipes[i]->csi2 == csi2) {
+ ip = csi2->isys->pipes[i];
+ break;
+ }
+ }
+
+ if (ip) {
+ frame_sequence = atomic_read(&ip->sequence);
+ spin_unlock_irqrestore(&csi2->isys->lock, flags);
+
+ dev_dbg(&csi2->isys->adev->dev,
+ "eof_event::csi2-%i sequence: %i\n",
+ csi2->index, frame_sequence);
+ return;
+ }
+
+ spin_unlock_irqrestore(&csi2->isys->lock, flags);
+}
+
+/* Call this function only _after_ the sensor has been stopped */
+void ipu_isys_csi2_wait_last_eof(struct ipu_isys_csi2 *csi2)
+{
+ unsigned long flags, tout;
+
+ spin_lock_irqsave(&csi2->isys->lock, flags);
+
+ if (!csi2->in_frame) {
+ spin_unlock_irqrestore(&csi2->isys->lock, flags);
+ return;
+ }
+
+ reinit_completion(&csi2->eof_completion);
+ csi2->wait_for_sync = true;
+ spin_unlock_irqrestore(&csi2->isys->lock, flags);
+ tout = wait_for_completion_timeout(&csi2->eof_completion,
+ IPU_EOF_TIMEOUT_JIFFIES);
+ if (!tout)
+ dev_err(&csi2->isys->adev->dev,
+ "csi2-%d: timeout at sync to eof\n",
+ csi2->index);
+ csi2->wait_for_sync = false;
+}
+
+struct ipu_isys_buffer *
+ipu_isys_csi2_get_short_packet_buffer(struct ipu_isys_pipeline *ip,
+ struct ipu_isys_buffer_list *bl)
+{
+ struct ipu_isys_buffer *ib;
+ struct ipu_isys_private_buffer *pb;
+ struct ipu_isys_mipi_packet_header *ph;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
+ if (list_empty(&ip->short_packet_incoming)) {
+ spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags);
+ return NULL;
+ }
+ ib = list_last_entry(&ip->short_packet_incoming,
+ struct ipu_isys_buffer, head);
+ pb = ipu_isys_buffer_to_private_buffer(ib);
+ ph = (struct ipu_isys_mipi_packet_header *)pb->buffer;
+
+ /* Fill the packet header with magic number. */
+ ph->word_count = 0xffff;
+ ph->dtype = 0xff;
+
+ dma_sync_single_for_cpu(&ip->isys->adev->dev, pb->dma_addr,
+ sizeof(*ph), DMA_BIDIRECTIONAL);
+ spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags);
+ list_move(&ib->head, &bl->head);
+
+ return ib;
+}
new file mode 100644
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_CSI2_H
+#define IPU_ISYS_CSI2_H
+
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+
+#include "ipu-isys-queue.h"
+#include "ipu-isys-subdev.h"
+#include "ipu-isys-video.h"
+#include "ipu-platform-isys.h"
+
+struct ipu_isys_csi2_timing;
+struct ipu_isys_csi2_pdata;
+struct ipu_isys;
+
+#define NR_OF_CSI2_SINK_PADS 1
+#define CSI2_PAD_SINK 0
+#define NR_OF_CSI2_SOURCE_PADS 1
+#define CSI2_PAD_SOURCE 1
+#define NR_OF_CSI2_PADS (NR_OF_CSI2_SINK_PADS + NR_OF_CSI2_SOURCE_PADS)
+
+#define IPU_ISYS_SHORT_PACKET_BUFFER_NUM VIDEO_MAX_FRAME
+#define IPU_ISYS_SHORT_PACKET_WIDTH 32
+#define IPU_ISYS_SHORT_PACKET_FRAME_PACKETS 2
+#define IPU_ISYS_SHORT_PACKET_EXTRA_PACKETS 64
+#define IPU_ISYS_SHORT_PACKET_UNITSIZE 8
+#define IPU_ISYS_SHORT_PACKET_GENERAL_DT 0
+#define IPU_ISYS_SHORT_PACKET_PT 0
+#define IPU_ISYS_SHORT_PACKET_FT 0
+
+#define IPU_ISYS_SHORT_PACKET_STRIDE \
+ (IPU_ISYS_SHORT_PACKET_WIDTH * \
+ IPU_ISYS_SHORT_PACKET_UNITSIZE)
+#define IPU_ISYS_SHORT_PACKET_NUM(num_lines) \
+ ((num_lines) * 2 + IPU_ISYS_SHORT_PACKET_FRAME_PACKETS + \
+ IPU_ISYS_SHORT_PACKET_EXTRA_PACKETS)
+#define IPU_ISYS_SHORT_PACKET_PKT_LINES(num_lines) \
+ DIV_ROUND_UP(IPU_ISYS_SHORT_PACKET_NUM(num_lines) * \
+ IPU_ISYS_SHORT_PACKET_UNITSIZE, \
+ IPU_ISYS_SHORT_PACKET_STRIDE)
+#define IPU_ISYS_SHORT_PACKET_BUF_SIZE(num_lines) \
+ (IPU_ISYS_SHORT_PACKET_WIDTH * \
+ IPU_ISYS_SHORT_PACKET_PKT_LINES(num_lines) * \
+ IPU_ISYS_SHORT_PACKET_UNITSIZE)
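+/*
+ * The sizing above assumes a line start and a line end short packet
+ * per frame line, plus two frame start/end packets and some slack.
+ * As an illustrative example, a 1080-line frame gives
+ * IPU_ISYS_SHORT_PACKET_NUM = 2226, PKT_LINES =
+ * DIV_ROUND_UP(2226 * 8, 256) = 70 and a buffer size of
+ * 32 * 70 * 8 = 17920 bytes.
+ */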
+
+#define IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER 256
+#define IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE 16
+#define IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE \
+ (IPU_ISYS_SHORT_PACKET_TRACE_MSG_NUMBER * \
+ IPU_ISYS_SHORT_PACKET_TRACE_MSG_SIZE)
+
+#define IPU_ISYS_SHORT_PACKET_FROM_RECEIVER 0
+#define IPU_ISYS_SHORT_PACKET_FROM_TUNIT 1
+
+#define IPU_ISYS_SHORT_PACKET_TRACE_MAX_TIMESHIFT 100
+#define IPU_ISYS_SHORT_PACKET_TRACE_EVENT_MASK 0x2082
+#define IPU_SKEW_CAL_LIMIT_HZ (1500000000ul / 2)
+
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_A 0
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_CLANE_B 0
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_A 95
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_CLANE_B -8
+
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_A 0
+#define CSI2_CSI_RX_DLY_CNT_TERMEN_DLANE_B 0
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_A 85
+#define CSI2_CSI_RX_DLY_CNT_SETTLE_DLANE_B -2
+
+#define IPU_EOF_TIMEOUT 300
+#define IPU_EOF_TIMEOUT_JIFFIES msecs_to_jiffies(IPU_EOF_TIMEOUT)
+
+/*
+ * struct ipu_isys_csi2
+ *
+ * @nlanes: number of lanes in the receiver
+ */
+struct ipu_isys_csi2 {
+ struct ipu_isys_csi2_pdata *pdata;
+ struct ipu_isys *isys;
+ struct ipu_isys_subdev asd;
+ struct ipu_isys_video av;
+ struct completion eof_completion;
+
+ void __iomem *base;
+ u32 receiver_errors;
+ unsigned int nlanes;
+ unsigned int index;
+ atomic_t sof_sequence;
+ bool in_frame;
+ bool wait_for_sync;
+
+ struct v4l2_ctrl *store_csi2_header;
+};
+
+struct ipu_isys_csi2_timing {
+ u32 ctermen;
+ u32 csettle;
+ u32 dtermen;
+ u32 dsettle;
+};
+
+/*
+ * This structure defines the MIPI packet header output
+ * from IPU MIPI receiver. Due to hardware conversion,
+ * this structure is not the same as defined in CSI-2 spec.
+ */
+struct ipu_isys_mipi_packet_header {
+ u32 word_count:16, dtype:13, sync:2, stype:1;
+ u32 sid:4, port_id:4, reserved:23, odd_even:1;
+} __packed;
+
+/*
+ * This structure defines the trace message content
+ * for CSI2 receiver monitor messages.
+ */
+struct ipu_isys_csi2_monitor_message {
+ u64 fe:1,
+ fs:1,
+ pe:1,
+ ps:1,
+ le:1,
+ ls:1,
+ reserved1:2,
+ sequence:2,
+ reserved2:2,
+ flash_shutter:4,
+ error_cause:12,
+ fifo_overrun:1,
+ crc_error:2,
+ reserved3:1,
+ timestamp_l:16,
+ port:4, vc:2, reserved4:2, frame_sync:4, reserved5:4;
+ u64 reserved6:3,
+ cmd:2, reserved7:1, monitor_id:7, reserved8:1, timestamp_h:50;
+} __packed;
+
+#define to_ipu_isys_csi2(sd) container_of(to_ipu_isys_subdev(sd), \
+ struct ipu_isys_csi2, asd)
+
+int ipu_isys_csi2_get_link_freq(struct ipu_isys_csi2 *csi2, __s64 *link_freq);
+int ipu_isys_csi2_init(struct ipu_isys_csi2 *csi2,
+ struct ipu_isys *isys,
+ void __iomem *base, unsigned int index);
+void ipu_isys_csi2_cleanup(struct ipu_isys_csi2 *csi2);
+struct ipu_isys_buffer *
+ipu_isys_csi2_get_short_packet_buffer(struct ipu_isys_pipeline *ip,
+ struct ipu_isys_buffer_list *bl);
+void ipu_isys_csi2_sof_event(struct ipu_isys_csi2 *csi2);
+void ipu_isys_csi2_eof_event(struct ipu_isys_csi2 *csi2);
+void ipu_isys_csi2_wait_last_eof(struct ipu_isys_csi2 *csi2);
+
+/* interface for platform specific */
+int ipu_isys_csi2_set_stream(struct v4l2_subdev *sd,
+ struct ipu_isys_csi2_timing timing,
+ unsigned int nlanes, int enable);
+unsigned int ipu_isys_csi2_get_current_field(struct ipu_isys_pipeline *ip,
+ unsigned int *timestamp);
+void ipu_isys_csi2_isr(struct ipu_isys_csi2 *csi2);
+void ipu_isys_csi2_error(struct ipu_isys_csi2 *csi2);
+
+#endif /* IPU_ISYS_CSI2_H */
new file mode 100644
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2016 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_MEDIA_H
+#define IPU_ISYS_MEDIA_H
+
+#include <linux/slab.h>
+#include <media/media-entity.h>
+
+struct __packed media_request_cmd {
+ __u32 cmd;
+ __u32 request;
+ __u32 flags;
+};
+
+struct __packed media_event_request_complete {
+ __u32 id;
+};
+
+#define MEDIA_EVENT_TYPE_REQUEST_COMPLETE 1
+
+struct __packed media_event {
+ __u32 type;
+ __u32 sequence;
+ __u32 reserved[4];
+
+ union {
+ struct media_event_request_complete req_complete;
+ };
+};
+
+enum media_device_request_state {
+ MEDIA_DEVICE_REQUEST_STATE_IDLE,
+ MEDIA_DEVICE_REQUEST_STATE_QUEUED,
+ MEDIA_DEVICE_REQUEST_STATE_DELETED,
+ MEDIA_DEVICE_REQUEST_STATE_COMPLETE,
+};
+
+struct media_kevent {
+ struct list_head list;
+ struct media_event ev;
+};
+
+struct media_device_request {
+ u32 id;
+ struct media_device *mdev;
+ struct file *filp;
+ struct media_kevent *kev;
+ struct kref kref;
+ struct list_head list;
+ struct list_head fh_list;
+ enum media_device_request_state state;
+ struct list_head data;
+ u32 flags;
+};
+
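+/*
+ * No-op stubs: request lookup always fails and the get/put/complete
+ * helpers below do nothing, so media request handling is effectively
+ * disabled.
+ */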
+static inline struct media_device_request *
+media_device_request_find(struct media_device *mdev, u16 reqid)
+{
+ return NULL;
+}
+
+static inline void media_device_request_get(struct media_device_request *req)
+{
+}
+
+static inline void media_device_request_put(struct media_device_request *req)
+{
+}
+
+static inline void
+media_device_request_complete(struct media_device *mdev,
+ struct media_device_request *req)
+{
+}
+
+#endif /* IPU_ISYS_MEDIA_H */
new file mode 100644
@@ -0,0 +1,1075 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <media/media-entity.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-ioctl.h>
+
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-buttress.h"
+#include "ipu-isys.h"
+#include "ipu-isys-csi2.h"
+#include "ipu-isys-video.h"
+
+static bool wall_clock_ts_on;
+module_param(wall_clock_ts_on, bool, 0660);
+MODULE_PARM_DESC(wall_clock_ts_on, "Timestamp based on REALTIME clock");
+
+static int queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q);
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+ bool use_fmt = false;
+ unsigned int i;
+
+ /* num_planes == 0: we're being called through VIDIOC_REQBUFS */
+ if (!*num_planes) {
+ use_fmt = true;
+ *num_planes = av->mpix.num_planes;
+ }
+
+ for (i = 0; i < *num_planes; i++) {
+ if (use_fmt)
+ sizes[i] = av->mpix.plane_fmt[i].sizeimage;
+ alloc_devs[i] = aq->dev;
+ dev_dbg(&av->isys->adev->dev,
+ "%s: queue setup: plane %d size %u\n",
+ av->vdev.name, i, sizes[i]);
+ }
+
+ return 0;
+}
+
+static void ipu_isys_queue_lock(struct vb2_queue *q)
+{
+ struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q);
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+
+ dev_dbg(&av->isys->adev->dev, "%s: queue lock\n", av->vdev.name);
+ mutex_lock(&av->mutex);
+}
+
+static void ipu_isys_queue_unlock(struct vb2_queue *q)
+{
+ struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q);
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+
+ dev_dbg(&av->isys->adev->dev, "%s: queue unlock\n", av->vdev.name);
+ mutex_unlock(&av->mutex);
+}
+
+static int buf_init(struct vb2_buffer *vb)
+{
+ struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+
+ dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name,
+ __func__);
+
+ if (aq->buf_init)
+ return aq->buf_init(vb);
+
+ return 0;
+}
+
+int ipu_isys_buf_prepare(struct vb2_buffer *vb)
+{
+ struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+
+ dev_dbg(&av->isys->adev->dev,
+ "buffer: %s: configured size %u, buffer size %lu\n",
+ av->vdev.name,
+ av->mpix.plane_fmt[0].sizeimage, vb2_plane_size(vb, 0));
+
+ if (av->mpix.plane_fmt[0].sizeimage > vb2_plane_size(vb, 0))
+ return -EINVAL;
+
+ vb2_set_plane_payload(vb, 0, av->mpix.plane_fmt[0].bytesperline *
+ av->mpix.height);
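+ /* line_header_length is in bits; data_offset is in bytes */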
+ vb->planes[0].data_offset = av->line_header_length / BITS_PER_BYTE;
+
+ return 0;
+}
+
+static int buf_prepare(struct vb2_buffer *vb)
+{
+ struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+ int rval;
+
+ if (av->isys->adev->isp->flr_done)
+ return -EIO;
+
+ rval = aq->buf_prepare(vb);
+ return rval;
+}
+
+static void buf_finish(struct vb2_buffer *vb)
+{
+ struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+
+ dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name,
+ __func__);
+}
+
+static void buf_cleanup(struct vb2_buffer *vb)
+{
+ struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+
+ dev_dbg(&av->isys->adev->dev, "buffer: %s: %s\n", av->vdev.name,
+ __func__);
+
+ if (aq->buf_cleanup)
+ return aq->buf_cleanup(vb);
+}
+
+/*
+ * Queue a buffer list back to incoming or active queues. The buffers
+ * are removed from the buffer list.
+ */
+void ipu_isys_buffer_list_queue(struct ipu_isys_buffer_list *bl,
+ unsigned long op_flags,
+ enum vb2_buffer_state state)
+{
+ struct ipu_isys_buffer *ib, *ib_safe;
+ unsigned long flags;
+ bool first = true;
+
+ if (!bl)
+ return;
+
+ WARN_ON(!bl->nbufs);
+ WARN_ON(op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE &&
+ op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING);
+
+ list_for_each_entry_safe(ib, ib_safe, &bl->head, head) {
+ struct ipu_isys_video *av;
+
+ if (ib->type == IPU_ISYS_VIDEO_BUFFER) {
+ struct vb2_buffer *vb =
+ ipu_isys_buffer_to_vb2_buffer(ib);
+ struct ipu_isys_queue *aq =
+ vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+
+ av = ipu_isys_queue_to_video(aq);
+ spin_lock_irqsave(&aq->lock, flags);
+ list_del(&ib->head);
+ if (op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE)
+ list_add(&ib->head, &aq->active);
+ else if (op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING)
+ list_add_tail(&ib->head, &aq->incoming);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ if (op_flags & IPU_ISYS_BUFFER_LIST_FL_SET_STATE)
+ vb2_buffer_done(vb, state);
+ } else if (ib->type == IPU_ISYS_SHORT_PACKET_BUFFER) {
+ struct ipu_isys_private_buffer *pb =
+ ipu_isys_buffer_to_private_buffer(ib);
+ struct ipu_isys_pipeline *ip = pb->ip;
+
+ av = container_of(ip, struct ipu_isys_video, ip);
+ spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
+ list_del(&ib->head);
+ if (op_flags & IPU_ISYS_BUFFER_LIST_FL_ACTIVE)
+ list_add(&ib->head, &ip->short_packet_active);
+ else if (op_flags & IPU_ISYS_BUFFER_LIST_FL_INCOMING)
+ list_add(&ib->head, &ip->short_packet_incoming);
+ spin_unlock_irqrestore(&ip->short_packet_queue_lock,
+ flags);
+ } else {
+ WARN_ON(1);
+ return;
+ }
+
+ if (first) {
+ dev_dbg(&av->isys->adev->dev,
+ "queue buf list %p flags %lx, s %d, %d bufs\n",
+ bl, op_flags, state, bl->nbufs);
+ first = false;
+ }
+
+ bl->nbufs--;
+ }
+
+ WARN_ON(bl->nbufs);
+}
+
+/*
+ * flush_firmware_streamon_fail() - Flush in cases where requests may
+ * have been queued to firmware and the *firmware streamon fails for a
+ * reason or another.
+ */
+static void flush_firmware_streamon_fail(struct ipu_isys_pipeline *ip)
+{
+ struct ipu_isys_video *pipe_av =
+ container_of(ip, struct ipu_isys_video, ip);
+ struct ipu_isys_queue *aq;
+ unsigned long flags;
+
+ lockdep_assert_held(&pipe_av->mutex);
+
+ list_for_each_entry(aq, &ip->queues, node) {
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+ struct ipu_isys_buffer *ib, *ib_safe;
+
+ spin_lock_irqsave(&aq->lock, flags);
+ list_for_each_entry_safe(ib, ib_safe, &aq->active, head) {
+ struct vb2_buffer *vb =
+ ipu_isys_buffer_to_vb2_buffer(ib);
+
+ list_del(&ib->head);
+ if (av->streaming) {
+ dev_dbg(&av->isys->adev->dev,
+ "%s: queue buffer %u back to incoming\n",
+ av->vdev.name,
+ vb->index);
+ /* Queue already streaming, return to driver. */
+ list_add(&ib->head, &aq->incoming);
+ continue;
+ }
+ /* Queue not yet streaming, return to user. */
+ dev_dbg(&av->isys->adev->dev,
+ "%s: return %u back to videobuf2\n",
+ av->vdev.name,
+ vb->index);
+ vb2_buffer_done(ipu_isys_buffer_to_vb2_buffer(ib),
+ VB2_BUF_STATE_QUEUED);
+ }
+ spin_unlock_irqrestore(&aq->lock, flags);
+ }
+}
+
+/*
+ * Attempt obtaining a buffer list from the incoming queues, a list of
+ * buffers that contains one entry from each video buffer queue. If
+ * all queues have no buffers, the buffers that were already dequeued
+ * are returned to their queues.
+ */
+static int buffer_list_get(struct ipu_isys_pipeline *ip,
+ struct ipu_isys_buffer_list *bl)
+{
+ struct ipu_isys_queue *aq;
+ struct ipu_isys_buffer *ib;
+ unsigned long flags;
+ int ret = 0;
+
+ bl->nbufs = 0;
+ INIT_LIST_HEAD(&bl->head);
+
+ list_for_each_entry(aq, &ip->queues, node) {
+ spin_lock_irqsave(&aq->lock, flags);
+ if (list_empty(&aq->incoming)) {
+ spin_unlock_irqrestore(&aq->lock, flags);
+ ret = -ENODATA;
+ goto error;
+ }
+
+ ib = list_last_entry(&aq->incoming,
+ struct ipu_isys_buffer, head);
+ if (ib->req) {
+ spin_unlock_irqrestore(&aq->lock, flags);
+ ret = -ENODATA;
+ goto error;
+ }
+
+ dev_dbg(&ip->isys->adev->dev, "buffer: %s: buffer %u\n",
+ ipu_isys_queue_to_video(aq)->vdev.name,
+ ipu_isys_buffer_to_vb2_buffer(ib)->index
+ );
+ list_del(&ib->head);
+ list_add(&ib->head, &bl->head);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ bl->nbufs++;
+ }
+
+ list_for_each_entry(ib, &bl->head, head) {
+ struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);
+
+ aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+ if (aq->prepare_frame_buff_set)
+ aq->prepare_frame_buff_set(vb);
+ }
+
+ /* Get short packet buffer. */
+ if (ip->interlaced && ip->isys->short_packet_source ==
+ IPU_ISYS_SHORT_PACKET_FROM_RECEIVER) {
+ ib = ipu_isys_csi2_get_short_packet_buffer(ip, bl);
+ if (!ib) {
+ ret = -ENODATA;
+ dev_err(&ip->isys->adev->dev,
+ "No more short packet buffers. Driver bug?\n");
+ WARN_ON(1);
+ goto error;
+ }
+ bl->nbufs++;
+ }
+
+ dev_dbg(&ip->isys->adev->dev, "get buffer list %p, %u buffers\n", bl,
+ bl->nbufs);
+ return ret;
+
+error:
+ if (!list_empty(&bl->head))
+ ipu_isys_buffer_list_queue(bl,
+ IPU_ISYS_BUFFER_LIST_FL_INCOMING, 0);
+ return ret;
+}
+
+void
+ipu_isys_buffer_to_fw_frame_buff_pin(struct vb2_buffer *vb,
+ struct ipu_fw_isys_frame_buff_set_abi *set)
+{
+ struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+ struct ipu_isys_video *av = container_of(aq, struct ipu_isys_video, aq);
+
+ if (av->compression)
+ set->output_pins[aq->fw_output].compress = 1;
+
+ set->output_pins[aq->fw_output].addr =
+ vb2_dma_contig_plane_dma_addr(vb, 0);
+ set->output_pins[aq->fw_output].out_buf_id =
+ vb->index + 1;
+}
+
+/*
+ * Convert a buffer list to an isys fw ABI frame buffer set. The
+ * buffer list is not modified.
+ */
+#define IPU_ISYS_FRAME_NUM_THRESHOLD (30)
+void
+ipu_isys_buffer_to_fw_frame_buff(struct ipu_fw_isys_frame_buff_set_abi *set,
+ struct ipu_isys_pipeline *ip,
+ struct ipu_isys_buffer_list *bl)
+{
+ struct ipu_isys_buffer *ib;
+
+ WARN_ON(!bl->nbufs);
+
+ set->send_irq_sof = 1;
+ set->send_resp_sof = 1;
+ set->send_irq_eof = 0;
+ set->send_resp_eof = 0;
+
+ if (ip->streaming)
+ set->send_irq_capture_ack = 0;
+ else
+ set->send_irq_capture_ack = 1;
+ set->send_irq_capture_done = 0;
+
+ set->send_resp_capture_ack = 1;
+ set->send_resp_capture_done = 1;
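+ /*
+ * After IPU_ISYS_FRAME_NUM_THRESHOLD frames, stop requesting
+ * per-frame capture ack/done responses to cut down response
+ * traffic; interlaced streams keep them, since buffer completion
+ * is driven by the capture_done event.
+ */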
+ if (!ip->interlaced &&
+ atomic_read(&ip->sequence) >= IPU_ISYS_FRAME_NUM_THRESHOLD) {
+ set->send_resp_capture_ack = 0;
+ set->send_resp_capture_done = 0;
+ }
+
+ list_for_each_entry(ib, &bl->head, head) {
+ if (ib->type == IPU_ISYS_VIDEO_BUFFER) {
+ struct vb2_buffer *vb =
+ ipu_isys_buffer_to_vb2_buffer(ib);
+ struct ipu_isys_queue *aq =
+ vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+
+ if (aq->fill_frame_buff_set_pin)
+ aq->fill_frame_buff_set_pin(vb, set);
+ } else if (ib->type == IPU_ISYS_SHORT_PACKET_BUFFER) {
+ struct ipu_isys_private_buffer *pb =
+ ipu_isys_buffer_to_private_buffer(ib);
+ struct ipu_fw_isys_output_pin_payload_abi *output_pin =
+ &set->output_pins[ip->short_packet_output_pin];
+
+ output_pin->addr = pb->dma_addr;
+ output_pin->out_buf_id = pb->index + 1;
+ } else {
+ WARN_ON(1);
+ }
+ }
+}
+
+/* Start streaming for real. The buffer list must be available. */
+static int ipu_isys_stream_start(struct ipu_isys_pipeline *ip,
+ struct ipu_isys_buffer_list *bl, bool error)
+{
+ struct ipu_isys_video *pipe_av =
+ container_of(ip, struct ipu_isys_video, ip);
+ struct ipu_isys_buffer_list __bl;
+ int rval;
+
+ mutex_lock(&pipe_av->isys->stream_mutex);
+
+ rval = ipu_isys_video_set_streaming(pipe_av, 1, bl);
+ if (rval) {
+ mutex_unlock(&pipe_av->isys->stream_mutex);
+ goto out_requeue;
+ }
+
+ ip->streaming = 1;
+
+ mutex_unlock(&pipe_av->isys->stream_mutex);
+
+ bl = &__bl;
+
+ do {
+ struct ipu_fw_isys_frame_buff_set_abi *buf = NULL;
+ struct isys_fw_msgs *msg;
+ enum ipu_fw_isys_send_type send_type =
+ IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE;
+
+ rval = buffer_list_get(ip, bl);
+ if (rval == -EINVAL)
+ goto out_requeue;
+ else if (rval < 0)
+ break;
+
+ msg = ipu_get_fw_msg_buf(ip);
+ if (!msg)
+ return -ENOMEM;
+
+ buf = to_frame_msg_buf(msg);
+
+ ipu_isys_buffer_to_fw_frame_buff(buf, ip, bl);
+
+ ipu_fw_isys_dump_frame_buff_set(&pipe_av->isys->adev->dev, buf,
+ ip->nr_output_pins);
+
+ ipu_isys_buffer_list_queue(bl,
+ IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0);
+
+ rval = ipu_fw_isys_complex_cmd(pipe_av->isys,
+ ip->stream_handle,
+ buf, to_dma_addr(msg),
+ sizeof(*buf),
+ send_type);
+ ipu_put_fw_mgs_buf(pipe_av->isys, (uintptr_t)buf);
+ } while (!WARN_ON(rval));
+
+ return 0;
+
+out_requeue:
+ if (bl && bl->nbufs)
+ ipu_isys_buffer_list_queue(bl,
+ IPU_ISYS_BUFFER_LIST_FL_INCOMING |
+ (error ?
+ IPU_ISYS_BUFFER_LIST_FL_SET_STATE :
+ 0),
+ error ? VB2_BUF_STATE_ERROR :
+ VB2_BUF_STATE_QUEUED);
+ flush_firmware_streamon_fail(ip);
+
+ return rval;
+}
+
+static void __buf_queue(struct vb2_buffer *vb, bool force)
+{
+ struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+ struct ipu_isys_buffer *ib = vb2_buffer_to_ipu_isys_buffer(vb);
+ struct ipu_isys_pipeline *ip =
+ to_ipu_isys_pipeline(av->vdev.entity.pipe);
+ struct ipu_isys_buffer_list bl;
+
+ struct ipu_fw_isys_frame_buff_set_abi *buf = NULL;
+ struct isys_fw_msgs *msg;
+
+ struct ipu_isys_video *pipe_av =
+ container_of(ip, struct ipu_isys_video, ip);
+ unsigned long flags;
+ unsigned int i;
+ int rval;
+
+ dev_dbg(&av->isys->adev->dev, "buffer: %s: buf_queue %u\n",
+ av->vdev.name,
+ vb->index
+ );
+
+ for (i = 0; i < vb->num_planes; i++)
+ dev_dbg(&av->isys->adev->dev, "iova: plane %u iova 0x%x\n", i,
+ (u32)vb2_dma_contig_plane_dma_addr(vb, i));
+
+ spin_lock_irqsave(&aq->lock, flags);
+ list_add(&ib->head, &aq->incoming);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ if (ib->req)
+ return;
+
+ if (!pipe_av || !vb->vb2_queue->streaming) {
+ dev_dbg(&av->isys->adev->dev,
+ "pipe_av not set, adding to incoming\n");
+ return;
+ }
+
+ mutex_unlock(&av->mutex);
+ mutex_lock(&pipe_av->mutex);
+
+ if (!force && ip->nr_streaming != ip->nr_queues) {
+ dev_dbg(&av->isys->adev->dev,
+ "not streaming yet, adding to incoming\n");
+ goto out;
+ }
+
+ /*
+ * We just put one buffer to the incoming list of this queue
+ * (above). Let's see whether all queues in the pipeline would
+ * have a buffer.
+ */
+ rval = buffer_list_get(ip, &bl);
+ if (rval < 0) {
+ if (rval == -EINVAL) {
+ dev_err(&av->isys->adev->dev,
+ "error: should not happen\n");
+ WARN_ON(1);
+ } else {
+ dev_dbg(&av->isys->adev->dev,
+ "not enough buffers available\n");
+ }
+ goto out;
+ }
+
+ msg = ipu_get_fw_msg_buf(ip);
+ if (!msg) {
+ rval = -ENOMEM;
+ goto out;
+ }
+ buf = to_frame_msg_buf(msg);
+
+ ipu_isys_buffer_to_fw_frame_buff(buf, ip, &bl);
+
+ ipu_fw_isys_dump_frame_buff_set(&pipe_av->isys->adev->dev, buf,
+ ip->nr_output_pins);
+
+ if (!ip->streaming) {
+ dev_dbg(&av->isys->adev->dev,
+ "got a buffer to start streaming!\n");
+ rval = ipu_isys_stream_start(ip, &bl, true);
+ if (rval)
+ dev_err(&av->isys->adev->dev,
+ "stream start failed.\n");
+ goto out;
+ }
+
+ /*
+ * We must queue the buffers in the buffer list to the
+ * appropriate video buffer queues BEFORE passing them to the
+ * firmware since we could get a buffer event back before we
+ * have queued them ourselves to the active queue.
+ */
+ ipu_isys_buffer_list_queue(&bl, IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0);
+
+ rval = ipu_fw_isys_complex_cmd(pipe_av->isys,
+ ip->stream_handle,
+ buf, to_dma_addr(msg),
+ sizeof(*buf),
+ IPU_FW_ISYS_SEND_TYPE_STREAM_CAPTURE);
+ ipu_put_fw_mgs_buf(pipe_av->isys, (uintptr_t)buf);
+ if (!WARN_ON(rval < 0))
+ dev_dbg(&av->isys->adev->dev, "queued buffer\n");
+
+out:
+ mutex_unlock(&pipe_av->mutex);
+ mutex_lock(&av->mutex);
+}
+
+static void buf_queue(struct vb2_buffer *vb)
+{
+ __buf_queue(vb, false);
+}
+
+int ipu_isys_link_fmt_validate(struct ipu_isys_queue *aq)
+{
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+ struct v4l2_subdev_format fmt = { 0 };
+ struct media_pad *pad = media_entity_remote_pad(av->vdev.entity.pads);
+ struct v4l2_subdev *sd;
+ int rval;
+
+ if (!pad) {
+ dev_dbg(&av->isys->adev->dev,
+ "video node %s pad not connected\n", av->vdev.name);
+ return -ENOTCONN;
+ }
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.pad = pad->index;
+ rval = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
+ if (rval)
+ return rval;
+
+ if (fmt.format.width != av->mpix.width ||
+ fmt.format.height != av->mpix.height) {
+ dev_dbg(&av->isys->adev->dev,
+ "wrong width or height %ux%u (%ux%u expected)\n",
+ av->mpix.width, av->mpix.height,
+ fmt.format.width, fmt.format.height);
+ return -EINVAL;
+ }
+
+ if (fmt.format.field != av->mpix.field) {
+ dev_dbg(&av->isys->adev->dev,
+ "wrong field value 0x%8.8x (0x%8.8x expected)\n",
+ av->mpix.field, fmt.format.field);
+ return -EINVAL;
+ }
+
+ if (fmt.format.code != av->pfmt->code) {
+ dev_dbg(&av->isys->adev->dev,
+ "wrong media bus code 0x%8.8x (0x%8.8x expected)\n",
+ av->pfmt->code, fmt.format.code);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Return buffers back to videobuf2. */
+static void return_buffers(struct ipu_isys_queue *aq,
+ enum vb2_buffer_state state)
+{
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+ int reset_needed = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&aq->lock, flags);
+ while (!list_empty(&aq->incoming)) {
+ struct ipu_isys_buffer *ib = list_first_entry(&aq->incoming,
+ struct
+ ipu_isys_buffer,
+ head);
+ struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);
+
+ list_del(&ib->head);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ vb2_buffer_done(vb, state);
+
+ dev_dbg(&av->isys->adev->dev,
+ "%s: stop_streaming incoming %u\n",
+ ipu_isys_queue_to_video(vb2_queue_to_ipu_isys_queue
+ (vb->vb2_queue))->vdev.name,
+ vb->index);
+
+ spin_lock_irqsave(&aq->lock, flags);
+ }
+
+ /*
+ * Something went wrong (FW crash / HW hang / not all buffers
+ * returned from isys) if there are still buffers queued in active
+ * queue. We have to clean things up a bit.
+ */
+ while (!list_empty(&aq->active)) {
+ struct ipu_isys_buffer *ib = list_first_entry(&aq->active,
+ struct
+ ipu_isys_buffer,
+ head);
+ struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);
+
+ list_del(&ib->head);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ vb2_buffer_done(vb, state);
+
+ dev_warn(&av->isys->adev->dev, "%s: cleaning active queue %u\n",
+ ipu_isys_queue_to_video(vb2_queue_to_ipu_isys_queue
+ (vb->vb2_queue))->vdev.name,
+ vb->index);
+
+ spin_lock_irqsave(&aq->lock, flags);
+ reset_needed = 1;
+ }
+
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ if (reset_needed) {
+ mutex_lock(&av->isys->mutex);
+ av->isys->reset_needed = true;
+ mutex_unlock(&av->isys->mutex);
+ }
+}
+
+static int start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q);
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+ struct ipu_isys_video *pipe_av;
+ struct ipu_isys_pipeline *ip;
+ struct ipu_isys_buffer_list __bl, *bl = NULL;
+ bool first;
+ int rval;
+
+ dev_dbg(&av->isys->adev->dev,
+ "stream: %s: width %u, height %u, css pixelformat %u\n",
+ av->vdev.name, av->mpix.width, av->mpix.height,
+ av->pfmt->css_pixelformat);
+
+ mutex_lock(&av->isys->stream_mutex);
+
+ first = !av->vdev.entity.pipe;
+
+ if (first) {
+ rval = ipu_isys_video_prepare_streaming(av, 1);
+ if (rval)
+ goto out_return_buffers;
+ }
+
+ mutex_unlock(&av->isys->stream_mutex);
+
+ rval = aq->link_fmt_validate(aq);
+ if (rval) {
+ dev_dbg(&av->isys->adev->dev,
+ "%s: link format validation failed (%d)\n",
+ av->vdev.name, rval);
+ goto out_unprepare_streaming;
+ }
+
+ ip = to_ipu_isys_pipeline(av->vdev.entity.pipe);
+ pipe_av = container_of(ip, struct ipu_isys_video, ip);
+ mutex_unlock(&av->mutex);
+
+ mutex_lock(&pipe_av->mutex);
+ ip->nr_streaming++;
+ dev_dbg(&av->isys->adev->dev, "queue %u of %u\n", ip->nr_streaming,
+ ip->nr_queues);
+ list_add(&aq->node, &ip->queues);
+ if (ip->nr_streaming != ip->nr_queues)
+ goto out;
+
+ if (list_empty(&av->isys->requests)) {
+ bl = &__bl;
+ rval = buffer_list_get(ip, bl);
+ if (rval == -EINVAL) {
+ goto out_stream_start;
+ } else if (rval < 0) {
+ dev_dbg(&av->isys->adev->dev,
+ "no request available, postponing streamon\n");
+ goto out;
+ }
+ }
+
+ rval = ipu_isys_stream_start(ip, bl, false);
+ if (rval)
+ goto out_stream_start;
+
+out:
+ mutex_unlock(&pipe_av->mutex);
+ mutex_lock(&av->mutex);
+
+ return 0;
+
+out_stream_start:
+ list_del(&aq->node);
+ ip->nr_streaming--;
+ mutex_unlock(&pipe_av->mutex);
+ mutex_lock(&av->mutex);
+
+out_unprepare_streaming:
+ mutex_lock(&av->isys->stream_mutex);
+ if (first)
+ ipu_isys_video_prepare_streaming(av, 0);
+
+out_return_buffers:
+ mutex_unlock(&av->isys->stream_mutex);
+ return_buffers(aq, VB2_BUF_STATE_QUEUED);
+
+ return rval;
+}
+
+static void stop_streaming(struct vb2_queue *q)
+{
+ struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(q);
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+ struct ipu_isys_pipeline *ip =
+ to_ipu_isys_pipeline(av->vdev.entity.pipe);
+ struct ipu_isys_video *pipe_av =
+ container_of(ip, struct ipu_isys_video, ip);
+
+ if (pipe_av != av) {
+ mutex_unlock(&av->mutex);
+ mutex_lock(&pipe_av->mutex);
+ }
+
+ mutex_lock(&av->isys->stream_mutex);
+ if (ip->nr_streaming == ip->nr_queues && ip->streaming)
+ ipu_isys_video_set_streaming(av, 0, NULL);
+ if (ip->nr_streaming == 1)
+ ipu_isys_video_prepare_streaming(av, 0);
+ mutex_unlock(&av->isys->stream_mutex);
+
+ ip->nr_streaming--;
+ list_del(&aq->node);
+ ip->streaming = 0;
+
+ if (pipe_av != av) {
+ mutex_unlock(&pipe_av->mutex);
+ mutex_lock(&av->mutex);
+ }
+
+ return_buffers(aq, VB2_BUF_STATE_ERROR);
+}
+
+static unsigned int
+get_sof_sequence_by_timestamp(struct ipu_isys_pipeline *ip,
+ struct ipu_fw_isys_resp_info_abi *info)
+{
+ struct ipu_isys *isys =
+ container_of(ip, struct ipu_isys_video, ip)->isys;
+ u64 time = (u64)info->timestamp[1] << 32 | info->timestamp[0];
+ unsigned int i;
+
+ /*
+ * Some FPGA platforms have no TSC, which leaves the timestamp
+ * invalid; in that case get the sequence directly from the pipeline.
+ */
+ if (time == 0)
+ return atomic_read(&ip->sequence) - 1;
+
+ for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++)
+ if (time == ip->seq[i].timestamp) {
+ dev_dbg(&isys->adev->dev,
+ "sof: using seq nr %u for ts 0x%16.16llx\n",
+ ip->seq[i].sequence, time);
+ return ip->seq[i].sequence;
+ }
+
+ dev_dbg(&isys->adev->dev, "SOF: looking for 0x%16.16llx\n", time);
+ for (i = 0; i < IPU_ISYS_MAX_PARALLEL_SOF; i++)
+ dev_dbg(&isys->adev->dev,
+ "SOF: sequence %u, timestamp value 0x%16.16llx\n",
+ ip->seq[i].sequence, ip->seq[i].timestamp);
+ dev_dbg(&isys->adev->dev, "SOF sequence number not found\n");
+
+ return 0;
+}
+
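+/*
+ * Frames are timestamped by the firmware in buttress TSC ticks;
+ * convert the distance from the current TSC value to the frame
+ * timestamp into nanoseconds so the vb2 timestamp can be back-dated
+ * to the actual start of frame.
+ */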
+static u64 get_sof_ns_delta(struct ipu_isys_video *av,
+ struct ipu_fw_isys_resp_info_abi *info)
+{
+ struct ipu_bus_device *adev = to_ipu_bus_device(&av->isys->adev->dev);
+ struct ipu_device *isp = adev->isp;
+ u64 delta, tsc_now;
+
+ if (!ipu_buttress_tsc_read(isp, &tsc_now))
+ delta = tsc_now -
+ ((u64)info->timestamp[1] << 32 | info->timestamp[0]);
+ else
+ delta = 0;
+
+ return ipu_buttress_tsc_ticks_to_ns(delta);
+}
+
+void
+ipu_isys_buf_calc_sequence_time(struct ipu_isys_buffer *ib,
+ struct ipu_fw_isys_resp_info_abi *info)
+{
+ struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct ipu_isys_queue *aq = vb2_queue_to_ipu_isys_queue(vb->vb2_queue);
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+ struct device *dev = &av->isys->adev->dev;
+ struct ipu_isys_pipeline *ip =
+ to_ipu_isys_pipeline(av->vdev.entity.pipe);
+ u64 ns;
+ u32 sequence;
+
+ ns = wall_clock_ts_on ? ktime_get_real_ns() : ktime_get_ns();
+ if (ip->has_sof) {
+ ns -= get_sof_ns_delta(av, info);
+ sequence = get_sof_sequence_by_timestamp(ip, info);
+ } else {
+ sequence = (atomic_inc_return(&ip->sequence) - 1)
+ / ip->nr_queues;
+ }
+
+ vbuf->vb2_buf.timestamp = ns;
+ vbuf->sequence = sequence;
+
+ dev_dbg(dev, "buf: %s: buffer done, CPU-timestamp:%lld, sequence:%d\n",
+ av->vdev.name, ktime_get_ns(), sequence);
+ dev_dbg(dev, "index:%d, vbuf timestamp:%lld\n",
+ vb->index, vbuf->vb2_buf.timestamp);
+}
+
+void ipu_isys_queue_buf_done(struct ipu_isys_buffer *ib)
+{
+ struct vb2_buffer *vb = ipu_isys_buffer_to_vb2_buffer(ib);
+
+ if (atomic_read(&ib->str2mmio_flag)) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ /*
+ * Operation on buffer is ended with error and will be reported
+ * to the userspace when it is de-queued
+ */
+ atomic_set(&ib->str2mmio_flag, 0);
+ } else {
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ }
+}
+
+void ipu_isys_queue_buf_ready(struct ipu_isys_pipeline *ip,
+ struct ipu_fw_isys_resp_info_abi *info)
+{
+ struct ipu_isys *isys =
+ container_of(ip, struct ipu_isys_video, ip)->isys;
+ struct ipu_isys_queue *aq = ip->output_pins[info->pin_id].aq;
+ struct ipu_isys_buffer *ib;
+ struct vb2_buffer *vb;
+ unsigned long flags;
+ bool first = true;
+ struct vb2_v4l2_buffer *buf;
+
+ dev_dbg(&isys->adev->dev, "buffer: %s: received buffer %8.8x\n",
+ ipu_isys_queue_to_video(aq)->vdev.name, info->pin.addr);
+
+ spin_lock_irqsave(&aq->lock, flags);
+ if (list_empty(&aq->active)) {
+ spin_unlock_irqrestore(&aq->lock, flags);
+ dev_err(&isys->adev->dev, "active queue empty\n");
+ return;
+ }
+
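+ /*
+ * Buffers are added to the front of the active list, so walk it
+ * in reverse to match the oldest queued buffer first.
+ */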
+ list_for_each_entry_reverse(ib, &aq->active, head) {
+ dma_addr_t addr;
+
+ vb = ipu_isys_buffer_to_vb2_buffer(ib);
+ addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ if (info->pin.addr != addr) {
+ if (first)
+ dev_err(&isys->adev->dev,
+ "WARN: buffer address %pad expected!\n",
+ &addr);
+ first = false;
+ continue;
+ }
+
+ if (info->error_info.error ==
+ IPU_FW_ISYS_ERROR_HW_REPORTED_STR2MMIO) {
+			/*
+			 * The hardware reported a str2mmio error; flag
+			 * the buffer so it is completed in the error
+			 * state.
+			 */
+ atomic_set(&ib->str2mmio_flag, 1);
+ }
+ dev_dbg(&isys->adev->dev, "buffer: found buffer %pad\n", &addr);
+
+ buf = to_vb2_v4l2_buffer(vb);
+ buf->field = V4L2_FIELD_NONE;
+
+ list_del(&ib->head);
+ spin_unlock_irqrestore(&aq->lock, flags);
+
+ ipu_isys_buf_calc_sequence_time(ib, info);
+
+ /*
+ * For interlaced buffers, the notification to user space
+ * is postponed to capture_done event since the field
+ * information is available only at that time.
+ */
+ if (ip->interlaced) {
+ spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
+ list_add(&ib->head, &ip->pending_interlaced_bufs);
+ spin_unlock_irqrestore(&ip->short_packet_queue_lock,
+ flags);
+ } else {
+ ipu_isys_queue_buf_done(ib);
+ }
+
+ return;
+ }
+
+ dev_err(&isys->adev->dev,
+ "WARNING: cannot find a matching video buffer!\n");
+
+ spin_unlock_irqrestore(&aq->lock, flags);
+}
+
+void
+ipu_isys_queue_short_packet_ready(struct ipu_isys_pipeline *ip,
+ struct ipu_fw_isys_resp_info_abi *info)
+{
+ struct ipu_isys *isys =
+ container_of(ip, struct ipu_isys_video, ip)->isys;
+ unsigned long flags;
+
+	dev_dbg(&isys->adev->dev, "received short packet buffer %8.8x\n",
+ info->pin.addr);
+ spin_lock_irqsave(&ip->short_packet_queue_lock, flags);
+ ip->cur_field = ipu_isys_csi2_get_current_field(ip, info->timestamp);
+ spin_unlock_irqrestore(&ip->short_packet_queue_lock, flags);
+}
+
+struct vb2_ops ipu_isys_queue_ops = {
+ .queue_setup = queue_setup,
+ .wait_prepare = ipu_isys_queue_unlock,
+ .wait_finish = ipu_isys_queue_lock,
+ .buf_init = buf_init,
+ .buf_prepare = buf_prepare,
+ .buf_finish = buf_finish,
+ .buf_cleanup = buf_cleanup,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .buf_queue = buf_queue,
+};
+
+int ipu_isys_queue_init(struct ipu_isys_queue *aq)
+{
+ struct ipu_isys *isys = ipu_isys_queue_to_video(aq)->isys;
+ int rval;
+
+ if (!aq->vbq.io_modes)
+ aq->vbq.io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
+ aq->vbq.drv_priv = aq;
+ aq->vbq.ops = &ipu_isys_queue_ops;
+ aq->vbq.mem_ops = &vb2_dma_contig_memops;
+ aq->vbq.timestamp_flags = (wall_clock_ts_on) ?
+ V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN : V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+ rval = vb2_queue_init(&aq->vbq);
+ if (rval)
+ return rval;
+
+ aq->dev = &isys->adev->dev;
+ aq->vbq.dev = &isys->adev->dev;
+ spin_lock_init(&aq->lock);
+ INIT_LIST_HEAD(&aq->active);
+ INIT_LIST_HEAD(&aq->incoming);
+
+ return 0;
+}
+
+void ipu_isys_queue_cleanup(struct ipu_isys_queue *aq)
+{
+ vb2_queue_release(&aq->vbq);
+}
new file mode 100644
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_QUEUE_H
+#define IPU_ISYS_QUEUE_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include <media/videobuf2-v4l2.h>
+
+#include "ipu-isys-media.h"
+
+struct ipu_isys_video;
+struct ipu_isys_pipeline;
+struct ipu_fw_isys_resp_info_abi;
+struct ipu_fw_isys_frame_buff_set_abi;
+
+enum ipu_isys_buffer_type {
+ IPU_ISYS_VIDEO_BUFFER,
+ IPU_ISYS_SHORT_PACKET_BUFFER,
+};
+
+struct ipu_isys_queue {
+ struct list_head node; /* struct ipu_isys_pipeline.queues */
+ struct vb2_queue vbq;
+ struct device *dev;
+	/*
+	 * @lock: serialise access to the active and incoming lists
+	 */
+ spinlock_t lock;
+ struct list_head active;
+ struct list_head incoming;
+ u32 css_pin_type;
+ unsigned int fw_output;
+ int (*buf_init)(struct vb2_buffer *vb);
+ void (*buf_cleanup)(struct vb2_buffer *vb);
+ int (*buf_prepare)(struct vb2_buffer *vb);
+ void (*prepare_frame_buff_set)(struct vb2_buffer *vb);
+ void (*fill_frame_buff_set_pin)(struct vb2_buffer *vb,
+ struct ipu_fw_isys_frame_buff_set_abi *
+ set);
+ int (*link_fmt_validate)(struct ipu_isys_queue *aq);
+};
+
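+/*
+ * Driver-side buffer state, embedded in the video and private buffer
+ * wrappers below and linked into the queue lists through @head.
+ */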
+struct ipu_isys_buffer {
+ struct list_head head;
+ enum ipu_isys_buffer_type type;
+ struct list_head req_head;
+ struct media_device_request *req;
+ atomic_t str2mmio_flag;
+};
+
+struct ipu_isys_video_buffer {
+ struct vb2_v4l2_buffer vb_v4l2;
+ struct ipu_isys_buffer ib;
+};
+
+struct ipu_isys_private_buffer {
+ struct ipu_isys_buffer ib;
+ struct ipu_isys_pipeline *ip;
+ unsigned int index;
+ unsigned int bytesused;
+ dma_addr_t dma_addr;
+ void *buffer;
+};
+
+#define IPU_ISYS_BUFFER_LIST_FL_INCOMING BIT(0)
+#define IPU_ISYS_BUFFER_LIST_FL_ACTIVE BIT(1)
+#define IPU_ISYS_BUFFER_LIST_FL_SET_STATE BIT(2)
+
+struct ipu_isys_buffer_list {
+ struct list_head head;
+ unsigned int nbufs;
+};
+
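+/*
+ * Conversion helpers between the buffer wrappers, all embedded in
+ * struct ipu_isys_video_buffer:
+ * vb2_buffer <-> vb2_v4l2_buffer <-> ipu_isys_video_buffer <-> ipu_isys_buffer
+ */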
+#define vb2_queue_to_ipu_isys_queue(__vb2) \
+ container_of(__vb2, struct ipu_isys_queue, vbq)
+
+#define ipu_isys_to_isys_video_buffer(__ib) \
+ container_of(__ib, struct ipu_isys_video_buffer, ib)
+
+#define vb2_buffer_to_ipu_isys_video_buffer(__vb) \
+ container_of(to_vb2_v4l2_buffer(__vb), \
+ struct ipu_isys_video_buffer, vb_v4l2)
+
+#define ipu_isys_buffer_to_vb2_buffer(__ib) \
+ (&ipu_isys_to_isys_video_buffer(__ib)->vb_v4l2.vb2_buf)
+
+#define vb2_buffer_to_ipu_isys_buffer(__vb) \
+ (&vb2_buffer_to_ipu_isys_video_buffer(__vb)->ib)
+
+#define ipu_isys_buffer_to_private_buffer(__ib) \
+ container_of(__ib, struct ipu_isys_private_buffer, ib)
+
+struct ipu_isys_request {
+ struct media_device_request req;
+ /* serialise access to buffers */
+ spinlock_t lock;
+ struct list_head buffers; /* struct ipu_isys_buffer.head */
+ bool dispatched;
+	/*
+	 * Links the request into struct ipu_isys.requests or one of
+	 * the struct ipu_isys_pipeline request lists.
+	 */
+ struct list_head head;
+};
+
+#define to_ipu_isys_request(__req) \
+ container_of(__req, struct ipu_isys_request, req)
+
+int ipu_isys_buf_prepare(struct vb2_buffer *vb);
+
+void ipu_isys_buffer_list_queue(struct ipu_isys_buffer_list *bl,
+ unsigned long op_flags,
+ enum vb2_buffer_state state);
+struct ipu_isys_request *
+ipu_isys_next_queued_request(struct ipu_isys_pipeline *ip);
+void
+ipu_isys_buffer_to_fw_frame_buff_pin(struct vb2_buffer *vb,
+ struct ipu_fw_isys_frame_buff_set_abi *
+ set);
+void
+ipu_isys_buffer_to_fw_frame_buff(struct ipu_fw_isys_frame_buff_set_abi *set,
+ struct ipu_isys_pipeline *ip,
+ struct ipu_isys_buffer_list *bl);
+int ipu_isys_link_fmt_validate(struct ipu_isys_queue *aq);
+
+void
+ipu_isys_buf_calc_sequence_time(struct ipu_isys_buffer *ib,
+ struct ipu_fw_isys_resp_info_abi *info);
+void ipu_isys_queue_buf_done(struct ipu_isys_buffer *ib);
+void ipu_isys_queue_buf_ready(struct ipu_isys_pipeline *ip,
+ struct ipu_fw_isys_resp_info_abi *info);
+void
+ipu_isys_queue_short_packet_ready(struct ipu_isys_pipeline *ip,
+ struct ipu_fw_isys_resp_info_abi *inf);
+
+int ipu_isys_queue_init(struct ipu_isys_queue *aq);
+void ipu_isys_queue_cleanup(struct ipu_isys_queue *aq);
+
+#endif /* IPU_ISYS_QUEUE_H */
new file mode 100644
@@ -0,0 +1,650 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+
+#include <uapi/linux/media-bus-format.h>
+
+#include "ipu-isys.h"
+#include "ipu-isys-video.h"
+#include "ipu-isys-subdev.h"
+
+unsigned int ipu_isys_mbus_code_to_bpp(u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ return 24;
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ return 20;
+ case MEDIA_BUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ return 16;
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ return 12;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ return 10;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+ return 8;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
+unsigned int ipu_isys_mbus_code_to_mipi(u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ return IPU_ISYS_MIPI_CSI2_TYPE_RGB565;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ return IPU_ISYS_MIPI_CSI2_TYPE_RGB888;
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ return IPU_ISYS_MIPI_CSI2_TYPE_YUV422_10;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ return IPU_ISYS_MIPI_CSI2_TYPE_YUV422_8;
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ return IPU_ISYS_MIPI_CSI2_TYPE_RAW12;
+ case MEDIA_BUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ return IPU_ISYS_MIPI_CSI2_TYPE_RAW10;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ return IPU_ISYS_MIPI_CSI2_TYPE_RAW8;
+ case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+ return IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(1);
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
+enum ipu_isys_subdev_pixelorder ipu_isys_subdev_get_pixelorder(u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+ return IPU_ISYS_SUBDEV_PIXELORDER_BGGR;
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
+ return IPU_ISYS_SUBDEV_PIXELORDER_GBRG;
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ return IPU_ISYS_SUBDEV_PIXELORDER_GRBG;
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+ return IPU_ISYS_SUBDEV_PIXELORDER_RGGB;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
+u32 ipu_isys_subdev_code_to_uncompressed(u32 sink_code)
+{
+ switch (sink_code) {
+ case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+ return MEDIA_BUS_FMT_SBGGR10_1X10;
+ case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
+ return MEDIA_BUS_FMT_SGBRG10_1X10;
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ return MEDIA_BUS_FMT_SGRBG10_1X10;
+ case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+ return MEDIA_BUS_FMT_SRGGB10_1X10;
+ default:
+ return sink_code;
+ }
+}
+
+struct v4l2_mbus_framefmt *__ipu_isys_get_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad,
+ unsigned int which)
+{
+ struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ return &asd->ffmt[pad];
+ else
+ return v4l2_subdev_get_try_format(sd, sd_state, pad);
+}
+
+struct v4l2_rect *__ipu_isys_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int target,
+ unsigned int pad, unsigned int which)
+{
+ struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ switch (target) {
+ case V4L2_SEL_TGT_CROP:
+ return &asd->crop[pad];
+ case V4L2_SEL_TGT_COMPOSE:
+ return &asd->compose[pad];
+ }
+ } else {
+ switch (target) {
+ case V4L2_SEL_TGT_CROP:
+ return v4l2_subdev_get_try_crop(sd, sd_state, pad);
+ case V4L2_SEL_TGT_COMPOSE:
+ return v4l2_subdev_get_try_compose(sd, sd_state, pad);
+ }
+ }
+ WARN_ON(1);
+ return NULL;
+}
+
+static int target_valid(struct v4l2_subdev *sd, unsigned int target,
+ unsigned int pad)
+{
+ struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+
+ switch (target) {
+ case V4L2_SEL_TGT_CROP:
+ return asd->valid_tgts[pad].crop;
+ case V4L2_SEL_TGT_COMPOSE:
+ return asd->valid_tgts[pad].compose;
+ default:
+ return 0;
+ }
+}
+
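+/*
+ * Propagate a format or selection change through the sub-device: each
+ * enum isys_subdev_prop_tgt stage derives the next rectangle (sink
+ * format -> sink crop -> sink compose -> source compose -> source
+ * crop) and recurses with tgt + 1 until the source pad format is
+ * finally set through asd->set_ffmt().
+ */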
+int ipu_isys_subdev_fmt_propagate(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_mbus_framefmt *ffmt,
+ struct v4l2_rect *r,
+ enum isys_subdev_prop_tgt tgt,
+ unsigned int pad, unsigned int which)
+{
+ struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+ struct v4l2_mbus_framefmt **ffmts = NULL;
+ struct v4l2_rect **crops = NULL;
+ struct v4l2_rect **compose = NULL;
+ unsigned int i;
+ int rval = 0;
+
+ if (tgt == IPU_ISYS_SUBDEV_PROP_TGT_NR_OF)
+ return 0;
+
+ if (WARN_ON(pad >= sd->entity.num_pads))
+ return -EINVAL;
+
+ ffmts = kcalloc(sd->entity.num_pads,
+ sizeof(*ffmts), GFP_KERNEL);
+ if (!ffmts) {
+ rval = -ENOMEM;
+ goto out_subdev_fmt_propagate;
+ }
+ crops = kcalloc(sd->entity.num_pads,
+ sizeof(*crops), GFP_KERNEL);
+ if (!crops) {
+ rval = -ENOMEM;
+ goto out_subdev_fmt_propagate;
+ }
+ compose = kcalloc(sd->entity.num_pads,
+ sizeof(*compose), GFP_KERNEL);
+ if (!compose) {
+ rval = -ENOMEM;
+ goto out_subdev_fmt_propagate;
+ }
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ ffmts[i] = __ipu_isys_get_ffmt(sd, sd_state, i, which);
+ crops[i] = __ipu_isys_get_selection(sd, sd_state,
+ V4L2_SEL_TGT_CROP, i, which);
+ compose[i] = __ipu_isys_get_selection(sd, sd_state,
+ V4L2_SEL_TGT_COMPOSE, i, which);
+ }
+
+ switch (tgt) {
+ case IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT:
+ crops[pad]->left = 0;
+ crops[pad]->top = 0;
+ crops[pad]->width = ffmt->width;
+ crops[pad]->height = ffmt->height;
+ rval = ipu_isys_subdev_fmt_propagate(sd, sd_state, ffmt,
+ crops[pad], tgt + 1, pad, which);
+ goto out_subdev_fmt_propagate;
+ case IPU_ISYS_SUBDEV_PROP_TGT_SINK_CROP:
+ if (WARN_ON(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
+ goto out_subdev_fmt_propagate;
+
+ compose[pad]->left = 0;
+ compose[pad]->top = 0;
+ compose[pad]->width = r->width;
+ compose[pad]->height = r->height;
+ rval = ipu_isys_subdev_fmt_propagate(sd, sd_state, ffmt,
+ compose[pad], tgt + 1, pad, which);
+ goto out_subdev_fmt_propagate;
+ case IPU_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE:
+ if (WARN_ON(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE)) {
+ rval = -EINVAL;
+ goto out_subdev_fmt_propagate;
+ }
+
+ for (i = 1; i < sd->entity.num_pads; i++) {
+ if (!(sd->entity.pads[i].flags &
+ MEDIA_PAD_FL_SOURCE))
+ continue;
+
+ compose[i]->left = 0;
+ compose[i]->top = 0;
+ compose[i]->width = r->width;
+ compose[i]->height = r->height;
+ rval = ipu_isys_subdev_fmt_propagate(sd, sd_state,
+ ffmt, compose[i], tgt + 1, i, which);
+ if (rval)
+ goto out_subdev_fmt_propagate;
+ }
+ goto out_subdev_fmt_propagate;
+ case IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE:
+ if (WARN_ON(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SINK)) {
+ rval = -EINVAL;
+ goto out_subdev_fmt_propagate;
+ }
+
+ crops[pad]->left = 0;
+ crops[pad]->top = 0;
+ crops[pad]->width = r->width;
+ crops[pad]->height = r->height;
+ rval = ipu_isys_subdev_fmt_propagate(sd, sd_state, ffmt,
+ crops[pad], tgt + 1, pad, which);
+ goto out_subdev_fmt_propagate;
+ case IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP:{
+ struct v4l2_subdev_format fmt = {
+ .which = which,
+ .pad = pad,
+ .format = {
+ .width = r->width,
+ .height = r->height,
+					/*
+					 * Use the code and field from
+					 * the sink pad format if set,
+					 * otherwise keep the current
+					 * ones.
+					 */
+ .code = ffmt ? ffmt->code :
+ ffmts[pad]->code,
+ .field = ffmt ? ffmt->field :
+ ffmts[pad]->field,
+ },
+ };
+
+ asd->set_ffmt(sd, sd_state, &fmt);
+ goto out_subdev_fmt_propagate;
+ }
+ }
+
+out_subdev_fmt_propagate:
+ kfree(ffmts);
+ kfree(crops);
+ kfree(compose);
+ return rval;
+}
+
+int ipu_isys_subdev_set_ffmt_default(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *ffmt =
+ __ipu_isys_get_ffmt(sd, sd_state, fmt->pad, fmt->which);
+
+ /* No propagation for non-zero pads. */
+ if (fmt->pad) {
+ struct v4l2_mbus_framefmt *sink_ffmt =
+ __ipu_isys_get_ffmt(sd, sd_state, 0, fmt->which);
+
+ ffmt->width = sink_ffmt->width;
+ ffmt->height = sink_ffmt->height;
+ ffmt->code = sink_ffmt->code;
+ ffmt->field = sink_ffmt->field;
+
+ return 0;
+ }
+
+ ffmt->width = fmt->format.width;
+ ffmt->height = fmt->format.height;
+ ffmt->code = fmt->format.code;
+ ffmt->field = fmt->format.field;
+
+ return ipu_isys_subdev_fmt_propagate(sd, sd_state, &fmt->format, NULL,
+ IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT,
+ fmt->pad, fmt->which);
+}
+
+int __ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+ struct v4l2_mbus_framefmt *ffmt =
+ __ipu_isys_get_ffmt(sd, sd_state, fmt->pad, fmt->which);
+ u32 code = asd->supported_codes[fmt->pad][0];
+ unsigned int i;
+
+ WARN_ON(!mutex_is_locked(&asd->mutex));
+
+ fmt->format.width = clamp(fmt->format.width, IPU_ISYS_MIN_WIDTH,
+ IPU_ISYS_MAX_WIDTH);
+ fmt->format.height = clamp(fmt->format.height,
+ IPU_ISYS_MIN_HEIGHT, IPU_ISYS_MAX_HEIGHT);
+
+ for (i = 0; asd->supported_codes[fmt->pad][i]; i++) {
+ if (asd->supported_codes[fmt->pad][i] == fmt->format.code) {
+ code = asd->supported_codes[fmt->pad][i];
+ break;
+ }
+ }
+
+ fmt->format.code = code;
+
+ asd->set_ffmt(sd, sd_state, fmt);
+
+ fmt->format = *ffmt;
+
+ return 0;
+}
+
+int ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+ int rval;
+
+ mutex_lock(&asd->mutex);
+ rval = __ipu_isys_subdev_set_ffmt(sd, sd_state, fmt);
+ mutex_unlock(&asd->mutex);
+
+ return rval;
+}
+
+int ipu_isys_subdev_get_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+
+ mutex_lock(&asd->mutex);
+ fmt->format = *__ipu_isys_get_ffmt(sd, sd_state, fmt->pad,
+ fmt->which);
+ mutex_unlock(&asd->mutex);
+
+ return 0;
+}
+
+int ipu_isys_subdev_set_sel(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+ struct media_pad *pad = &asd->sd.entity.pads[sel->pad];
+ struct v4l2_rect *r, __r = { 0 };
+ unsigned int tgt;
+
+ if (!target_valid(sd, sel->target, sel->pad))
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (pad->flags & MEDIA_PAD_FL_SINK) {
+ struct v4l2_mbus_framefmt *ffmt =
+ __ipu_isys_get_ffmt(sd, sd_state, sel->pad,
+ sel->which);
+
+ __r.width = ffmt->width;
+ __r.height = ffmt->height;
+ r = &__r;
+ tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_CROP;
+ } else {
+ /* 0 is the sink pad. */
+ r = __ipu_isys_get_selection(sd, sd_state, sel->target, 0,
+ sel->which);
+ tgt = IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP;
+ }
+
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (pad->flags & MEDIA_PAD_FL_SINK) {
+ r = __ipu_isys_get_selection(sd, sd_state, V4L2_SEL_TGT_CROP,
+ sel->pad, sel->which);
+ tgt = IPU_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE;
+ } else {
+ r = __ipu_isys_get_selection(sd, sd_state,
+ V4L2_SEL_TGT_COMPOSE, 0,
+ sel->which);
+ tgt = IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
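+	/*
+	 * Clamp the request against the reference rectangle, store the
+	 * result and propagate the change to the downstream targets.
+	 */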
+ sel->r.width = clamp(sel->r.width, IPU_ISYS_MIN_WIDTH, r->width);
+ sel->r.height = clamp(sel->r.height, IPU_ISYS_MIN_HEIGHT, r->height);
+ *__ipu_isys_get_selection(sd, sd_state, sel->target, sel->pad,
+ sel->which) = sel->r;
+ return ipu_isys_subdev_fmt_propagate(sd, sd_state, NULL, &sel->r, tgt,
+ sel->pad, sel->which);
+}
+
+int ipu_isys_subdev_get_sel(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ if (!target_valid(sd, sel->target, sel->pad))
+ return -EINVAL;
+
+ sel->r = *__ipu_isys_get_selection(sd, sd_state, sel->target,
+ sel->pad, sel->which);
+
+ return 0;
+}
+
+int ipu_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+ const u32 *supported_codes = asd->supported_codes[code->pad];
+ u32 index;
+
+ for (index = 0; supported_codes[index]; index++) {
+ if (index == code->index) {
+ code->code = supported_codes[index];
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Besides validating the link, figure out the external pad and the
+ * ISYS FW ABI source.
+ */
+int ipu_isys_subdev_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt)
+{
+ struct v4l2_subdev *source_sd =
+ media_entity_to_v4l2_subdev(link->source->entity);
+ struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
+ struct ipu_isys_pipeline,
+ pipe);
+ struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+
+ if (!source_sd)
+ return -ENODEV;
+ if (strncmp(source_sd->name, IPU_ISYS_ENTITY_PREFIX,
+ strlen(IPU_ISYS_ENTITY_PREFIX)) != 0) {
+ /*
+ * source_sd isn't ours --- sd must be the external
+ * sub-device.
+ */
+ ip->external = link->source;
+ ip->source = to_ipu_isys_subdev(sd)->source;
+ dev_dbg(&asd->isys->adev->dev, "%s: using source %d\n",
+ sd->entity.name, ip->source);
+ } else if (source_sd->entity.num_pads == 1) {
+ /* All internal sources have a single pad. */
+ ip->external = link->source;
+ ip->source = to_ipu_isys_subdev(source_sd)->source;
+
+ dev_dbg(&asd->isys->adev->dev, "%s: using source %d\n",
+ sd->entity.name, ip->source);
+ }
+
+ if (asd->isl_mode != IPU_ISL_OFF)
+ ip->isl_mode = asd->isl_mode;
+
+ return v4l2_subdev_link_validate_default(sd, link, source_fmt,
+ sink_fmt);
+}
+
+int ipu_isys_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct ipu_isys_subdev *asd = to_ipu_isys_subdev(sd);
+ unsigned int i;
+
+ mutex_lock(&asd->mutex);
+
+ for (i = 0; i < asd->sd.entity.num_pads; i++) {
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_get_try_format(sd, fh->state, i);
+ struct v4l2_rect *try_crop =
+ v4l2_subdev_get_try_crop(sd, fh->state, i);
+ struct v4l2_rect *try_compose =
+ v4l2_subdev_get_try_compose(sd, fh->state, i);
+
+ *try_fmt = asd->ffmt[i];
+ *try_crop = asd->crop[i];
+ *try_compose = asd->compose[i];
+ }
+
+ mutex_unlock(&asd->mutex);
+
+ return 0;
+}
+
+int ipu_isys_subdev_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+int ipu_isys_subdev_init(struct ipu_isys_subdev *asd,
+ struct v4l2_subdev_ops *ops,
+ unsigned int nr_ctrls,
+ unsigned int num_pads,
+ unsigned int num_source,
+ unsigned int num_sink,
+ unsigned int sd_flags)
+{
+ int rval = -EINVAL;
+
+ mutex_init(&asd->mutex);
+
+ v4l2_subdev_init(&asd->sd, ops);
+
+ asd->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | sd_flags;
+ asd->sd.owner = THIS_MODULE;
+ asd->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+
+ asd->nsources = num_source;
+ asd->nsinks = num_sink;
+
+ asd->pad = devm_kcalloc(&asd->isys->adev->dev, num_pads,
+ sizeof(*asd->pad), GFP_KERNEL);
+
+ asd->ffmt = devm_kcalloc(&asd->isys->adev->dev, num_pads,
+ sizeof(*asd->ffmt), GFP_KERNEL);
+
+ asd->crop = devm_kcalloc(&asd->isys->adev->dev, num_pads,
+ sizeof(*asd->crop), GFP_KERNEL);
+
+ asd->compose = devm_kcalloc(&asd->isys->adev->dev, num_pads,
+ sizeof(*asd->compose), GFP_KERNEL);
+
+ asd->valid_tgts = devm_kcalloc(&asd->isys->adev->dev, num_pads,
+ sizeof(*asd->valid_tgts), GFP_KERNEL);
+ if (!asd->pad || !asd->ffmt || !asd->crop || !asd->compose ||
+ !asd->valid_tgts)
+ return -ENOMEM;
+
+ rval = media_entity_pads_init(&asd->sd.entity, num_pads, asd->pad);
+ if (rval)
+ goto out_mutex_destroy;
+
+ if (asd->ctrl_init) {
+ rval = v4l2_ctrl_handler_init(&asd->ctrl_handler, nr_ctrls);
+ if (rval)
+ goto out_media_entity_cleanup;
+
+ asd->ctrl_init(&asd->sd);
+ if (asd->ctrl_handler.error) {
+ rval = asd->ctrl_handler.error;
+ goto out_v4l2_ctrl_handler_free;
+ }
+
+ asd->sd.ctrl_handler = &asd->ctrl_handler;
+ }
+
+ asd->source = -1;
+
+ return 0;
+
+out_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(&asd->ctrl_handler);
+
+out_media_entity_cleanup:
+ media_entity_cleanup(&asd->sd.entity);
+
+out_mutex_destroy:
+ mutex_destroy(&asd->mutex);
+
+ return rval;
+}
+
+void ipu_isys_subdev_cleanup(struct ipu_isys_subdev *asd)
+{
+ media_entity_cleanup(&asd->sd.entity);
+ v4l2_ctrl_handler_free(&asd->ctrl_handler);
+ mutex_destroy(&asd->mutex);
+}
new file mode 100644
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_SUBDEV_H
+#define IPU_ISYS_SUBDEV_H
+
+#include <linux/mutex.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+
+#include "ipu-isys-queue.h"
+
+#define IPU_ISYS_MIPI_CSI2_TYPE_NULL 0x10
+#define IPU_ISYS_MIPI_CSI2_TYPE_BLANKING 0x11
+#define IPU_ISYS_MIPI_CSI2_TYPE_EMBEDDED8 0x12
+#define IPU_ISYS_MIPI_CSI2_TYPE_YUV422_8 0x1e
+#define IPU_ISYS_MIPI_CSI2_TYPE_YUV422_10 0x1f
+#define IPU_ISYS_MIPI_CSI2_TYPE_RGB565 0x22
+#define IPU_ISYS_MIPI_CSI2_TYPE_RGB888 0x24
+#define IPU_ISYS_MIPI_CSI2_TYPE_RAW6 0x28
+#define IPU_ISYS_MIPI_CSI2_TYPE_RAW7 0x29
+#define IPU_ISYS_MIPI_CSI2_TYPE_RAW8 0x2a
+#define IPU_ISYS_MIPI_CSI2_TYPE_RAW10 0x2b
+#define IPU_ISYS_MIPI_CSI2_TYPE_RAW12 0x2c
+#define IPU_ISYS_MIPI_CSI2_TYPE_RAW14 0x2d
+/* User-defined data types, i = 1..8 */
+#define IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(i) (0x30 + (i) - 1)
+
+#define FMT_ENTRY (struct ipu_isys_fmt_entry [])
+
+enum isys_subdev_prop_tgt {
+ IPU_ISYS_SUBDEV_PROP_TGT_SINK_FMT,
+ IPU_ISYS_SUBDEV_PROP_TGT_SINK_CROP,
+ IPU_ISYS_SUBDEV_PROP_TGT_SINK_COMPOSE,
+ IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_COMPOSE,
+ IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP,
+};
+
+#define IPU_ISYS_SUBDEV_PROP_TGT_NR_OF \
+ (IPU_ISYS_SUBDEV_PROP_TGT_SOURCE_CROP + 1)
+
+enum ipu_isl_mode {
+ IPU_ISL_OFF = 0, /* SOC BE */
+ IPU_ISL_CSI2_BE, /* RAW BE */
+};
+
+enum ipu_be_mode {
+ IPU_BE_RAW = 0,
+ IPU_BE_SOC
+};
+
+enum ipu_isys_subdev_pixelorder {
+ IPU_ISYS_SUBDEV_PIXELORDER_BGGR = 0,
+ IPU_ISYS_SUBDEV_PIXELORDER_GBRG,
+ IPU_ISYS_SUBDEV_PIXELORDER_GRBG,
+ IPU_ISYS_SUBDEV_PIXELORDER_RGGB,
+};
+
+struct ipu_isys;
+
+struct ipu_isys_subdev {
+ /* Serialise access to any other field in the struct */
+ struct mutex mutex;
+ struct v4l2_subdev sd;
+ struct ipu_isys *isys;
+ u32 const *const *supported_codes;
+ struct media_pad *pad;
+ struct v4l2_mbus_framefmt *ffmt;
+ struct v4l2_rect *crop;
+ struct v4l2_rect *compose;
+ unsigned int nsinks;
+ unsigned int nsources;
+ struct v4l2_ctrl_handler ctrl_handler;
+ void (*ctrl_init)(struct v4l2_subdev *sd);
+ void (*set_ffmt)(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt);
+ struct {
+ bool crop;
+ bool compose;
+ } *valid_tgts;
+ enum ipu_isl_mode isl_mode;
+ enum ipu_be_mode be_mode;
+ int source; /* SSI stream source; -1 if unset */
+};
+
+#define to_ipu_isys_subdev(__sd) \
+ container_of(__sd, struct ipu_isys_subdev, sd)
+
+struct v4l2_mbus_framefmt *__ipu_isys_get_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad,
+ unsigned int which);
+
+unsigned int ipu_isys_mbus_code_to_bpp(u32 code);
+unsigned int ipu_isys_mbus_code_to_mipi(u32 code);
+u32 ipu_isys_subdev_code_to_uncompressed(u32 sink_code);
+
+enum ipu_isys_subdev_pixelorder ipu_isys_subdev_get_pixelorder(u32 code);
+
+int ipu_isys_subdev_fmt_propagate(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_mbus_framefmt *ffmt,
+ struct v4l2_rect *r,
+ enum isys_subdev_prop_tgt tgt,
+ unsigned int pad, unsigned int which);
+
+int ipu_isys_subdev_set_ffmt_default(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt);
+int __ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt);
+struct v4l2_rect *__ipu_isys_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int target,
+ unsigned int pad,
+ unsigned int which);
+int ipu_isys_subdev_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt);
+int ipu_isys_subdev_get_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt);
+int ipu_isys_subdev_get_sel(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel);
+int ipu_isys_subdev_set_sel(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel);
+int ipu_isys_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum
+ *code);
+int ipu_isys_subdev_link_validate(struct v4l2_subdev *sd,
+ struct media_link *link,
+ struct v4l2_subdev_format *source_fmt,
+ struct v4l2_subdev_format *sink_fmt);
+
+int ipu_isys_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+int ipu_isys_subdev_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+int ipu_isys_subdev_init(struct ipu_isys_subdev *asd,
+ struct v4l2_subdev_ops *ops,
+ unsigned int nr_ctrls,
+ unsigned int num_pads,
+ unsigned int num_source,
+ unsigned int num_sink,
+ unsigned int sd_flags);
+void ipu_isys_subdev_cleanup(struct ipu_isys_subdev *asd);
+#endif /* IPU_ISYS_SUBDEV_H */
new file mode 100644
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/device.h>
+#include <linux/module.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-isys.h"
+#include "ipu-isys-subdev.h"
+#include "ipu-isys-tpg.h"
+#include "ipu-isys-video.h"
+#include "ipu-platform-isys-csi2-reg.h"
+
+static const u32 tpg_supported_codes_pad[] = {
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ 0,
+};
+
+static const u32 *tpg_supported_codes[] = {
+ tpg_supported_codes_pad,
+};
+
+static struct v4l2_subdev_internal_ops tpg_sd_internal_ops = {
+ .open = ipu_isys_subdev_open,
+ .close = ipu_isys_subdev_close,
+};
+
+static const struct v4l2_subdev_video_ops tpg_sd_video_ops = {
+ .s_stream = tpg_set_stream,
+};
+
+static int ipu_isys_tpg_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ipu_isys_tpg *tpg = container_of(container_of(ctrl->handler,
+ struct
+ ipu_isys_subdev,
+ ctrl_handler),
+ struct ipu_isys_tpg, asd);
+
+ switch (ctrl->id) {
+ case V4L2_CID_HBLANK:
+ writel(ctrl->val, tpg->base + MIPI_GEN_REG_SYNG_HBLANK_CYC);
+ break;
+ case V4L2_CID_VBLANK:
+ writel(ctrl->val, tpg->base + MIPI_GEN_REG_SYNG_VBLANK_CYC);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ writel(ctrl->val, tpg->base + MIPI_GEN_REG_TPG_MODE);
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops ipu_isys_tpg_ctrl_ops = {
+ .s_ctrl = ipu_isys_tpg_s_ctrl,
+};
+
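+/*
+ * Pixel rate of the TPG: MIPI_GEN_PPC pixels per IS clock cycle,
+ * divided by the bits per pixel of the generated format.
+ */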
+static s64 ipu_isys_tpg_rate(struct ipu_isys_tpg *tpg, unsigned int bpp)
+{
+ return MIPI_GEN_PPC * IPU_ISYS_FREQ / bpp;
+}
+
+static const char *const tpg_mode_items[] = {
+ "Ramp",
+ "Checkerboard", /* Does not work, disabled. */
+ "Frame Based Colour",
+};
+
+static struct v4l2_ctrl_config tpg_mode = {
+ .ops = &ipu_isys_tpg_ctrl_ops,
+ .id = V4L2_CID_TEST_PATTERN,
+ .name = "Test Pattern",
+ .type = V4L2_CTRL_TYPE_MENU,
+ .min = 0,
+ .max = ARRAY_SIZE(tpg_mode_items) - 1,
+ .def = 0,
+ .menu_skip_mask = 0x2,
+ .qmenu = tpg_mode_items,
+};
+
+static const struct v4l2_ctrl_config csi2_header_cfg = {
+ .id = V4L2_CID_IPU_STORE_CSI2_HEADER,
+ .name = "Store CSI-2 Headers",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+};
+
+static void ipu_isys_tpg_init_controls(struct v4l2_subdev *sd)
+{
+ struct ipu_isys_tpg *tpg = to_ipu_isys_tpg(sd);
+ int hblank;
+ u64 default_pixel_rate;
+
+ hblank = 1024;
+
+ tpg->hblank = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler,
+ &ipu_isys_tpg_ctrl_ops,
+ V4L2_CID_HBLANK, 8, 65535, 1, hblank);
+
+ tpg->vblank = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler,
+ &ipu_isys_tpg_ctrl_ops,
+ V4L2_CID_VBLANK, 8, 65535, 1, 1024);
+
+ default_pixel_rate = ipu_isys_tpg_rate(tpg, 8);
+ tpg->pixel_rate = v4l2_ctrl_new_std(&tpg->asd.ctrl_handler,
+ &ipu_isys_tpg_ctrl_ops,
+ V4L2_CID_PIXEL_RATE,
+ default_pixel_rate,
+ default_pixel_rate,
+ 1, default_pixel_rate);
+ if (tpg->pixel_rate) {
+ tpg->pixel_rate->cur.val = default_pixel_rate;
+ tpg->pixel_rate->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ }
+
+ v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler, &tpg_mode, NULL);
+ tpg->store_csi2_header =
+ v4l2_ctrl_new_custom(&tpg->asd.ctrl_handler,
+ &csi2_header_cfg, NULL);
+}
+
+static void tpg_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format.field = V4L2_FIELD_NONE;
+ *__ipu_isys_get_ffmt(sd, sd_state, fmt->pad, fmt->which) = fmt->format;
+}
+
+static int ipu_isys_tpg_set_ffmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ipu_isys_tpg *tpg = to_ipu_isys_tpg(sd);
+ __u32 code = tpg->asd.ffmt[TPG_PAD_SOURCE].code;
+ unsigned int bpp = ipu_isys_mbus_code_to_bpp(code);
+ s64 tpg_rate = ipu_isys_tpg_rate(tpg, bpp);
+ int rval;
+
+ mutex_lock(&tpg->asd.mutex);
+ rval = __ipu_isys_subdev_set_ffmt(sd, sd_state, fmt);
+ mutex_unlock(&tpg->asd.mutex);
+
+ if (rval || fmt->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return rval;
+
+ v4l2_ctrl_s_ctrl_int64(tpg->pixel_rate, tpg_rate);
+
+ return 0;
+}
+
+static const struct ipu_isys_pixelformat *
+ipu_isys_tpg_try_fmt(struct ipu_isys_video *av,
+ struct v4l2_pix_format_mplane *mpix)
+{
+ struct media_link *link = list_first_entry(&av->vdev.entity.links,
+ struct media_link, list);
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(link->source->entity);
+ struct ipu_isys_tpg *tpg;
+
+ if (!sd)
+ return NULL;
+
+ tpg = to_ipu_isys_tpg(sd);
+
+ return ipu_isys_video_try_fmt_vid_mplane(av, mpix,
+ v4l2_ctrl_g_ctrl(tpg->store_csi2_header));
+}
+
+static const struct v4l2_subdev_pad_ops tpg_sd_pad_ops = {
+ .get_fmt = ipu_isys_subdev_get_ffmt,
+ .set_fmt = ipu_isys_tpg_set_ffmt,
+ .enum_mbus_code = ipu_isys_subdev_enum_mbus_code,
+};
+
+static int subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+#ifdef IPU_TPG_FRAME_SYNC
+ case V4L2_EVENT_FRAME_SYNC:
+ return v4l2_event_subscribe(fh, sub, 10, NULL);
+#endif
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* V4L2 subdev core operations */
+static const struct v4l2_subdev_core_ops tpg_sd_core_ops = {
+ .subscribe_event = subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static struct v4l2_subdev_ops tpg_sd_ops = {
+ .core = &tpg_sd_core_ops,
+ .video = &tpg_sd_video_ops,
+ .pad = &tpg_sd_pad_ops,
+};
+
+static struct media_entity_operations tpg_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+void ipu_isys_tpg_cleanup(struct ipu_isys_tpg *tpg)
+{
+ v4l2_device_unregister_subdev(&tpg->asd.sd);
+ ipu_isys_subdev_cleanup(&tpg->asd);
+ ipu_isys_video_cleanup(&tpg->av);
+}
+
+int ipu_isys_tpg_init(struct ipu_isys_tpg *tpg,
+ struct ipu_isys *isys,
+ void __iomem *base, void __iomem *sel,
+ unsigned int index)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = TPG_PAD_SOURCE,
+ .format = {
+ .width = 4096,
+ .height = 3072,
+ },
+ };
+ int rval;
+
+ tpg->isys = isys;
+ tpg->base = base;
+ tpg->sel = sel;
+ tpg->index = index;
+
+ tpg->asd.sd.entity.ops = &tpg_entity_ops;
+ tpg->asd.ctrl_init = ipu_isys_tpg_init_controls;
+ tpg->asd.isys = isys;
+
+ rval = ipu_isys_subdev_init(&tpg->asd, &tpg_sd_ops, 5,
+ NR_OF_TPG_PADS,
+ NR_OF_TPG_SOURCE_PADS,
+ NR_OF_TPG_SINK_PADS,
+ V4L2_SUBDEV_FL_HAS_EVENTS);
+ if (rval)
+ return rval;
+
+ tpg->asd.sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ tpg->asd.pad[TPG_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ tpg->asd.source = IPU_FW_ISYS_STREAM_SRC_MIPIGEN_PORT0 + index;
+ tpg->asd.supported_codes = tpg_supported_codes;
+ tpg->asd.set_ffmt = tpg_set_ffmt;
+ ipu_isys_subdev_set_ffmt(&tpg->asd.sd, NULL, &fmt);
+
+ tpg->asd.sd.internal_ops = &tpg_sd_internal_ops;
+ snprintf(tpg->asd.sd.name, sizeof(tpg->asd.sd.name),
+ IPU_ISYS_ENTITY_PREFIX " TPG %u", index);
+ v4l2_set_subdevdata(&tpg->asd.sd, &tpg->asd);
+ rval = v4l2_device_register_subdev(&isys->v4l2_dev, &tpg->asd.sd);
+ if (rval) {
+ dev_info(&isys->adev->dev, "can't register v4l2 subdev\n");
+ goto fail;
+ }
+
+ snprintf(tpg->av.vdev.name, sizeof(tpg->av.vdev.name),
+ IPU_ISYS_ENTITY_PREFIX " TPG %u capture", index);
+ tpg->av.isys = isys;
+ tpg->av.aq.css_pin_type = IPU_FW_ISYS_PIN_TYPE_MIPI;
+ tpg->av.pfmts = ipu_isys_pfmts_packed;
+ tpg->av.try_fmt_vid_mplane = ipu_isys_tpg_try_fmt;
+ tpg->av.prepare_fw_stream =
+ ipu_isys_prepare_fw_cfg_default;
+ tpg->av.packed = true;
+ tpg->av.line_header_length = IPU_ISYS_CSI2_LONG_PACKET_HEADER_SIZE;
+ tpg->av.line_footer_length = IPU_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE;
+ tpg->av.aq.buf_prepare = ipu_isys_buf_prepare;
+ tpg->av.aq.fill_frame_buff_set_pin =
+ ipu_isys_buffer_to_fw_frame_buff_pin;
+ tpg->av.aq.link_fmt_validate = ipu_isys_link_fmt_validate;
+ tpg->av.aq.vbq.buf_struct_size = sizeof(struct ipu_isys_video_buffer);
+
+ rval = ipu_isys_video_init(&tpg->av, &tpg->asd.sd.entity,
+ TPG_PAD_SOURCE, MEDIA_PAD_FL_SINK, 0);
+ if (rval) {
+ dev_info(&isys->adev->dev, "can't init video node\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ ipu_isys_tpg_cleanup(tpg);
+
+ return rval;
+}
new file mode 100644
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_TPG_H
+#define IPU_ISYS_TPG_H
+
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#include "ipu-isys-subdev.h"
+#include "ipu-isys-video.h"
+#include "ipu-isys-queue.h"
+
+struct ipu_isys_tpg_pdata;
+struct ipu_isys;
+
+#define TPG_PAD_SOURCE 0
+#define NR_OF_TPG_PADS 1
+#define NR_OF_TPG_SOURCE_PADS 1
+#define NR_OF_TPG_SINK_PADS 0
+#define NR_OF_TPG_STREAMS 1
+
+/*
+ * PPC is 4 pixels per clock for RAW8, RAW10 and RAW12.
+ * Source: FW validation test code.
+ */
+#define MIPI_GEN_PPC 4
+
+#define MIPI_GEN_REG_COM_ENABLE 0x0
+#define MIPI_GEN_REG_COM_DTYPE 0x4
+/* RAW8, RAW10 or RAW12 */
+#define MIPI_GEN_COM_DTYPE_RAW(n) (((n) - 8) / 2)
+#define MIPI_GEN_REG_COM_VTYPE 0x8
+#define MIPI_GEN_REG_COM_VCHAN 0xc
+#define MIPI_GEN_REG_COM_WCOUNT 0x10
+#define MIPI_GEN_REG_PRBS_RSTVAL0 0x14
+#define MIPI_GEN_REG_PRBS_RSTVAL1 0x18
+#define MIPI_GEN_REG_SYNG_FREE_RUN 0x1c
+#define MIPI_GEN_REG_SYNG_PAUSE 0x20
+#define MIPI_GEN_REG_SYNG_NOF_FRAMES 0x24
+#define MIPI_GEN_REG_SYNG_NOF_PIXELS 0x28
+#define MIPI_GEN_REG_SYNG_NOF_LINES 0x2c
+#define MIPI_GEN_REG_SYNG_HBLANK_CYC 0x30
+#define MIPI_GEN_REG_SYNG_VBLANK_CYC 0x34
+#define MIPI_GEN_REG_SYNG_STAT_HCNT 0x38
+#define MIPI_GEN_REG_SYNG_STAT_VCNT 0x3c
+#define MIPI_GEN_REG_SYNG_STAT_FCNT 0x40
+#define MIPI_GEN_REG_SYNG_STAT_DONE 0x44
+#define MIPI_GEN_REG_TPG_MODE 0x48
+#define MIPI_GEN_REG_TPG_HCNT_MASK 0x4c
+#define MIPI_GEN_REG_TPG_VCNT_MASK 0x50
+#define MIPI_GEN_REG_TPG_XYCNT_MASK 0x54
+#define MIPI_GEN_REG_TPG_HCNT_DELTA 0x58
+#define MIPI_GEN_REG_TPG_VCNT_DELTA 0x5c
+#define MIPI_GEN_REG_TPG_R1 0x60
+#define MIPI_GEN_REG_TPG_G1 0x64
+#define MIPI_GEN_REG_TPG_B1 0x68
+#define MIPI_GEN_REG_TPG_R2 0x6c
+#define MIPI_GEN_REG_TPG_G2 0x70
+#define MIPI_GEN_REG_TPG_B2 0x74
+
+/*
+ * struct ipu_isys_tpg - MIPI test pattern generator sub-device
+ */
+struct ipu_isys_tpg {
+ struct ipu_isys_tpg_pdata *pdata;
+ struct ipu_isys *isys;
+ struct ipu_isys_subdev asd;
+ struct ipu_isys_video av;
+
+ void __iomem *base;
+ void __iomem *sel;
+ unsigned int index;
+ int streaming;
+
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *store_csi2_header;
+};
+
+#define to_ipu_isys_tpg(sd) \
+ container_of(to_ipu_isys_subdev(sd), \
+ struct ipu_isys_tpg, asd)
+#ifdef IPU_TPG_FRAME_SYNC
+void ipu_isys_tpg_sof_event(struct ipu_isys_tpg *tpg);
+void ipu_isys_tpg_eof_event(struct ipu_isys_tpg *tpg);
+#endif
+int ipu_isys_tpg_init(struct ipu_isys_tpg *tpg,
+ struct ipu_isys *isys,
+ void __iomem *base, void __iomem *sel,
+ unsigned int index);
+void ipu_isys_tpg_cleanup(struct ipu_isys_tpg *tpg);
+int tpg_set_stream(struct v4l2_subdev *sd, int enable);
+
+#endif /* IPU_ISYS_TPG_H */
new file mode 100644
@@ -0,0 +1,1748 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2021 Intel Corporation
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/init_task.h>
+#include <linux/kthread.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/compat.h>
+
+#include <uapi/linux/sched/types.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-cpd.h"
+#include "ipu-isys.h"
+#include "ipu-isys-video.h"
+#include "ipu-platform.h"
+#include "ipu-platform-regs.h"
+#include "ipu-platform-buttress-regs.h"
+#include "ipu-trace.h"
+#include "ipu-fw-isys.h"
+#include "ipu-fw-com.h"
+
+/* use max resolution pixel rate by default */
+#define DEFAULT_PIXEL_RATE (360000000ULL * 2 * 4 / 10)
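+/* presumably: 360 MHz link clock x 2 (DDR) x 4 lanes / 10 bits per pixel */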
+
+const struct ipu_isys_pixelformat ipu_isys_pfmts_be_soc[] = {
+ {V4L2_PIX_FMT_Y10, 16, 10, 0, MEDIA_BUS_FMT_Y10_1X10,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_UYVY, 16, 16, 0, MEDIA_BUS_FMT_UYVY8_1X16,
+ IPU_FW_ISYS_FRAME_FORMAT_UYVY},
+ {V4L2_PIX_FMT_YUYV, 16, 16, 0, MEDIA_BUS_FMT_YUYV8_1X16,
+ IPU_FW_ISYS_FRAME_FORMAT_YUYV},
+ {V4L2_PIX_FMT_NV16, 16, 16, 8, MEDIA_BUS_FMT_UYVY8_1X16,
+ IPU_FW_ISYS_FRAME_FORMAT_NV16},
+ {V4L2_PIX_FMT_XRGB32, 32, 32, 0, MEDIA_BUS_FMT_RGB565_1X16,
+ IPU_FW_ISYS_FRAME_FORMAT_RGBA888},
+ {V4L2_PIX_FMT_XBGR32, 32, 32, 0, MEDIA_BUS_FMT_RGB888_1X24,
+ IPU_FW_ISYS_FRAME_FORMAT_RGBA888},
+ /* Raw bayer formats. */
+ {V4L2_PIX_FMT_SBGGR12, 16, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SGBRG12, 16, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SGRBG12, 16, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SRGGB12, 16, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SBGGR10, 16, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SGBRG10, 16, 10, 0, MEDIA_BUS_FMT_SGBRG10_1X10,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SGRBG10, 16, 10, 0, MEDIA_BUS_FMT_SGRBG10_1X10,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SRGGB10, 16, 10, 0, MEDIA_BUS_FMT_SRGGB10_1X10,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SBGGR8, 8, 8, 0, MEDIA_BUS_FMT_SBGGR8_1X8,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+ {V4L2_PIX_FMT_SGBRG8, 8, 8, 0, MEDIA_BUS_FMT_SGBRG8_1X8,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+ {V4L2_PIX_FMT_SGRBG8, 8, 8, 0, MEDIA_BUS_FMT_SGRBG8_1X8,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+ {V4L2_PIX_FMT_SRGGB8, 8, 8, 0, MEDIA_BUS_FMT_SRGGB8_1X8,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+ {}
+};
+
+const struct ipu_isys_pixelformat ipu_isys_pfmts_packed[] = {
+ {V4L2_PIX_FMT_Y10, 10, 10, 0, MEDIA_BUS_FMT_Y10_1X10,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW10},
+#ifdef V4L2_PIX_FMT_Y210
+ {V4L2_PIX_FMT_Y210, 20, 20, 0, MEDIA_BUS_FMT_YUYV10_1X20,
+ IPU_FW_ISYS_FRAME_FORMAT_YUYV},
+#endif
+ {V4L2_PIX_FMT_UYVY, 16, 16, 0, MEDIA_BUS_FMT_UYVY8_1X16,
+ IPU_FW_ISYS_FRAME_FORMAT_UYVY},
+ {V4L2_PIX_FMT_YUYV, 16, 16, 0, MEDIA_BUS_FMT_YUYV8_1X16,
+ IPU_FW_ISYS_FRAME_FORMAT_YUYV},
+ {V4L2_PIX_FMT_RGB565, 16, 16, 0, MEDIA_BUS_FMT_RGB565_1X16,
+ IPU_FW_ISYS_FRAME_FORMAT_RGB565},
+ {V4L2_PIX_FMT_BGR24, 24, 24, 0, MEDIA_BUS_FMT_RGB888_1X24,
+ IPU_FW_ISYS_FRAME_FORMAT_RGBA888},
+#ifndef V4L2_PIX_FMT_SBGGR12P
+ {V4L2_PIX_FMT_SBGGR12, 12, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+ {V4L2_PIX_FMT_SGBRG12, 12, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+ {V4L2_PIX_FMT_SGRBG12, 12, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+ {V4L2_PIX_FMT_SRGGB12, 12, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+#else /* V4L2_PIX_FMT_SBGGR12P */
+ {V4L2_PIX_FMT_SBGGR12P, 12, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+ {V4L2_PIX_FMT_SGBRG12P, 12, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+ {V4L2_PIX_FMT_SGRBG12P, 12, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+ {V4L2_PIX_FMT_SRGGB12P, 12, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW12},
+#endif /* V4L2_PIX_FMT_SBGGR12P */
+ {V4L2_PIX_FMT_SBGGR10P, 10, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW10},
+ {V4L2_PIX_FMT_SGBRG10P, 10, 10, 0, MEDIA_BUS_FMT_SGBRG10_1X10,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW10},
+ {V4L2_PIX_FMT_SGRBG10P, 10, 10, 0, MEDIA_BUS_FMT_SGRBG10_1X10,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW10},
+ {V4L2_PIX_FMT_SRGGB10P, 10, 10, 0, MEDIA_BUS_FMT_SRGGB10_1X10,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW10},
+ {V4L2_PIX_FMT_SBGGR8, 8, 8, 0, MEDIA_BUS_FMT_SBGGR8_1X8,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+ {V4L2_PIX_FMT_SGBRG8, 8, 8, 0, MEDIA_BUS_FMT_SGBRG8_1X8,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+ {V4L2_PIX_FMT_SGRBG8, 8, 8, 0, MEDIA_BUS_FMT_SGRBG8_1X8,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+ {V4L2_PIX_FMT_SRGGB8, 8, 8, 0, MEDIA_BUS_FMT_SRGGB8_1X8,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+ {}
+};
+
+static int video_open(struct file *file)
+{
+ struct ipu_isys_video *av = video_drvdata(file);
+ struct ipu_isys *isys = av->isys;
+ struct ipu_bus_device *adev = to_ipu_bus_device(&isys->adev->dev);
+ struct ipu_device *isp = adev->isp;
+ int rval;
+ const struct ipu_isys_internal_pdata *ipdata;
+
+ mutex_lock(&isys->mutex);
+
+ if (isys->reset_needed || isp->flr_done) {
+ mutex_unlock(&isys->mutex);
+ dev_warn(&isys->adev->dev, "isys power cycle required\n");
+ return -EIO;
+ }
+ mutex_unlock(&isys->mutex);
+
+ rval = pm_runtime_get_sync(&isys->adev->dev);
+ if (rval < 0) {
+ pm_runtime_put_noidle(&isys->adev->dev);
+ return rval;
+ }
+
+ rval = v4l2_fh_open(file);
+ if (rval)
+ goto out_power_down;
+
+ rval = v4l2_pipeline_pm_get(&av->vdev.entity);
+ if (rval)
+ goto out_v4l2_fh_release;
+
+ mutex_lock(&isys->mutex);
+
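+	/*
+	 * Only the first opener brings up the firmware: configure the
+	 * SPC, flush stale firmware message buffers, drop any context
+	 * left over from a failed shutdown and initialise the ISYS
+	 * library.
+	 */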
+ if (isys->video_opened++) {
+ /* Already open */
+ mutex_unlock(&isys->mutex);
+ return 0;
+ }
+
+ ipdata = isys->pdata->ipdata;
+ ipu_configure_spc(adev->isp,
+ &ipdata->hw_variant,
+ IPU_CPD_PKG_DIR_ISYS_SERVER_IDX,
+ isys->pdata->base, isys->pkg_dir,
+ isys->pkg_dir_dma_addr);
+
+	/*
+	 * Buffers could have been left in the wrong queue at the last
+	 * closure. Move them back to the empty buffer queue now.
+	 */
+ ipu_cleanup_fw_msg_bufs(isys);
+
+ if (isys->fwcom) {
+		/*
+		 * Something went wrong in the previous shutdown. As we
+		 * are now restarting isys, we can safely delete the old
+		 * context.
+		 */
+ dev_err(&isys->adev->dev, "Clearing old context\n");
+ ipu_fw_isys_cleanup(isys);
+ }
+
+ rval = ipu_fw_isys_init(av->isys, ipdata->num_parallel_streams);
+ if (rval < 0)
+ goto out_lib_init;
+
+ mutex_unlock(&isys->mutex);
+
+ return 0;
+
+out_lib_init:
+ isys->video_opened--;
+ mutex_unlock(&isys->mutex);
+ v4l2_pipeline_pm_put(&av->vdev.entity);
+
+out_v4l2_fh_release:
+ v4l2_fh_release(file);
+out_power_down:
+ pm_runtime_put(&isys->adev->dev);
+
+ return rval;
+}
+
+static int video_release(struct file *file)
+{
+ struct ipu_isys_video *av = video_drvdata(file);
+ int ret = 0;
+
+ vb2_fop_release(file);
+
+ mutex_lock(&av->isys->mutex);
+
+ if (!--av->isys->video_opened) {
+ ipu_fw_isys_close(av->isys);
+ if (av->isys->fwcom) {
+ av->isys->reset_needed = true;
+ ret = -EIO;
+ }
+ }
+
+ mutex_unlock(&av->isys->mutex);
+
+ v4l2_pipeline_pm_put(&av->vdev.entity);
+
+ if (av->isys->reset_needed)
+ pm_runtime_put_sync(&av->isys->adev->dev);
+ else
+ pm_runtime_put(&av->isys->adev->dev);
+
+ return ret;
+}
+
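+/* Return the pad at the far end of the first data link on @pad's entity. */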
+static struct media_pad *other_pad(struct media_pad *pad)
+{
+ struct media_link *link;
+
+ list_for_each_entry(link, &pad->entity->links, list) {
+ if ((link->flags & MEDIA_LNK_FL_LINK_TYPE)
+ != MEDIA_LNK_FL_DATA_LINK)
+ continue;
+
+ return link->source == pad ? link->sink : link->source;
+ }
+
+ WARN_ON(1);
+ return NULL;
+}
+
+const struct ipu_isys_pixelformat *
+ipu_isys_get_pixelformat(struct ipu_isys_video *av, u32 pixelformat)
+{
+ struct media_pad *pad = other_pad(&av->vdev.entity.pads[0]);
+ struct v4l2_subdev *sd;
+ const u32 *supported_codes;
+ const struct ipu_isys_pixelformat *pfmt;
+
+ if (!pad || !pad->entity) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ supported_codes = to_ipu_isys_subdev(sd)->supported_codes[pad->index];
+
+ for (pfmt = av->pfmts; pfmt->bpp; pfmt++) {
+ unsigned int i;
+
+ if (pfmt->pixelformat != pixelformat)
+ continue;
+
+ for (i = 0; supported_codes[i]; i++) {
+ if (pfmt->code == supported_codes[i])
+ return pfmt;
+ }
+ }
+
+ /* Not found. Get the default, i.e. the first defined one. */
+ for (pfmt = av->pfmts; pfmt->bpp; pfmt++) {
+ if (pfmt->code == *supported_codes)
+ return pfmt;
+ }
+
+ WARN_ON(1);
+ return NULL;
+}
+
+int ipu_isys_vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct ipu_isys_video *av = video_drvdata(file);
+
+ strlcpy(cap->driver, IPU_ISYS_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, av->isys->media_dev.model, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
+ av->isys->media_dev.bus_info);
+ return 0;
+}
+
+int ipu_isys_vidioc_enum_fmt(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct ipu_isys_video *av = video_drvdata(file);
+ struct media_pad *pad = other_pad(&av->vdev.entity.pads[0]);
+ struct v4l2_subdev *sd;
+ const u32 *supported_codes;
+ const struct ipu_isys_pixelformat *pfmt;
+ u32 index;
+
+ if (!pad || !pad->entity)
+ return -EINVAL;
+ sd = media_entity_to_v4l2_subdev(pad->entity);
+ supported_codes = to_ipu_isys_subdev(sd)->supported_codes[pad->index];
+
+ /* Walk the 0-terminated array for the f->index-th code. */
+ for (index = f->index; *supported_codes && index;
+ index--, supported_codes++) {
+	}
+
+ if (!*supported_codes)
+ return -EINVAL;
+
+ f->flags = 0;
+
+ /* Code found */
+ for (pfmt = av->pfmts; pfmt->bpp; pfmt++)
+ if (pfmt->code == *supported_codes)
+ break;
+
+ if (!pfmt->bpp) {
+ dev_warn(&av->isys->adev->dev,
+			 "Format not found in mapping table\n");
+ return -EINVAL;
+ }
+
+ f->pixelformat = pfmt->pixelformat;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *fh,
+ struct v4l2_format *fmt)
+{
+ struct ipu_isys_video *av = video_drvdata(file);
+
+ fmt->fmt.pix_mp = av->mpix;
+
+ return 0;
+}
+
+const struct ipu_isys_pixelformat *
+ipu_isys_video_try_fmt_vid_mplane_default(struct ipu_isys_video *av,
+ struct v4l2_pix_format_mplane *mpix)
+{
+ return ipu_isys_video_try_fmt_vid_mplane(av, mpix, 0);
+}
+
+const struct ipu_isys_pixelformat *
+ipu_isys_video_try_fmt_vid_mplane(struct ipu_isys_video *av,
+ struct v4l2_pix_format_mplane *mpix,
+ int store_csi2_header)
+{
+ const struct ipu_isys_pixelformat *pfmt =
+ ipu_isys_get_pixelformat(av, mpix->pixelformat);
+
+ if (!pfmt)
+ return NULL;
+ mpix->pixelformat = pfmt->pixelformat;
+ mpix->num_planes = 1;
+
+ mpix->width = clamp(mpix->width, IPU_ISYS_MIN_WIDTH,
+ IPU_ISYS_MAX_WIDTH);
+ mpix->height = clamp(mpix->height, IPU_ISYS_MIN_HEIGHT,
+ IPU_ISYS_MAX_HEIGHT);
+
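+	/*
+	 * Line stride: unpacked formats round each sample up to whole
+	 * bytes; packed formats store bpp bits per pixel and, when the
+	 * CSI-2 headers are kept, also include the per-line long packet
+	 * header and footer.
+	 */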
+ if (!av->packed)
+ mpix->plane_fmt[0].bytesperline =
+ mpix->width * DIV_ROUND_UP(pfmt->bpp_planar ?
+ pfmt->bpp_planar : pfmt->bpp,
+ BITS_PER_BYTE);
+ else if (store_csi2_header)
+ mpix->plane_fmt[0].bytesperline =
+ DIV_ROUND_UP(av->line_header_length +
+ av->line_footer_length +
+ (unsigned int)mpix->width * pfmt->bpp,
+ BITS_PER_BYTE);
+ else
+ mpix->plane_fmt[0].bytesperline =
+ DIV_ROUND_UP((unsigned int)mpix->width * pfmt->bpp,
+ BITS_PER_BYTE);
+
+ mpix->plane_fmt[0].bytesperline = ALIGN(mpix->plane_fmt[0].bytesperline,
+ av->isys->line_align);
+
+ if (pfmt->bpp_planar)
+ mpix->plane_fmt[0].bytesperline =
+ mpix->plane_fmt[0].bytesperline *
+ pfmt->bpp / pfmt->bpp_planar;
+	/*
+	 * (height + 1) * bytesperline due to a hardware issue: the DMA unit
+	 * is a power of two, and a line should be transferred as few units
+	 * as possible. The result is that up to a line length of extra data
+	 * may be transferred to memory after the image.
+	 * Another limitation is the GDA allocation unit size. For low
+	 * resolutions it gives a bigger number. Use the larger of the two
+	 * to avoid memory corruption.
+	 */
+ mpix->plane_fmt[0].sizeimage =
+ max(max(mpix->plane_fmt[0].sizeimage,
+ mpix->plane_fmt[0].bytesperline * mpix->height +
+ max(mpix->plane_fmt[0].bytesperline,
+ av->isys->pdata->ipdata->isys_dma_overshoot)), 1U);
+
+ if (av->compression_ctrl)
+ av->compression = v4l2_ctrl_g_ctrl(av->compression_ctrl);
+
+ /* overwrite bpl/height with compression alignment */
+ if (av->compression) {
+ u32 planar_tile_status_size, tile_status_size;
+
+ mpix->plane_fmt[0].bytesperline =
+ ALIGN(mpix->plane_fmt[0].bytesperline,
+ IPU_ISYS_COMPRESSION_LINE_ALIGN);
+ mpix->height = ALIGN(mpix->height,
+ IPU_ISYS_COMPRESSION_HEIGHT_ALIGN);
+
+ mpix->plane_fmt[0].sizeimage =
+ ALIGN(mpix->plane_fmt[0].bytesperline * mpix->height,
+ IPU_ISYS_COMPRESSION_PAGE_ALIGN);
+
+		/* ISYS compression is supported only for RAW, single-planar formats */
+ planar_tile_status_size =
+ DIV_ROUND_UP_ULL((mpix->plane_fmt[0].bytesperline *
+ mpix->height /
+ IPU_ISYS_COMPRESSION_TILE_SIZE_BYTES) *
+ IPU_ISYS_COMPRESSION_TILE_STATUS_BITS,
+ BITS_PER_BYTE);
+ tile_status_size = ALIGN(planar_tile_status_size,
+ IPU_ISYS_COMPRESSION_PAGE_ALIGN);
+
+ /* tile status buffer offsets relative to buffer base address */
+ av->ts_offsets[0] = mpix->plane_fmt[0].sizeimage;
+ mpix->plane_fmt[0].sizeimage += tile_status_size;
+
+ dev_dbg(&av->isys->adev->dev,
+ "cmprs: bpl:%d, height:%d img size:%d, ts_sz:%d\n",
+ mpix->plane_fmt[0].bytesperline, mpix->height,
+ av->ts_offsets[0], tile_status_size);
+ }
+
+ memset(mpix->plane_fmt[0].reserved, 0,
+ sizeof(mpix->plane_fmt[0].reserved));
+
+ if (mpix->field == V4L2_FIELD_ANY)
+ mpix->field = V4L2_FIELD_NONE;
+ /* Use defaults */
+ mpix->colorspace = V4L2_COLORSPACE_RAW;
+ mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
+ mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ return pfmt;
+}
+
+static int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct ipu_isys_video *av = video_drvdata(file);
+
+ if (av->aq.vbq.streaming)
+ return -EBUSY;
+
+ av->pfmt = av->try_fmt_vid_mplane(av, &f->fmt.pix_mp);
+ av->mpix = f->fmt.pix_mp;
+
+ return 0;
+}
+
+static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct ipu_isys_video *av = video_drvdata(file);
+
+ av->try_fmt_vid_mplane(av, &f->fmt.pix_mp);
+
+ return 0;
+}
+
+static long ipu_isys_vidioc_private(struct file *file, void *fh,
+ bool valid_prio, unsigned int cmd,
+ void *arg)
+{
+ struct ipu_isys_video *av = video_drvdata(file);
+ int ret = 0;
+
+ switch (cmd) {
+ case VIDIOC_IPU_GET_DRIVER_VERSION:
+ *(u32 *)arg = IPU_DRIVER_VERSION;
+ break;
+
+ default:
+ dev_dbg(&av->isys->adev->dev, "unsupported private ioctl %x\n",
+ cmd);
+ }
+
+ return ret;
+}
+
+static int vidioc_enum_input(struct file *file, void *fh,
+ struct v4l2_input *input)
+{
+ if (input->index > 0)
+ return -EINVAL;
+ strlcpy(input->name, "camera", sizeof(input->name));
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *fh, unsigned int *input)
+{
+ *input = 0;
+
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
+{
+ return input == 0 ? 0 : -EINVAL;
+}
+
+/*
+ * Return true if an entity directly connected to an Iunit entity is
+ * an image source for the ISP. This can be any directly connected
+ * external entity or any of the test pattern generators in the
+ * Iunit.
+ */
+static bool is_external(struct ipu_isys_video *av, struct media_entity *entity)
+{
+ struct v4l2_subdev *sd;
+
+ /* All video nodes are ours. */
+ if (!is_media_entity_v4l2_subdev(entity))
+ return false;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+ if (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX,
+ strlen(IPU_ISYS_ENTITY_PREFIX)) != 0)
+ return true;
+
+ return false;
+}
+
+static int link_validate(struct media_link *link)
+{
+ struct ipu_isys_video *av =
+ container_of(link->sink, struct ipu_isys_video, pad);
+ /* All sub-devices connected to a video node are ours. */
+ struct ipu_isys_pipeline *ip =
+ to_ipu_isys_pipeline(av->vdev.entity.pipe);
+ struct v4l2_subdev *sd;
+
+ if (!link->source->entity)
+ return -EINVAL;
+ sd = media_entity_to_v4l2_subdev(link->source->entity);
+ if (is_external(av, link->source->entity)) {
+ ip->external = media_entity_remote_pad(av->vdev.entity.pads);
+ ip->source = to_ipu_isys_subdev(sd)->source;
+ }
+
+ ip->nr_queues++;
+
+ return 0;
+}
+
+static void get_stream_opened(struct ipu_isys_video *av)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&av->isys->lock, flags);
+ av->isys->stream_opened++;
+ spin_unlock_irqrestore(&av->isys->lock, flags);
+}
+
+static void put_stream_opened(struct ipu_isys_video *av)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&av->isys->lock, flags);
+ av->isys->stream_opened--;
+ spin_unlock_irqrestore(&av->isys->lock, flags);
+}
+
+static int get_stream_handle(struct ipu_isys_video *av)
+{
+ struct ipu_isys_pipeline *ip =
+ to_ipu_isys_pipeline(av->vdev.entity.pipe);
+ unsigned int stream_handle;
+ unsigned long flags;
+
+ spin_lock_irqsave(&av->isys->lock, flags);
+ for (stream_handle = 0;
+ stream_handle < IPU_ISYS_MAX_STREAMS; stream_handle++)
+ if (!av->isys->pipes[stream_handle])
+ break;
+ if (stream_handle == IPU_ISYS_MAX_STREAMS) {
+ spin_unlock_irqrestore(&av->isys->lock, flags);
+ return -EBUSY;
+ }
+ av->isys->pipes[stream_handle] = ip;
+ ip->stream_handle = stream_handle;
+ spin_unlock_irqrestore(&av->isys->lock, flags);
+ return 0;
+}
+
+static void put_stream_handle(struct ipu_isys_video *av)
+{
+ struct ipu_isys_pipeline *ip =
+ to_ipu_isys_pipeline(av->vdev.entity.pipe);
+ unsigned long flags;
+
+ spin_lock_irqsave(&av->isys->lock, flags);
+ av->isys->pipes[ip->stream_handle] = NULL;
+ ip->stream_handle = -1;
+ spin_unlock_irqrestore(&av->isys->lock, flags);
+}
+
+static int get_external_facing_format(struct ipu_isys_pipeline *ip,
+ struct v4l2_subdev_format *format)
+{
+ struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip);
+ struct v4l2_subdev *sd;
+ struct media_pad *external_facing;
+
+ if (!ip->external->entity) {
+ WARN_ON(1);
+ return -ENODEV;
+ }
+ sd = media_entity_to_v4l2_subdev(ip->external->entity);
+ external_facing = (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX,
+ strlen(IPU_ISYS_ENTITY_PREFIX)) == 0) ?
+ ip->external :
+ media_entity_remote_pad(ip->external);
+ if (WARN_ON(!external_facing)) {
+ dev_warn(&av->isys->adev->dev,
+ "no external facing pad --- driver bug?\n");
+ return -EINVAL;
+ }
+
+ format->which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ format->pad = 0;
+ sd = media_entity_to_v4l2_subdev(external_facing->entity);
+
+ return v4l2_subdev_call(sd, pad, get_fmt, NULL, format);
+}
+
+static void short_packet_queue_destroy(struct ipu_isys_pipeline *ip)
+{
+ struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip);
+ unsigned int i;
+
+ if (!ip->short_packet_bufs)
+ return;
+ for (i = 0; i < IPU_ISYS_SHORT_PACKET_BUFFER_NUM; i++) {
+ if (ip->short_packet_bufs[i].buffer)
+ dma_free_coherent(&av->isys->adev->dev,
+ ip->short_packet_buffer_size,
+ ip->short_packet_bufs[i].buffer,
+ ip->short_packet_bufs[i].dma_addr);
+ }
+ kfree(ip->short_packet_bufs);
+ ip->short_packet_bufs = NULL;
+}
+
+static int short_packet_queue_setup(struct ipu_isys_pipeline *ip)
+{
+ struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip);
+ struct v4l2_subdev_format source_fmt = { 0 };
+ unsigned int i;
+ int rval;
+ size_t buf_size;
+
+ INIT_LIST_HEAD(&ip->pending_interlaced_bufs);
+ ip->cur_field = V4L2_FIELD_TOP;
+
+ if (ip->isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) {
+ ip->short_packet_trace_index = 0;
+ return 0;
+ }
+
+ rval = get_external_facing_format(ip, &source_fmt);
+ if (rval)
+ return rval;
+ buf_size = IPU_ISYS_SHORT_PACKET_BUF_SIZE(source_fmt.format.height);
+ ip->short_packet_buffer_size = buf_size;
+ ip->num_short_packet_lines =
+ IPU_ISYS_SHORT_PACKET_PKT_LINES(source_fmt.format.height);
+
+ /* Initialize short packet queue. */
+ INIT_LIST_HEAD(&ip->short_packet_incoming);
+ INIT_LIST_HEAD(&ip->short_packet_active);
+
+ ip->short_packet_bufs =
+ kzalloc(sizeof(struct ipu_isys_private_buffer) *
+ IPU_ISYS_SHORT_PACKET_BUFFER_NUM, GFP_KERNEL);
+ if (!ip->short_packet_bufs)
+ return -ENOMEM;
+
+ for (i = 0; i < IPU_ISYS_SHORT_PACKET_BUFFER_NUM; i++) {
+ struct ipu_isys_private_buffer *buf = &ip->short_packet_bufs[i];
+
+ buf->index = (unsigned int)i;
+ buf->ip = ip;
+ buf->ib.type = IPU_ISYS_SHORT_PACKET_BUFFER;
+ buf->bytesused = buf_size;
+ buf->buffer = dma_alloc_coherent(&av->isys->adev->dev, buf_size,
+ &buf->dma_addr, GFP_KERNEL);
+ if (!buf->buffer) {
+ short_packet_queue_destroy(ip);
+ return -ENOMEM;
+ }
+ list_add(&buf->ib.head, &ip->short_packet_incoming);
+ }
+
+ return 0;
+}
+
+static void
+csi_short_packet_prepare_fw_cfg(struct ipu_isys_pipeline *ip,
+ struct ipu_fw_isys_stream_cfg_data_abi *cfg)
+{
+ int input_pin = cfg->nof_input_pins++;
+ int output_pin = cfg->nof_output_pins++;
+ struct ipu_fw_isys_input_pin_info_abi *input_info =
+ &cfg->input_pins[input_pin];
+ struct ipu_fw_isys_output_pin_info_abi *output_info =
+ &cfg->output_pins[output_pin];
+ struct ipu_isys *isys = ip->isys;
+
+ /*
+ * Setting dt to IPU_ISYS_SHORT_PACKET_GENERAL_DT causes the MIPI
+ * receiver to receive all MIPI short packets.
+ */
+ input_info->dt = IPU_ISYS_SHORT_PACKET_GENERAL_DT;
+ input_info->input_res.width = IPU_ISYS_SHORT_PACKET_WIDTH;
+ input_info->input_res.height = ip->num_short_packet_lines;
+
+ ip->output_pins[output_pin].pin_ready =
+ ipu_isys_queue_short_packet_ready;
+ ip->output_pins[output_pin].aq = NULL;
+ ip->short_packet_output_pin = output_pin;
+
+ output_info->input_pin_id = input_pin;
+ output_info->output_res.width = IPU_ISYS_SHORT_PACKET_WIDTH;
+ output_info->output_res.height = ip->num_short_packet_lines;
+ output_info->stride = IPU_ISYS_SHORT_PACKET_WIDTH *
+ IPU_ISYS_SHORT_PACKET_UNITSIZE;
+ output_info->pt = IPU_ISYS_SHORT_PACKET_PT;
+ output_info->ft = IPU_ISYS_SHORT_PACKET_FT;
+ output_info->send_irq = 1;
+ memset(output_info->ts_offsets, 0, sizeof(output_info->ts_offsets));
+ output_info->s2m_pixel_soc_pixel_remapping =
+ S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING;
+ output_info->csi_be_soc_pixel_remapping =
+ CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING;
+ output_info->sensor_type = isys->sensor_info.sensor_metadata;
+ output_info->snoopable = true;
+ output_info->error_handling_enable = false;
+}
+
+void
+ipu_isys_prepare_fw_cfg_default(struct ipu_isys_video *av,
+ struct ipu_fw_isys_stream_cfg_data_abi *cfg)
+{
+ struct ipu_isys_pipeline *ip =
+ to_ipu_isys_pipeline(av->vdev.entity.pipe);
+ struct ipu_isys_queue *aq = &av->aq;
+ struct ipu_fw_isys_output_pin_info_abi *pin_info;
+ struct ipu_isys *isys = av->isys;
+ unsigned int type_index, type;
+ int pin = cfg->nof_output_pins++;
+
+ aq->fw_output = pin;
+ ip->output_pins[pin].pin_ready = ipu_isys_queue_buf_ready;
+ ip->output_pins[pin].aq = aq;
+
+ pin_info = &cfg->output_pins[pin];
+ pin_info->input_pin_id = 0;
+ pin_info->output_res.width = av->mpix.width;
+ pin_info->output_res.height = av->mpix.height;
+
+ if (!av->pfmt->bpp_planar)
+ pin_info->stride = av->mpix.plane_fmt[0].bytesperline;
+ else
+ pin_info->stride = ALIGN(DIV_ROUND_UP(av->mpix.width *
+ av->pfmt->bpp_planar,
+ BITS_PER_BYTE),
+ av->isys->line_align);
+
+ pin_info->pt = aq->css_pin_type;
+ pin_info->ft = av->pfmt->css_pixelformat;
+ pin_info->send_irq = 1;
+ memset(pin_info->ts_offsets, 0, sizeof(pin_info->ts_offsets));
+ pin_info->s2m_pixel_soc_pixel_remapping =
+ S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING;
+ pin_info->csi_be_soc_pixel_remapping =
+ CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING;
+ cfg->vc = 0;
+
+ switch (pin_info->pt) {
+ /* non-snoopable sensor data to PSYS */
+ case IPU_FW_ISYS_PIN_TYPE_RAW_NS:
+ type_index = IPU_FW_ISYS_VC1_SENSOR_DATA;
+ pin_info->sensor_type = isys->sensor_types[type_index]++;
+ pin_info->snoopable = false;
+ pin_info->error_handling_enable = false;
+ type = isys->sensor_types[type_index];
+ if (type > isys->sensor_info.vc1_data_end)
+ isys->sensor_types[type_index] =
+ isys->sensor_info.vc1_data_start;
+
+ break;
+ /* snoopable META/Stats data to CPU */
+ case IPU_FW_ISYS_PIN_TYPE_METADATA_0:
+ case IPU_FW_ISYS_PIN_TYPE_METADATA_1:
+ pin_info->sensor_type = isys->sensor_info.sensor_metadata;
+ pin_info->snoopable = true;
+ pin_info->error_handling_enable = false;
+ break;
+ case IPU_FW_ISYS_PIN_TYPE_RAW_SOC:
+ if (av->compression) {
+ type_index = IPU_FW_ISYS_VC1_SENSOR_DATA;
+ pin_info->sensor_type
+ = isys->sensor_types[type_index]++;
+ pin_info->snoopable = false;
+ pin_info->error_handling_enable = false;
+ type = isys->sensor_types[type_index];
+ if (type > isys->sensor_info.vc1_data_end)
+ isys->sensor_types[type_index] =
+ isys->sensor_info.vc1_data_start;
+ } else {
+ type_index = IPU_FW_ISYS_VC0_SENSOR_DATA;
+ pin_info->sensor_type
+ = isys->sensor_types[type_index]++;
+ pin_info->snoopable = true;
+ pin_info->error_handling_enable = false;
+ type = isys->sensor_types[type_index];
+ if (type > isys->sensor_info.vc0_data_end)
+ isys->sensor_types[type_index] =
+ isys->sensor_info.vc0_data_start;
+ }
+ break;
+ case IPU_FW_ISYS_PIN_TYPE_MIPI:
+ type_index = IPU_FW_ISYS_VC0_SENSOR_DATA;
+ pin_info->sensor_type = isys->sensor_types[type_index]++;
+ pin_info->snoopable = true;
+ pin_info->error_handling_enable = false;
+ type = isys->sensor_types[type_index];
+ if (type > isys->sensor_info.vc0_data_end)
+ isys->sensor_types[type_index] =
+ isys->sensor_info.vc0_data_start;
+
+ break;
+
+ default:
+ dev_err(&av->isys->adev->dev,
+ "Unknown pin type, use metadata type as default\n");
+
+ pin_info->sensor_type = isys->sensor_info.sensor_metadata;
+ pin_info->snoopable = true;
+ pin_info->error_handling_enable = false;
+ }
+ if (av->compression) {
+ pin_info->payload_buf_size = av->mpix.plane_fmt[0].sizeimage;
+ pin_info->reserve_compression = av->compression;
+ pin_info->ts_offsets[0] = av->ts_offsets[0];
+ }
+}
+
+static unsigned int ipu_isys_get_compression_scheme(u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
+ case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
+ return 3;
+ default:
+ return 0;
+ }
+}
+
+static unsigned int get_comp_format(u32 code)
+{
+ unsigned int predictor = 0; /* currently hard coded */
+ unsigned int udt = ipu_isys_mbus_code_to_mipi(code);
+ unsigned int scheme = ipu_isys_get_compression_scheme(code);
+
+ /* if the data type is not user-defined, return here */
+ if (udt < IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(1) ||
+ udt > IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(8))
+ return 0;
+
+ /*
+ * For each user defined type (1..8) there is a 4-bit configuration
+ * bitfield for decompression.
+ *
+ * | bit 3 | bits 2:0 |
+ * | predictor | scheme |
+ * compression schemes:
+ * 000 = no compression
+ * 001 = 10 - 6 - 10
+ * 010 = 10 - 7 - 10
+ * 011 = 10 - 8 - 10
+ * 100 = 12 - 6 - 12
+ * 101 = 12 - 7 - 12
+ * 110 = 12 - 8 - 12
+ */
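+
+ /*
+ * Worked example (editor's illustration, derived from the code
+ * below): for user defined type 3 with predictor 0 and scheme 011
+ * (10-8-10), the nibble is 0x3; shifted left by (3 - 1) * 4 = 8
+ * bits it yields a compfmt value of 0x300.
+ */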
+
+ return ((predictor << 3) | scheme) <<
+ ((udt - IPU_ISYS_MIPI_CSI2_TYPE_USER_DEF(1)) * 4);
+}
+
+/* Create stream and start it using the CSS FW ABI. */
+static int start_stream_firmware(struct ipu_isys_video *av,
+ struct ipu_isys_buffer_list *bl)
+{
+ struct ipu_isys_pipeline *ip =
+ to_ipu_isys_pipeline(av->vdev.entity.pipe);
+ struct device *dev = &av->isys->adev->dev;
+ struct v4l2_subdev_selection sel_fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .target = V4L2_SEL_TGT_CROP,
+ .pad = CSI2_BE_PAD_SOURCE,
+ };
+ struct ipu_fw_isys_stream_cfg_data_abi *stream_cfg;
+ struct isys_fw_msgs *msg = NULL;
+ struct ipu_fw_isys_frame_buff_set_abi *buf = NULL;
+ struct ipu_isys_queue *aq;
+ struct ipu_isys_video *isl_av = NULL;
+ struct v4l2_subdev_format source_fmt = { 0 };
+ struct v4l2_subdev *be_sd = NULL;
+ struct media_pad *source_pad = media_entity_remote_pad(&av->pad);
+ struct ipu_fw_isys_cropping_abi *crop;
+ enum ipu_fw_isys_send_type send_type;
+ int rval, rvalout, tout;
+
+ rval = get_external_facing_format(ip, &source_fmt);
+ if (rval)
+ return rval;
+
+ msg = ipu_get_fw_msg_buf(ip);
+ if (!msg)
+ return -ENOMEM;
+
+ stream_cfg = to_stream_cfg_msg_buf(msg);
+ stream_cfg->compfmt = get_comp_format(source_fmt.format.code);
+ stream_cfg->input_pins[0].input_res.width = source_fmt.format.width;
+ stream_cfg->input_pins[0].input_res.height = source_fmt.format.height;
+ stream_cfg->input_pins[0].dt =
+ ipu_isys_mbus_code_to_mipi(source_fmt.format.code);
+ stream_cfg->input_pins[0].mapped_dt = N_IPU_FW_ISYS_MIPI_DATA_TYPE;
+ stream_cfg->input_pins[0].mipi_decompression =
+ IPU_FW_ISYS_MIPI_COMPRESSION_TYPE_NO_COMPRESSION;
+ stream_cfg->input_pins[0].capture_mode =
+ IPU_FW_ISYS_CAPTURE_MODE_REGULAR;
+ if (ip->csi2 && !v4l2_ctrl_g_ctrl(ip->csi2->store_csi2_header))
+ stream_cfg->input_pins[0].mipi_store_mode =
+ IPU_FW_ISYS_MIPI_STORE_MODE_DISCARD_LONG_HEADER;
+
+ stream_cfg->src = ip->source;
+ stream_cfg->vc = 0;
+ stream_cfg->isl_use = ip->isl_mode;
+ stream_cfg->nof_input_pins = 1;
+ stream_cfg->sensor_type = IPU_FW_ISYS_SENSOR_MODE_NORMAL;
+
+ /*
+ * Only the CSI2-BE and the SOC BE have cropping capability, so get
+ * the crop info from csi2-be or csi2-be-soc.
+ */
+ if (ip->csi2_be) {
+ be_sd = &ip->csi2_be->asd.sd;
+ } else if (ip->csi2_be_soc) {
+ be_sd = &ip->csi2_be_soc->asd.sd;
+ if (source_pad)
+ sel_fmt.pad = source_pad->index;
+ }
+ crop = &stream_cfg->crop;
+ if (be_sd &&
+ !v4l2_subdev_call(be_sd, pad, get_selection, NULL, &sel_fmt)) {
+ crop->left_offset = sel_fmt.r.left;
+ crop->top_offset = sel_fmt.r.top;
+ crop->right_offset = sel_fmt.r.left + sel_fmt.r.width;
+ crop->bottom_offset = sel_fmt.r.top + sel_fmt.r.height;
+
+ } else {
+ crop->right_offset = source_fmt.format.width;
+ crop->bottom_offset = source_fmt.format.height;
+ }
+
+ /*
+ * If the CSI-2 backend's video node is part of the pipeline,
+ * it must be placed first in the output pin list. This is most
+ * probably a firmware requirement.
+ */
+ if (ip->isl_mode == IPU_ISL_CSI2_BE)
+ isl_av = &ip->csi2_be->av;
+
+ if (isl_av) {
+ struct ipu_isys_queue *safe;
+
+ list_for_each_entry_safe(aq, safe, &ip->queues, node) {
+ struct ipu_isys_video *av = ipu_isys_queue_to_video(aq);
+
+ if (av != isl_av)
+ continue;
+
+ list_del(&aq->node);
+ list_add(&aq->node, &ip->queues);
+ break;
+ }
+ }
+
+ list_for_each_entry(aq, &ip->queues, node) {
+ struct ipu_isys_video *__av = ipu_isys_queue_to_video(aq);
+
+ __av->prepare_fw_stream(__av, stream_cfg);
+ }
+
+ if (ip->interlaced && ip->isys->short_packet_source ==
+ IPU_ISYS_SHORT_PACKET_FROM_RECEIVER)
+ csi_short_packet_prepare_fw_cfg(ip, stream_cfg);
+
+ ipu_fw_isys_dump_stream_cfg(dev, stream_cfg);
+
+ ip->nr_output_pins = stream_cfg->nof_output_pins;
+
+ rval = get_stream_handle(av);
+ if (rval) {
+ dev_dbg(dev, "Can't get stream_handle\n");
+ return rval;
+ }
+
+ reinit_completion(&ip->stream_open_completion);
+
+ ipu_fw_isys_set_params(stream_cfg);
+
+ rval = ipu_fw_isys_complex_cmd(av->isys,
+ ip->stream_handle,
+ stream_cfg,
+ to_dma_addr(msg),
+ sizeof(*stream_cfg),
+ IPU_FW_ISYS_SEND_TYPE_STREAM_OPEN);
+ ipu_put_fw_mgs_buf(av->isys, (uintptr_t)stream_cfg);
+
+ if (rval < 0) {
+ dev_err(dev, "can't open stream (%d)\n", rval);
+ goto out_put_stream_handle;
+ }
+
+ get_stream_opened(av);
+
+ tout = wait_for_completion_timeout(&ip->stream_open_completion,
+ IPU_LIB_CALL_TIMEOUT_JIFFIES);
+ if (!tout) {
+ dev_err(dev, "stream open time out\n");
+ rval = -ETIMEDOUT;
+ goto out_put_stream_opened;
+ }
+ if (ip->error) {
+ dev_err(dev, "stream open error: %d\n", ip->error);
+ rval = -EIO;
+ goto out_put_stream_opened;
+ }
+ dev_dbg(dev, "start stream: open complete\n");
+
+ if (bl) {
+ msg = ipu_get_fw_msg_buf(ip);
+ if (!msg) {
+ rval = -ENOMEM;
+ goto out_put_stream_opened;
+ }
+ buf = to_frame_msg_buf(msg);
+ ipu_isys_buffer_to_fw_frame_buff(buf, ip, bl);
+ ipu_isys_buffer_list_queue(bl,
+ IPU_ISYS_BUFFER_LIST_FL_ACTIVE, 0);
+ }
+
+ reinit_completion(&ip->stream_start_completion);
+
+ if (bl) {
+ send_type = IPU_FW_ISYS_SEND_TYPE_STREAM_START_AND_CAPTURE;
+ ipu_fw_isys_dump_frame_buff_set(dev, buf,
+ stream_cfg->nof_output_pins);
+ rval = ipu_fw_isys_complex_cmd(av->isys,
+ ip->stream_handle,
+ buf, to_dma_addr(msg),
+ sizeof(*buf),
+ send_type);
+ ipu_put_fw_mgs_buf(av->isys, (uintptr_t)buf);
+ } else {
+ send_type = IPU_FW_ISYS_SEND_TYPE_STREAM_START;
+ rval = ipu_fw_isys_simple_cmd(av->isys,
+ ip->stream_handle,
+ send_type);
+ }
+
+ if (rval < 0) {
+ dev_err(dev, "can't start streaming (%d)\n", rval);
+ goto out_stream_close;
+ }
+
+ tout = wait_for_completion_timeout(&ip->stream_start_completion,
+ IPU_LIB_CALL_TIMEOUT_JIFFIES);
+ if (!tout) {
+ dev_err(dev, "stream start time out\n");
+ rval = -ETIMEDOUT;
+ goto out_stream_close;
+ }
+ if (ip->error) {
+ dev_err(dev, "stream start error: %d\n", ip->error);
+ rval = -EIO;
+ goto out_stream_close;
+ }
+ dev_dbg(dev, "start stream: complete\n");
+
+ return 0;
+
+out_stream_close:
+ reinit_completion(&ip->stream_close_completion);
+
+ rvalout = ipu_fw_isys_simple_cmd(av->isys,
+ ip->stream_handle,
+ IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE);
+ if (rvalout < 0) {
+ dev_dbg(dev, "can't close stream (%d)\n", rvalout);
+ goto out_put_stream_opened;
+ }
+
+ tout = wait_for_completion_timeout(&ip->stream_close_completion,
+ IPU_LIB_CALL_TIMEOUT_JIFFIES);
+ if (!tout)
+ dev_err(dev, "stream close time out\n");
+ else if (ip->error)
+ dev_err(dev, "stream close error: %d\n", ip->error);
+ else
+ dev_dbg(dev, "stream close complete\n");
+
+out_put_stream_opened:
+ put_stream_opened(av);
+
+out_put_stream_handle:
+ put_stream_handle(av);
+ return rval;
+}
+
+static void stop_streaming_firmware(struct ipu_isys_video *av)
+{
+ struct ipu_isys_pipeline *ip =
+ to_ipu_isys_pipeline(av->vdev.entity.pipe);
+ struct device *dev = &av->isys->adev->dev;
+ int rval, tout;
+ enum ipu_fw_isys_send_type send_type =
+ IPU_FW_ISYS_SEND_TYPE_STREAM_FLUSH;
+
+ reinit_completion(&ip->stream_stop_completion);
+
+ rval = ipu_fw_isys_simple_cmd(av->isys, ip->stream_handle,
+ send_type);
+
+ if (rval < 0) {
+ dev_err(dev, "can't stop stream (%d)\n", rval);
+ return;
+ }
+
+ tout = wait_for_completion_timeout(&ip->stream_stop_completion,
+ IPU_LIB_CALL_TIMEOUT_JIFFIES);
+ if (!tout)
+ dev_err(dev, "stream stop time out\n");
+ else if (ip->error)
+ dev_err(dev, "stream stop error: %d\n", ip->error);
+ else
+ dev_dbg(dev, "stop stream: complete\n");
+}
+
+static void close_streaming_firmware(struct ipu_isys_video *av)
+{
+ struct ipu_isys_pipeline *ip =
+ to_ipu_isys_pipeline(av->vdev.entity.pipe);
+ struct device *dev = &av->isys->adev->dev;
+ int rval, tout;
+
+ reinit_completion(&ip->stream_close_completion);
+
+ rval = ipu_fw_isys_simple_cmd(av->isys, ip->stream_handle,
+ IPU_FW_ISYS_SEND_TYPE_STREAM_CLOSE);
+ if (rval < 0) {
+ dev_err(dev, "can't close stream (%d)\n", rval);
+ return;
+ }
+
+ tout = wait_for_completion_timeout(&ip->stream_close_completion,
+ IPU_LIB_CALL_TIMEOUT_JIFFIES);
+ if (!tout)
+ dev_err(dev, "stream close time out\n");
+ else if (ip->error)
+ dev_err(dev, "stream close error: %d\n", ip->error);
+ else
+ dev_dbg(dev, "close stream: complete\n");
+
+ put_stream_opened(av);
+ put_stream_handle(av);
+}
+
+void
+ipu_isys_video_add_capture_done(struct ipu_isys_pipeline *ip,
+ void (*capture_done)
+ (struct ipu_isys_pipeline *ip,
+ struct ipu_fw_isys_resp_info_abi *resp))
+{
+ unsigned int i;
+
+ /* Different instances may register the same function. Add it only once. */
+ for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++)
+ if (ip->capture_done[i] == capture_done)
+ return;
+
+ for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++) {
+ if (!ip->capture_done[i]) {
+ ip->capture_done[i] = capture_done;
+ return;
+ }
+ }
+ /*
+ * Too many callbacks registered. Increasing the
+ * IPU_NUM_CAPTURE_DONE constant is probably required.
+ */
+ WARN_ON(1);
+}
+
+int ipu_isys_video_prepare_streaming(struct ipu_isys_video *av,
+ unsigned int state)
+{
+ struct ipu_isys *isys = av->isys;
+ struct device *dev = &isys->adev->dev;
+ struct ipu_isys_pipeline *ip;
+ struct media_graph graph;
+ struct media_entity *entity;
+ struct media_device *mdev = &av->isys->media_dev;
+ int rval;
+ unsigned int i;
+
+ dev_dbg(dev, "prepare stream: %d\n", state);
+
+ if (!state) {
+ ip = to_ipu_isys_pipeline(av->vdev.entity.pipe);
+
+ if (ip->interlaced && isys->short_packet_source ==
+ IPU_ISYS_SHORT_PACKET_FROM_RECEIVER)
+ short_packet_queue_destroy(ip);
+ media_pipeline_stop(&av->vdev.entity);
+ media_entity_enum_cleanup(&ip->entity_enum);
+ return 0;
+ }
+
+ ip = &av->ip;
+
+ WARN_ON(ip->nr_streaming);
+ ip->has_sof = false;
+ ip->nr_queues = 0;
+ ip->external = NULL;
+ atomic_set(&ip->sequence, 0);
+ ip->isl_mode = IPU_ISL_OFF;
+
+ for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++)
+ ip->capture_done[i] = NULL;
+ ip->csi2_be = NULL;
+ ip->csi2_be_soc = NULL;
+ ip->csi2 = NULL;
+ ip->seq_index = 0;
+ memset(ip->seq, 0, sizeof(ip->seq));
+
+ WARN_ON(!list_empty(&ip->queues));
+ ip->interlaced = false;
+
+ rval = media_entity_enum_init(&ip->entity_enum, mdev);
+ if (rval)
+ return rval;
+
+ rval = media_pipeline_start(&av->vdev.entity, &ip->pipe);
+ if (rval < 0) {
+ dev_dbg(dev, "pipeline start failed\n");
+ goto out_enum_cleanup;
+ }
+
+ if (!ip->external) {
+ dev_err(dev, "no external entity set! Driver bug?\n");
+ rval = -EINVAL;
+ goto out_pipeline_stop;
+ }
+
+ rval = media_graph_walk_init(&graph, mdev);
+ if (rval)
+ goto out_pipeline_stop;
+
+ /* Gather all entities in the graph. */
+ mutex_lock(&mdev->graph_mutex);
+ media_graph_walk_start(&graph, &av->vdev.entity);
+ while ((entity = media_graph_walk_next(&graph)))
+ media_entity_enum_set(&ip->entity_enum, entity);
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ media_graph_walk_cleanup(&graph);
+
+ if (ip->interlaced) {
+ rval = short_packet_queue_setup(ip);
+ if (rval) {
+ dev_err(&isys->adev->dev,
+ "Failed to setup short packet buffer.\n");
+ goto out_pipeline_stop;
+ }
+ }
+
+ dev_dbg(dev, "prepare stream: external entity %s\n",
+ ip->external->entity->name);
+
+ return 0;
+
+out_pipeline_stop:
+ media_pipeline_stop(&av->vdev.entity);
+
+out_enum_cleanup:
+ media_entity_enum_cleanup(&ip->entity_enum);
+
+ return rval;
+}
+
+static void configure_stream_watermark(struct ipu_isys_video *av)
+{
+ u32 vblank, hblank;
+ u64 pixel_rate;
+ int ret = 0;
+ struct v4l2_subdev *esd;
+ struct v4l2_ctrl *ctrl;
+ struct ipu_isys_pipeline *ip;
+ struct isys_iwake_watermark *iwake_watermark;
+ struct v4l2_control vb = { .id = V4L2_CID_VBLANK, .value = 0 };
+ struct v4l2_control hb = { .id = V4L2_CID_HBLANK, .value = 0 };
+
+ ip = to_ipu_isys_pipeline(av->vdev.entity.pipe);
+ if (!ip->external->entity) {
+ WARN_ON(1);
+ return;
+ }
+ esd = media_entity_to_v4l2_subdev(ip->external->entity);
+
+ av->watermark->width = av->mpix.width;
+ av->watermark->height = av->mpix.height;
+
+ ret = v4l2_g_ctrl(esd->ctrl_handler, &vb);
+ if (!ret && vb.value >= 0)
+ vblank = vb.value;
+ else
+ vblank = 0;
+
+ ret = v4l2_g_ctrl(esd->ctrl_handler, &hb);
+ if (!ret && hb.value >= 0)
+ hblank = hb.value;
+ else
+ hblank = 0;
+
+ ctrl = v4l2_ctrl_find(esd->ctrl_handler, V4L2_CID_PIXEL_RATE);
+
+ if (!ctrl)
+ pixel_rate = DEFAULT_PIXEL_RATE;
+ else
+ pixel_rate = v4l2_ctrl_g_ctrl_int64(ctrl);
+
+ av->watermark->vblank = vblank;
+ av->watermark->hblank = hblank;
+ av->watermark->pixel_rate = pixel_rate;
+ if (!pixel_rate) {
+ iwake_watermark = av->isys->iwake_watermark;
+ mutex_lock(&iwake_watermark->mutex);
+ iwake_watermark->force_iwake_disable = true;
+ mutex_unlock(&iwake_watermark->mutex);
+ WARN(1, "%s Invalid pixel_rate, disable iwake.\n", __func__);
+ return;
+ }
+}
+
+static void calculate_stream_datarate(struct video_stream_watermark *watermark)
+{
+ u64 pixels_per_line, bytes_per_line, line_time_ns;
+ u64 pages_per_line, pb_bytes_per_line, stream_data_rate;
+ u16 sram_granulrity_shift =
+ (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) ?
+ IPU6_SRAM_GRANULRITY_SHIFT : IPU6SE_SRAM_GRANULRITY_SHIFT;
+ u16 sram_granulrity_size =
+ (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) ?
+ IPU6_SRAM_GRANULRITY_SIZE : IPU6SE_SRAM_GRANULRITY_SIZE;
+
+ pixels_per_line = watermark->width + watermark->hblank;
+ line_time_ns =
+ pixels_per_line * 1000 / (watermark->pixel_rate / 1000000);
+ /* 2 bytes per Bayer pixel */
+ bytes_per_line = watermark->width << 1;
+ /* round bytes per line up to full IS pixel buffer pages */
+ pages_per_line = DIV_ROUND_UP(bytes_per_line,
+ sram_granulrity_size);
+ pb_bytes_per_line = pages_per_line << sram_granulrity_shift;
+
+ /* data rate MB/s */
+ stream_data_rate = (pb_bytes_per_line * 1000) / line_time_ns;
+ watermark->stream_data_rate = stream_data_rate;
+}
+
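+/*
+ * Editor's illustration of the math above, assuming a 2KB SRAM page
+ * granularity: a 1920-pixel-wide stream with an 80-pixel hblank at
+ * 200 Mpixel/s has a line time of 10000 ns and 3840 payload bytes per
+ * line, rounded up to two pages (4096 bytes), i.e. a stream data rate
+ * of roughly 409 MB/s.
+ */
+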
+static void update_stream_watermark(struct ipu_isys_video *av, bool state)
+{
+ struct isys_iwake_watermark *iwake_watermark;
+
+ iwake_watermark = av->isys->iwake_watermark;
+ if (state) {
+ calculate_stream_datarate(av->watermark);
+ mutex_lock(&iwake_watermark->mutex);
+ list_add(&av->watermark->stream_node,
+ &iwake_watermark->video_list);
+ mutex_unlock(&iwake_watermark->mutex);
+ } else {
+ av->watermark->stream_data_rate = 0;
+ mutex_lock(&iwake_watermark->mutex);
+ list_del(&av->watermark->stream_node);
+ mutex_unlock(&iwake_watermark->mutex);
+ }
+ update_watermark_setting(av->isys);
+}
+
+int ipu_isys_video_set_streaming(struct ipu_isys_video *av,
+ unsigned int state,
+ struct ipu_isys_buffer_list *bl)
+{
+ struct device *dev = &av->isys->adev->dev;
+ struct media_device *mdev = av->vdev.entity.graph_obj.mdev;
+ struct media_entity_enum entities;
+
+ struct media_entity *entity, *entity2;
+ struct ipu_isys_pipeline *ip =
+ to_ipu_isys_pipeline(av->vdev.entity.pipe);
+ struct v4l2_subdev *sd, *esd;
+ int rval = 0;
+
+ dev_dbg(dev, "set stream: %d\n", state);
+
+ if (!ip->external->entity) {
+ WARN_ON(1);
+ return -ENODEV;
+ }
+ esd = media_entity_to_v4l2_subdev(ip->external->entity);
+
+ if (state) {
+ rval = media_graph_walk_init(&ip->graph, mdev);
+ if (rval)
+ return rval;
+ rval = media_entity_enum_init(&entities, mdev);
+ if (rval)
+ goto out_media_entity_graph_init;
+ }
+
+ if (!state) {
+ stop_streaming_firmware(av);
+
+ /* Stop the external sub-device now. */
+ dev_info(dev, "stream off %s\n", ip->external->entity->name);
+
+ v4l2_subdev_call(esd, video, s_stream, state);
+ }
+
+ mutex_lock(&mdev->graph_mutex);
+
+ media_graph_walk_start(&ip->graph,
+ &av->vdev.entity);
+
+ while ((entity = media_graph_walk_next(&ip->graph))) {
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ /* Non-subdev nodes can be safely ignored here. */
+ if (!is_media_entity_v4l2_subdev(entity))
+ continue;
+
+ /* Don't start truly external devices quite yet. */
+ if (strncmp(sd->name, IPU_ISYS_ENTITY_PREFIX,
+ strlen(IPU_ISYS_ENTITY_PREFIX)) != 0 ||
+ ip->external->entity == entity)
+ continue;
+
+ dev_dbg(dev, "s_stream %s entity %s\n", state ? "on" : "off",
+ entity->name);
+ rval = v4l2_subdev_call(sd, video, s_stream, state);
+ if (!state)
+ continue;
+ if (rval && rval != -ENOIOCTLCMD) {
+ mutex_unlock(&mdev->graph_mutex);
+ goto out_media_entity_stop_streaming;
+ }
+
+ media_entity_enum_set(&entities, entity);
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ if (av->aq.css_pin_type == IPU_FW_ISYS_PIN_TYPE_RAW_SOC) {
+ if (state)
+ configure_stream_watermark(av);
+ update_stream_watermark(av, state);
+ }
+
+ /* Start the firmware stream, then the external sub-device. */
+ if (state) {
+ rval = start_stream_firmware(av, bl);
+ if (rval)
+ goto out_media_entity_stop_streaming;
+
+ dev_dbg(dev, "set stream: source %d, stream_handle %d\n",
+ ip->source, ip->stream_handle);
+
+ /* Start external sub-device now. */
+ dev_info(dev, "stream on %s\n", ip->external->entity->name);
+
+ rval = v4l2_subdev_call(esd, video, s_stream, state);
+ if (rval)
+ goto out_media_entity_stop_streaming_firmware;
+ } else {
+ close_streaming_firmware(av);
+ }
+
+ if (state)
+ media_entity_enum_cleanup(&entities);
+ else
+ media_graph_walk_cleanup(&ip->graph);
+ av->streaming = state;
+
+ return 0;
+
+out_media_entity_stop_streaming_firmware:
+ stop_streaming_firmware(av);
+
+out_media_entity_stop_streaming:
+ mutex_lock(&mdev->graph_mutex);
+
+ media_graph_walk_start(&ip->graph,
+ &av->vdev.entity);
+
+ while (state && (entity2 = media_graph_walk_next(&ip->graph)) &&
+ entity2 != entity) {
+ sd = media_entity_to_v4l2_subdev(entity2);
+
+ if (!media_entity_enum_test(&entities, entity2))
+ continue;
+
+ v4l2_subdev_call(sd, video, s_stream, 0);
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ media_entity_enum_cleanup(&entities);
+
+out_media_entity_graph_init:
+ media_graph_walk_cleanup(&ip->graph);
+
+ return rval;
+}
+
+#ifdef CONFIG_COMPAT
+static long ipu_isys_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long ret = -ENOIOCTLCMD;
+ void __user *up = compat_ptr(arg);
+
+ /*
+ * At present there are no private IOCTLs that need compat handling.
+ */
+ if (file->f_op->unlocked_ioctl)
+ ret = file->f_op->unlocked_ioctl(file, cmd, (unsigned long)up);
+
+ return ret;
+}
+#endif
+
+static const struct v4l2_ioctl_ops ioctl_ops_mplane = {
+ .vidioc_querycap = ipu_isys_vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = ipu_isys_vidioc_enum_fmt,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_vid_cap_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_vid_cap_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_vid_cap_mplane,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_default = ipu_isys_vidioc_private,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+};
+
+static const struct media_entity_operations entity_ops = {
+ .link_validate = link_validate,
+};
+
+static const struct v4l2_file_operations isys_fops = {
+ .owner = THIS_MODULE,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = ipu_isys_compat_ioctl,
+#endif
+ .mmap = vb2_fop_mmap,
+ .open = video_open,
+ .release = video_release,
+};
+
+/*
+ * Do everything that's needed to initialise things related to video
+ * buffer queue, video node, and the related media entity. The caller
+ * is expected to assign isys field and set the name of the video
+ * device.
+ */
+int ipu_isys_video_init(struct ipu_isys_video *av,
+ struct media_entity *entity,
+ unsigned int pad, unsigned long pad_flags,
+ unsigned int flags)
+{
+ const struct v4l2_ioctl_ops *ioctl_ops = NULL;
+ int rval;
+
+ mutex_init(&av->mutex);
+ init_completion(&av->ip.stream_open_completion);
+ init_completion(&av->ip.stream_close_completion);
+ init_completion(&av->ip.stream_start_completion);
+ init_completion(&av->ip.stream_stop_completion);
+ INIT_LIST_HEAD(&av->ip.queues);
+ spin_lock_init(&av->ip.short_packet_queue_lock);
+ av->ip.isys = av->isys;
+
+ if (!av->watermark) {
+ av->watermark = kzalloc(sizeof(*av->watermark), GFP_KERNEL);
+ if (!av->watermark) {
+ rval = -ENOMEM;
+ goto out_mutex_destroy;
+ }
+ }
+
+ av->vdev.device_caps = V4L2_CAP_STREAMING;
+ if (pad_flags & MEDIA_PAD_FL_SINK) {
+ av->aq.vbq.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ ioctl_ops = &ioctl_ops_mplane;
+ av->vdev.device_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+ av->vdev.vfl_dir = VFL_DIR_RX;
+ } else {
+ av->aq.vbq.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ av->vdev.vfl_dir = VFL_DIR_TX;
+ av->vdev.device_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+ }
+ rval = ipu_isys_queue_init(&av->aq);
+ if (rval)
+ goto out_mutex_destroy;
+
+ av->pad.flags = pad_flags | MEDIA_PAD_FL_MUST_CONNECT;
+ rval = media_entity_pads_init(&av->vdev.entity, 1, &av->pad);
+ if (rval)
+ goto out_ipu_isys_queue_cleanup;
+
+ av->vdev.entity.ops = &entity_ops;
+ av->vdev.release = video_device_release_empty;
+ av->vdev.fops = &isys_fops;
+ av->vdev.v4l2_dev = &av->isys->v4l2_dev;
+ if (!av->vdev.ioctl_ops)
+ av->vdev.ioctl_ops = ioctl_ops;
+ av->vdev.queue = &av->aq.vbq;
+ av->vdev.lock = &av->mutex;
+ set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags);
+ video_set_drvdata(&av->vdev, av);
+
+ mutex_lock(&av->mutex);
+
+ rval = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1);
+ if (rval)
+ goto out_media_entity_cleanup;
+
+ if (pad_flags & MEDIA_PAD_FL_SINK)
+ rval = media_create_pad_link(entity, pad,
+ &av->vdev.entity, 0, flags);
+ else
+ rval = media_create_pad_link(&av->vdev.entity, 0, entity,
+ pad, flags);
+ if (rval) {
+ dev_info(&av->isys->adev->dev, "can't create link\n");
+ goto out_media_entity_cleanup;
+ }
+
+ av->pfmt = av->try_fmt_vid_mplane(av, &av->mpix);
+
+ mutex_unlock(&av->mutex);
+
+ return rval;
+
+out_media_entity_cleanup:
+ video_unregister_device(&av->vdev);
+ mutex_unlock(&av->mutex);
+ media_entity_cleanup(&av->vdev.entity);
+
+out_ipu_isys_queue_cleanup:
+ ipu_isys_queue_cleanup(&av->aq);
+
+out_mutex_destroy:
+ kfree(av->watermark);
+ mutex_destroy(&av->mutex);
+
+ return rval;
+}
+
+void ipu_isys_video_cleanup(struct ipu_isys_video *av)
+{
+ kfree(av->watermark);
+ video_unregister_device(&av->vdev);
+ media_entity_cleanup(&av->vdev.entity);
+ mutex_destroy(&av->mutex);
+ ipu_isys_queue_cleanup(&av->aq);
+}
new file mode 100644
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_VIDEO_H
+#define IPU_ISYS_VIDEO_H
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/videodev2.h>
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "ipu-isys-queue.h"
+
+#define IPU_ISYS_OUTPUT_PINS 11
+#define IPU_NUM_CAPTURE_DONE 2
+#define IPU_ISYS_MAX_PARALLEL_SOF 2
+
+struct ipu_isys;
+struct ipu_isys_csi2_be_soc;
+struct ipu_fw_isys_stream_cfg_data_abi;
+
+struct ipu_isys_pixelformat {
+ u32 pixelformat;
+ u32 bpp;
+ u32 bpp_packed;
+ u32 bpp_planar;
+ u32 code;
+ u32 css_pixelformat;
+};
+
+struct sequence_info {
+ unsigned int sequence;
+ u64 timestamp;
+};
+
+struct output_pin_data {
+ void (*pin_ready)(struct ipu_isys_pipeline *ip,
+ struct ipu_fw_isys_resp_info_abi *info);
+ struct ipu_isys_queue *aq;
+};
+
+struct ipu_isys_pipeline {
+ struct media_pipeline pipe;
+ struct media_pad *external;
+ atomic_t sequence;
+ unsigned int seq_index;
+ struct sequence_info seq[IPU_ISYS_MAX_PARALLEL_SOF];
+ int source; /* SSI stream source */
+ int stream_handle; /* stream handle for CSS API */
+ unsigned int nr_output_pins; /* How many firmware pins? */
+ enum ipu_isl_mode isl_mode;
+ struct ipu_isys_csi2_be *csi2_be;
+ struct ipu_isys_csi2_be_soc *csi2_be_soc;
+ struct ipu_isys_csi2 *csi2;
+
+ /*
+ * Number of capture queues, write access serialised using struct
+ * ipu_isys.stream_mutex
+ */
+ int nr_queues;
+ int nr_streaming; /* Number of capture queues streaming */
+ int streaming; /* Has streaming been really started? */
+ struct list_head queues;
+ struct completion stream_open_completion;
+ struct completion stream_close_completion;
+ struct completion stream_start_completion;
+ struct completion stream_stop_completion;
+ struct ipu_isys *isys;
+
+ void (*capture_done[IPU_NUM_CAPTURE_DONE])
+ (struct ipu_isys_pipeline *ip,
+ struct ipu_fw_isys_resp_info_abi *resp);
+ struct output_pin_data output_pins[IPU_ISYS_OUTPUT_PINS];
+ bool has_sof;
+ bool interlaced;
+ int error;
+ struct ipu_isys_private_buffer *short_packet_bufs;
+ size_t short_packet_buffer_size;
+ unsigned int num_short_packet_lines;
+ unsigned int short_packet_output_pin;
+ unsigned int cur_field;
+ struct list_head short_packet_incoming;
+ struct list_head short_packet_active;
+ /* Serialize access to short packet active and incoming lists */
+ spinlock_t short_packet_queue_lock;
+ struct list_head pending_interlaced_bufs;
+ unsigned int short_packet_trace_index;
+ struct media_graph graph;
+ struct media_entity_enum entity_enum;
+};
+
+#define to_ipu_isys_pipeline(__pipe) \
+ container_of((__pipe), struct ipu_isys_pipeline, pipe)
+
+struct video_stream_watermark {
+ u32 width;
+ u32 height;
+ u32 vblank;
+ u32 hblank;
+ u32 frame_rate;
+ u64 pixel_rate;
+ u64 stream_data_rate;
+ struct list_head stream_node;
+};
+
+struct ipu_isys_video {
+ /* Serialise access to other fields in the struct. */
+ struct mutex mutex;
+ struct media_pad pad;
+ struct video_device vdev;
+ struct v4l2_pix_format_mplane mpix;
+ const struct ipu_isys_pixelformat *pfmts;
+ const struct ipu_isys_pixelformat *pfmt;
+ struct ipu_isys_queue aq;
+ struct ipu_isys *isys;
+ struct ipu_isys_pipeline ip;
+ unsigned int streaming;
+ bool packed;
+ bool compression;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *compression_ctrl;
+ unsigned int ts_offsets[VIDEO_MAX_PLANES];
+ unsigned int line_header_length; /* bits */
+ unsigned int line_footer_length; /* bits */
+
+ struct video_stream_watermark *watermark;
+
+ const struct ipu_isys_pixelformat *
+ (*try_fmt_vid_mplane)(struct ipu_isys_video *av,
+ struct v4l2_pix_format_mplane *mpix);
+ void (*prepare_fw_stream)(struct ipu_isys_video *av,
+ struct ipu_fw_isys_stream_cfg_data_abi *cfg);
+};
+
+#define ipu_isys_queue_to_video(__aq) \
+ container_of(__aq, struct ipu_isys_video, aq)
+
+extern const struct ipu_isys_pixelformat ipu_isys_pfmts[];
+extern const struct ipu_isys_pixelformat ipu_isys_pfmts_be_soc[];
+extern const struct ipu_isys_pixelformat ipu_isys_pfmts_packed[];
+
+const struct ipu_isys_pixelformat *
+ipu_isys_get_pixelformat(struct ipu_isys_video *av, u32 pixelformat);
+
+int ipu_isys_vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap);
+
+int ipu_isys_vidioc_enum_fmt(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f);
+
+const struct ipu_isys_pixelformat *
+ipu_isys_video_try_fmt_vid_mplane_default(struct ipu_isys_video *av,
+ struct v4l2_pix_format_mplane *mpix);
+
+const struct ipu_isys_pixelformat *
+ipu_isys_video_try_fmt_vid_mplane(struct ipu_isys_video *av,
+ struct v4l2_pix_format_mplane *mpix,
+ int store_csi2_header);
+
+void
+ipu_isys_prepare_fw_cfg_default(struct ipu_isys_video *av,
+ struct ipu_fw_isys_stream_cfg_data_abi *cfg);
+int ipu_isys_video_prepare_streaming(struct ipu_isys_video *av,
+ unsigned int state);
+int ipu_isys_video_set_streaming(struct ipu_isys_video *av, unsigned int state,
+ struct ipu_isys_buffer_list *bl);
+int ipu_isys_video_init(struct ipu_isys_video *av, struct media_entity *source,
+ unsigned int source_pad, unsigned long pad_flags,
+ unsigned int flags);
+void ipu_isys_video_cleanup(struct ipu_isys_video *av);
+void ipu_isys_video_add_capture_done(struct ipu_isys_pipeline *ip,
+ void (*capture_done)
+ (struct ipu_isys_pipeline *ip,
+ struct ipu_fw_isys_resp_info_abi *resp));
+
+#endif /* IPU_ISYS_VIDEO_H */
new file mode 100644
@@ -0,0 +1,1355 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2021 Intel Corporation
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/version.h>
+
+#include <media/ipu-isys.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-async.h>
+#include "ipu.h"
+#include "ipu-bus.h"
+#include "ipu-cpd.h"
+#include "ipu-mmu.h"
+#include "ipu-dma.h"
+#include "ipu-isys.h"
+#include "ipu-isys-csi2.h"
+#include "ipu-isys-video.h"
+#include "ipu-platform-regs.h"
+#include "ipu-buttress.h"
+#include "ipu-platform.h"
+#include "ipu-platform-buttress-regs.h"
+
+#define ISYS_PM_QOS_VALUE 300
+
+#define IPU_BUTTRESS_FABIC_CONTROL 0x68
+#define GDA_ENABLE_IWAKE_INDEX 2
+#define GDA_IWAKE_THRESHOLD_INDEX 1
+#define GDA_IRQ_CRITICAL_THRESHOLD_INDEX 0
+
+/* LTR & DID value are 10 bit at most */
+#define LTR_DID_VAL_MAX 1023
+#define LTR_DEFAULT_VALUE 0x70503C19
+#define FILL_TIME_DEFAULT_VALUE 0xFFF0783C
+#define LTR_DID_PKGC_2R 20
+#define LTR_DID_PKGC_8 100
+#define LTR_SCALE_DEFAULT 5
+#define LTR_SCALE_1024NS 2
+#define REG_PKGC_PMON_CFG 0xB00
+
+#define VAL_PKGC_PMON_CFG_RESET 0x38
+#define VAL_PKGC_PMON_CFG_START 0x7
+
+#define IS_PIXEL_BUFFER_PAGES 0x80
+/* The BIOS provides the driver with the LTR and threshold information
+ * for the IPU; the IS pixel buffer is 256KB and MaxSRAMSize is 200KB
+ * on IPU6.
+ */
+#define IPU6_MAX_SRAM_SIZE (200 << 10)
+/* The IS pixel buffer is 128KB and MaxSRAMSize is 96KB on IPU6SE. */
+#define IPU6SE_MAX_SRAM_SIZE (96 << 10)
+/* When iwake mode is disabled, the critical threshold is statically set
+ * to 75% of the IS pixel buffer: critical_threshold = (128 * 3) / 4 pages.
+ */
+#define CRITICAL_THRESHOLD_IWAKE_DISABLE (IS_PIXEL_BUFFER_PAGES * 3 / 4)
+
+union fabric_ctrl {
+ struct {
+ u16 ltr_val : 10;
+ u16 ltr_scale : 3;
+ u16 RSVD1 : 3;
+ u16 did_val : 10;
+ u16 did_scale : 3;
+ u16 RSVD2 : 1;
+ u16 keep_power_in_D0 : 1;
+ u16 keep_power_override : 1;
+ } bits;
+ u32 value;
+};
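+
+/*
+ * Bit layout implied by the field widths above (editor's note):
+ * ltr_val bits 9:0, ltr_scale 12:10, did_val 25:16, did_scale 28:26,
+ * keep_power_in_D0 bit 30 and keep_power_override bit 31; bits 15:13
+ * and 29 are reserved.
+ */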
+
+enum ltr_did_type {
+ LTR_IWAKE_ON,
+ LTR_IWAKE_OFF,
+ LTR_ISYS_ON,
+ LTR_ISYS_OFF,
+ LTR_TYPE_MAX
+};
+
+static int
+isys_complete_ext_device_registration(struct ipu_isys *isys,
+ struct v4l2_subdev *sd,
+ struct ipu_isys_csi2_config *csi2)
+{
+ unsigned int i;
+ int rval;
+
+ v4l2_set_subdev_hostdata(sd, csi2);
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ if (sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
+ break;
+ }
+
+ if (i == sd->entity.num_pads) {
+ dev_warn(&isys->adev->dev,
+ "no source pad in external entity\n");
+ rval = -ENOENT;
+ goto skip_unregister_subdev;
+ }
+
+ rval = media_create_pad_link(&sd->entity, i,
+ &isys->csi2[csi2->port].asd.sd.entity,
+ 0, 0);
+ if (rval) {
+ dev_warn(&isys->adev->dev, "can't create link\n");
+ goto skip_unregister_subdev;
+ }
+
+ isys->csi2[csi2->port].nlanes = csi2->nlanes;
+ return 0;
+
+skip_unregister_subdev:
+ v4l2_device_unregister_subdev(sd);
+ return rval;
+}
+
+static void isys_unregister_subdevices(struct ipu_isys *isys)
+{
+ const struct ipu_isys_internal_csi2_pdata *csi2 =
+ &isys->pdata->ipdata->csi2;
+ unsigned int i;
+
+ for (i = 0; i < NR_OF_CSI2_BE_SOC_DEV; i++)
+ ipu_isys_csi2_be_soc_cleanup(&isys->csi2_be_soc[i]);
+
+ for (i = 0; i < csi2->nports; i++)
+ ipu_isys_csi2_cleanup(&isys->csi2[i]);
+}
+
+static int isys_register_subdevices(struct ipu_isys *isys)
+{
+ const struct ipu_isys_internal_csi2_pdata *csi2 =
+ &isys->pdata->ipdata->csi2;
+ struct ipu_isys_csi2_be_soc *csi2_be_soc;
+ unsigned int i, k;
+ int rval;
+
+ isys->csi2 = devm_kcalloc(&isys->adev->dev, csi2->nports,
+ sizeof(*isys->csi2), GFP_KERNEL);
+ if (!isys->csi2) {
+ rval = -ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0; i < csi2->nports; i++) {
+ rval = ipu_isys_csi2_init(&isys->csi2[i], isys,
+ isys->pdata->base +
+ csi2->offsets[i], i);
+ if (rval)
+ goto fail;
+
+ isys->isr_csi2_bits |= IPU_ISYS_UNISPART_IRQ_CSI2(i);
+ }
+
+ for (k = 0; k < NR_OF_CSI2_BE_SOC_DEV; k++) {
+ rval = ipu_isys_csi2_be_soc_init(&isys->csi2_be_soc[k],
+ isys, k);
+ if (rval) {
+ dev_info(&isys->adev->dev,
+ "can't register csi2 soc be device %d\n", k);
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < csi2->nports; i++) {
+ for (k = 0; k < NR_OF_CSI2_BE_SOC_DEV; k++) {
+ csi2_be_soc = &isys->csi2_be_soc[k];
+ rval =
+ media_create_pad_link(&isys->csi2[i].asd.sd.entity,
+ CSI2_PAD_SOURCE,
+ &csi2_be_soc->asd.sd.entity,
+ CSI2_BE_SOC_PAD_SINK, 0);
+ if (rval) {
+ dev_info(&isys->adev->dev,
+ "can't create link csi2->be_soc\n");
+ goto fail;
+ }
+ }
+ }
+
+ return 0;
+
+fail:
+ isys_unregister_subdevices(isys);
+ return rval;
+}
+
+/* read ltrdid threshold values from BIOS or system configuration */
+static void get_lut_ltrdid(struct ipu_isys *isys, struct ltr_did *pltr_did)
+{
+ struct isys_iwake_watermark *iwake_watermark = isys->iwake_watermark;
+ /* default values */
+ struct ltr_did ltrdid_default;
+
+ ltrdid_default.lut_ltr.value = LTR_DEFAULT_VALUE;
+ ltrdid_default.lut_fill_time.value = FILL_TIME_DEFAULT_VALUE;
+
+ if (iwake_watermark->ltrdid.lut_ltr.value)
+ *pltr_did = iwake_watermark->ltrdid;
+ else
+ *pltr_did = ltrdid_default;
+}
+
+static int set_iwake_register(struct ipu_isys *isys, u32 index, u32 value)
+{
+ int ret = 0;
+ u32 req_id = index;
+ u32 offset = 0;
+
+ ret = ipu_fw_isys_send_proxy_token(isys, req_id, index, offset, value);
+ if (ret)
+ dev_err(&isys->adev->dev, "write %d failed %d", index, ret);
+
+ return ret;
+}
+
+/*
+ * When input system is powered up and before enabling any new sensor capture,
+ * or after disabling any sensor capture the following values need to be set:
+ * LTR_value = LTR(usec) from calculation;
+ * LTR_scale = 2;
+ * DID_value = DID(usec) from calculation;
+ * DID_scale = 2;
+ *
+ * When input system is powered down, the LTR and DID values
+ * must be returned to the default values:
+ * LTR_value = 1023;
+ * LTR_scale = 5;
+ * DID_value = 1023;
+ * DID_scale = 2;
+ */
+static void set_iwake_ltrdid(struct ipu_isys *isys,
+ u16 ltr,
+ u16 did,
+ enum ltr_did_type use)
+{
+ /* did_scale is always set to 2, i.e. 1 us granularity */
+ u16 ltr_val, ltr_scale, did_val;
+ union fabric_ctrl fc;
+ struct ipu_device *isp = isys->adev->isp;
+
+ switch (use) {
+ case LTR_IWAKE_ON:
+ ltr_val = min_t(u16, ltr, (u16)LTR_DID_VAL_MAX);
+ did_val = min_t(u16, did, (u16)LTR_DID_VAL_MAX);
+ ltr_scale = (ltr == LTR_DID_VAL_MAX &&
+ did == LTR_DID_VAL_MAX) ?
+ LTR_SCALE_DEFAULT : LTR_SCALE_1024NS;
+ break;
+ case LTR_ISYS_ON:
+ case LTR_IWAKE_OFF:
+ ltr_val = LTR_DID_PKGC_2R;
+ did_val = LTR_DID_PKGC_2R;
+ ltr_scale = LTR_SCALE_1024NS;
+ break;
+ case LTR_ISYS_OFF:
+ ltr_val = LTR_DID_VAL_MAX;
+ did_val = LTR_DID_VAL_MAX;
+ ltr_scale = LTR_SCALE_DEFAULT;
+ break;
+ default:
+ return;
+ }
+
+ fc.value = readl(isp->base + IPU_BUTTRESS_FABIC_CONTROL);
+ fc.bits.ltr_val = ltr_val;
+ fc.bits.ltr_scale = ltr_scale;
+ fc.bits.did_val = did_val;
+ fc.bits.did_scale = 2;
+ dev_dbg(&isys->adev->dev,
+ "%s ltr: %d did: %d", __func__, ltr_val, did_val);
+ writel(fc.value, isp->base + IPU_BUTTRESS_FABIC_CONTROL);
+}
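+
+/*
+ * Editor's note: LTR_ISYS_OFF above programs ltr_val = did_val = 1023
+ * with ltr_scale = 5 and did_scale = 2, i.e. the power-down defaults
+ * described in the comment before set_iwake_ltrdid().
+ */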
+
+/* The SW driver may clear the GDA_ENABLE_IWAKE register before the FW
+ * configures the stream, for debug purposes. Otherwise SW should not
+ * access this register.
+ */
+static int enable_iwake(struct ipu_isys *isys, bool enable)
+{
+ int ret = 0;
+ struct isys_iwake_watermark *iwake_watermark = isys->iwake_watermark;
+
+ mutex_lock(&iwake_watermark->mutex);
+ if (iwake_watermark->iwake_enabled == enable) {
+ mutex_unlock(&iwake_watermark->mutex);
+ return ret;
+ }
+ ret = set_iwake_register(isys, GDA_ENABLE_IWAKE_INDEX, enable);
+ if (!ret)
+ iwake_watermark->iwake_enabled = enable;
+ mutex_unlock(&iwake_watermark->mutex);
+ return ret;
+}
+
+void update_watermark_setting(struct ipu_isys *isys)
+{
+ struct isys_iwake_watermark *iwake_watermark = isys->iwake_watermark;
+ struct list_head *stream_node;
+ struct video_stream_watermark *p_watermark;
+ struct ltr_did ltrdid;
+ u16 calc_fill_time_us = 0;
+ u16 ltr = 0;
+ u16 did = 0;
+ u32 iwake_threshold, iwake_critical_threshold;
+ u64 threshold_bytes;
+ u64 isys_pb_datarate_mbs = 0;
+ u16 sram_granulrity_shift =
+ (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) ?
+ IPU6_SRAM_GRANULRITY_SHIFT : IPU6SE_SRAM_GRANULRITY_SHIFT;
+ int max_sram_size =
+ (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) ?
+ IPU6_MAX_SRAM_SIZE : IPU6SE_MAX_SRAM_SIZE;
+
+ mutex_lock(&iwake_watermark->mutex);
+ if (iwake_watermark->force_iwake_disable) {
+ set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF);
+ set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX,
+ CRITICAL_THRESHOLD_IWAKE_DISABLE);
+ mutex_unlock(&iwake_watermark->mutex);
+ return;
+ }
+
+ if (list_empty(&iwake_watermark->video_list)) {
+ isys_pb_datarate_mbs = 0;
+ } else {
+ list_for_each(stream_node, &iwake_watermark->video_list)
+ {
+ p_watermark = list_entry(stream_node,
+ struct video_stream_watermark,
+ stream_node);
+ isys_pb_datarate_mbs += p_watermark->stream_data_rate;
+ }
+ }
+ mutex_unlock(&iwake_watermark->mutex);
+
+ if (!isys_pb_datarate_mbs) {
+ enable_iwake(isys, false);
+ set_iwake_ltrdid(isys, 0, 0, LTR_IWAKE_OFF);
+ mutex_lock(&iwake_watermark->mutex);
+ set_iwake_register(isys, GDA_IRQ_CRITICAL_THRESHOLD_INDEX,
+ CRITICAL_THRESHOLD_IWAKE_DISABLE);
+ mutex_unlock(&iwake_watermark->mutex);
+ } else {
+ /* iwake should be enabled by default, according to FW */
+ enable_iwake(isys, true);
+ calc_fill_time_us = (u16)(max_sram_size / isys_pb_datarate_mbs);
+ get_lut_ltrdid(isys, <rdid);
+
+ if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th0)
+ ltr = 0;
+ else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th1)
+ ltr = ltrdid.lut_ltr.bits.val0;
+ else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th2)
+ ltr = ltrdid.lut_ltr.bits.val1;
+ else if (calc_fill_time_us <= ltrdid.lut_fill_time.bits.th3)
+ ltr = ltrdid.lut_ltr.bits.val2;
+ else
+ ltr = ltrdid.lut_ltr.bits.val3;
+
+ did = calc_fill_time_us - ltr;
+
+ threshold_bytes = did * isys_pb_datarate_mbs;
+ /* calculate iwake threshold with 2KB granularity pages */
+ iwake_threshold =
+ max_t(u32, 1, threshold_bytes >> sram_granulrity_shift);
+
+ iwake_threshold = min_t(u32, iwake_threshold, max_sram_size);
+
+ /* set the critical threshold to halfway between
+ * iwake threshold and the full buffer.
+ */
+ iwake_critical_threshold = iwake_threshold +
+ (IS_PIXEL_BUFFER_PAGES - iwake_threshold) / 2;
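+
+ /*
+ * Editor's illustration: with did = 100 us and a combined data
+ * rate of 400 MB/s, threshold_bytes is 40000, i.e. 19 pages at
+ * the 2KB granularity noted above; the critical threshold then
+ * lands halfway between 19 and the 128-page buffer, at 73 pages.
+ */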
+
+ dev_dbg(&isys->adev->dev, "%s threshold: %u critical: %u",
+ __func__, iwake_threshold, iwake_critical_threshold);
+ set_iwake_ltrdid(isys, ltr, did, LTR_IWAKE_ON);
+ mutex_lock(&iwake_watermark->mutex);
+ set_iwake_register(isys,
+ GDA_IWAKE_THRESHOLD_INDEX, iwake_threshold);
+
+ set_iwake_register(isys,
+ GDA_IRQ_CRITICAL_THRESHOLD_INDEX,
+ iwake_critical_threshold);
+ mutex_unlock(&iwake_watermark->mutex);
+
+ writel(VAL_PKGC_PMON_CFG_RESET,
+ isys->adev->isp->base + REG_PKGC_PMON_CFG);
+ writel(VAL_PKGC_PMON_CFG_START,
+ isys->adev->isp->base + REG_PKGC_PMON_CFG);
+ }
+}
+
+static int isys_iwake_watermark_init(struct ipu_isys *isys)
+{
+ struct isys_iwake_watermark *iwake_watermark;
+
+ if (isys->iwake_watermark)
+ return 0;
+
+ iwake_watermark = devm_kzalloc(&isys->adev->dev,
+ sizeof(*iwake_watermark), GFP_KERNEL);
+ if (!iwake_watermark)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&iwake_watermark->video_list);
+ mutex_init(&iwake_watermark->mutex);
+
+ iwake_watermark->ltrdid.lut_ltr.value = 0;
+ isys->iwake_watermark = iwake_watermark;
+ iwake_watermark->isys = isys;
+ iwake_watermark->iwake_enabled = false;
+ iwake_watermark->force_iwake_disable = false;
+ return 0;
+}
+
+static int isys_iwake_watermark_cleanup(struct ipu_isys *isys)
+{
+ struct isys_iwake_watermark *iwake_watermark = isys->iwake_watermark;
+
+ if (!iwake_watermark)
+ return -EINVAL;
+ mutex_lock(&iwake_watermark->mutex);
+ list_del(&iwake_watermark->video_list);
+ mutex_unlock(&iwake_watermark->mutex);
+ mutex_destroy(&iwake_watermark->mutex);
+ isys->iwake_watermark = NULL;
+ return 0;
+}
+
+/* The .bound() notifier callback when a match is found */
+static int isys_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct ipu_isys *isys = container_of(notifier,
+ struct ipu_isys, notifier);
+ struct sensor_async_subdev *s_asd = container_of(asd,
+ struct sensor_async_subdev, asd);
+
+ dev_info(&isys->adev->dev, "bind %s nlanes is %d port is %d\n",
+ sd->name, s_asd->csi2.nlanes, s_asd->csi2.port);
+ isys_complete_ext_device_registration(isys, sd, &s_asd->csi2);
+
+ return v4l2_device_register_subdev_nodes(&isys->v4l2_dev);
+}
+
+static void isys_notifier_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct ipu_isys *isys = container_of(notifier,
+ struct ipu_isys, notifier);
+
+ dev_info(&isys->adev->dev, "unbind %s\n", sd->name);
+}
+
+static int isys_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+ struct ipu_isys *isys = container_of(notifier,
+ struct ipu_isys, notifier);
+
+ dev_info(&isys->adev->dev, "All sensor registration completed.\n");
+
+ return v4l2_device_register_subdev_nodes(&isys->v4l2_dev);
+}
+
+static const struct v4l2_async_notifier_operations isys_async_ops = {
+ .bound = isys_notifier_bound,
+ .unbind = isys_notifier_unbind,
+ .complete = isys_notifier_complete,
+};
+
+static int isys_fwnode_parse(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd)
+{
+ struct sensor_async_subdev *s_asd =
+ container_of(asd, struct sensor_async_subdev, asd);
+
+ s_asd->csi2.port = vep->base.port;
+ s_asd->csi2.nlanes = vep->bus.mipi_csi2.num_data_lanes;
+
+ return 0;
+}
+
+static int isys_notifier_init(struct ipu_isys *isys)
+{
+ struct ipu_device *isp = isys->adev->isp;
+ size_t asd_struct_size = sizeof(struct sensor_async_subdev);
+ int ret;
+
+ v4l2_async_notifier_init(&isys->notifier);
+ ret = v4l2_async_notifier_parse_fwnode_endpoints(&isp->pdev->dev,
+ &isys->notifier,
+ asd_struct_size,
+ isys_fwnode_parse);
+
+ if (ret < 0) {
+ dev_err(&isys->adev->dev,
+ "v4l2 parse_fwnode_endpoints() failed: %d\n", ret);
+ return ret;
+ }
+
+ if (list_empty(&isys->notifier.asd_list)) {
+ /* isys probe could continue with async subdevs missing */
+ dev_warn(&isys->adev->dev, "no subdev found in graph\n");
+ return 0;
+ }
+
+ isys->notifier.ops = &isys_async_ops;
+ ret = v4l2_async_notifier_register(&isys->v4l2_dev, &isys->notifier);
+ if (ret) {
+ dev_err(&isys->adev->dev,
+ "failed to register async notifier : %d\n", ret);
+ v4l2_async_notifier_cleanup(&isys->notifier);
+ }
+
+ return ret;
+}
+
+static void isys_notifier_cleanup(struct ipu_isys *isys)
+{
+ v4l2_async_notifier_unregister(&isys->notifier);
+ v4l2_async_notifier_cleanup(&isys->notifier);
+}
+
+static struct media_device_ops isys_mdev_ops = {
+ .link_notify = v4l2_pipeline_link_notify,
+};
+
+static int isys_register_devices(struct ipu_isys *isys)
+{
+ int rval;
+
+ isys->media_dev.dev = &isys->adev->dev;
+ isys->media_dev.ops = &isys_mdev_ops;
+ strlcpy(isys->media_dev.model,
+ IPU_MEDIA_DEV_MODEL_NAME, sizeof(isys->media_dev.model));
+ snprintf(isys->media_dev.bus_info, sizeof(isys->media_dev.bus_info),
+ "pci:%s", dev_name(isys->adev->dev.parent->parent));
+ strlcpy(isys->v4l2_dev.name, isys->media_dev.model,
+ sizeof(isys->v4l2_dev.name));
+
+ media_device_init(&isys->media_dev);
+
+ rval = media_device_register(&isys->media_dev);
+ if (rval < 0) {
+ dev_info(&isys->adev->dev, "can't register media device\n");
+ goto out_media_device_unregister;
+ }
+
+ isys->v4l2_dev.mdev = &isys->media_dev;
+
+ rval = v4l2_device_register(&isys->adev->dev, &isys->v4l2_dev);
+ if (rval < 0) {
+ dev_info(&isys->adev->dev, "can't register v4l2 device\n");
+ goto out_media_device_unregister;
+ }
+
+ rval = isys_register_subdevices(isys);
+ if (rval)
+ goto out_v4l2_device_unregister;
+
+ rval = isys_notifier_init(isys);
+ if (rval)
+ goto out_isys_unregister_subdevices;
+
+ rval = v4l2_device_register_subdev_nodes(&isys->v4l2_dev);
+ if (rval)
+ goto out_isys_notifier_cleanup;
+
+ return 0;
+
+out_isys_notifier_cleanup:
+ isys_notifier_cleanup(isys);
+
+out_isys_unregister_subdevices:
+ isys_unregister_subdevices(isys);
+
+out_v4l2_device_unregister:
+ v4l2_device_unregister(&isys->v4l2_dev);
+
+out_media_device_unregister:
+ media_device_unregister(&isys->media_dev);
+ media_device_cleanup(&isys->media_dev);
+
+ return rval;
+}
+
+static void isys_unregister_devices(struct ipu_isys *isys)
+{
+ isys_unregister_subdevices(isys);
+ v4l2_device_unregister(&isys->v4l2_dev);
+ media_device_unregister(&isys->media_dev);
+ media_device_cleanup(&isys->media_dev);
+}
+
+#ifdef CONFIG_PM
+static int isys_runtime_pm_resume(struct device *dev)
+{
+ struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+ struct ipu_device *isp = adev->isp;
+ struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+ unsigned long flags;
+ int ret;
+
+ if (!isys)
+ return 0;
+
+ ret = ipu_mmu_hw_init(adev->mmu);
+ if (ret)
+ return ret;
+
+ ipu_trace_restore(dev);
+
+ cpu_latency_qos_update_request(&isys->pm_qos, ISYS_PM_QOS_VALUE);
+
+ ret = ipu_buttress_start_tsc_sync(isp);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&isys->power_lock, flags);
+ isys->power = 1;
+ spin_unlock_irqrestore(&isys->power_lock, flags);
+
+ if (isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) {
+ mutex_lock(&isys->short_packet_tracing_mutex);
+ isys->short_packet_tracing_count = 0;
+ mutex_unlock(&isys->short_packet_tracing_mutex);
+ }
+ isys_setup_hw(isys);
+
+ set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_ON);
+ return 0;
+}
+
+static int isys_runtime_pm_suspend(struct device *dev)
+{
+ struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+ struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+ unsigned long flags;
+
+ if (!isys)
+ return 0;
+
+ spin_lock_irqsave(&isys->power_lock, flags);
+ isys->power = 0;
+ spin_unlock_irqrestore(&isys->power_lock, flags);
+
+ ipu_trace_stop(dev);
+ mutex_lock(&isys->mutex);
+ isys->reset_needed = false;
+ mutex_unlock(&isys->mutex);
+
+ cpu_latency_qos_update_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ ipu_mmu_hw_cleanup(adev->mmu);
+
+ set_iwake_ltrdid(isys, 0, 0, LTR_ISYS_OFF);
+ return 0;
+}
+
+static int isys_suspend(struct device *dev)
+{
+ struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+ struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+
+ /* If stream is open, refuse to suspend */
+ if (isys->stream_opened)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int isys_resume(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops isys_pm_ops = {
+ .runtime_suspend = isys_runtime_pm_suspend,
+ .runtime_resume = isys_runtime_pm_resume,
+ .suspend = isys_suspend,
+ .resume = isys_resume,
+};
+
+#define ISYS_PM_OPS (&isys_pm_ops)
+#else
+#define ISYS_PM_OPS NULL
+#endif
+
+static void isys_remove(struct ipu_bus_device *adev)
+{
+ struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+ struct ipu_device *isp = adev->isp;
+ struct isys_fw_msgs *fwmsg, *safe;
+
+ dev_info(&adev->dev, "removed\n");
+#ifdef CONFIG_DEBUG_FS
+ if (isp->ipu_dir)
+ debugfs_remove_recursive(isys->debugfsdir);
+#endif
+
+ list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist, head) {
+ dma_free_attrs(&adev->dev, sizeof(struct isys_fw_msgs),
+ fwmsg, fwmsg->dma_addr,
+ 0);
+ }
+
+ list_for_each_entry_safe(fwmsg, safe, &isys->framebuflist_fw, head) {
+ dma_free_attrs(&adev->dev, sizeof(struct isys_fw_msgs),
+ fwmsg, fwmsg->dma_addr,
+			       0);
+ }
+
+ isys_iwake_watermark_cleanup(isys);
+
+ ipu_trace_uninit(&adev->dev);
+ isys_notifier_cleanup(isys);
+ isys_unregister_devices(isys);
+
+ cpu_latency_qos_remove_request(&isys->pm_qos);
+
+ if (!isp->secure_mode) {
+ ipu_cpd_free_pkg_dir(adev, isys->pkg_dir,
+ isys->pkg_dir_dma_addr,
+ isys->pkg_dir_size);
+ ipu_buttress_unmap_fw_image(adev, &isys->fw_sgt);
+ release_firmware(isys->fw);
+ }
+
+ mutex_destroy(&isys->stream_mutex);
+ mutex_destroy(&isys->mutex);
+
+ if (isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT) {
+ u32 trace_size = IPU_ISYS_SHORT_PACKET_TRACE_BUFFER_SIZE;
+
+ dma_free_coherent(&adev->dev, trace_size,
+ isys->short_packet_trace_buffer,
+ isys->short_packet_trace_buffer_dma_addr);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int ipu_isys_icache_prefetch_get(void *data, u64 *val)
+{
+ struct ipu_isys *isys = data;
+
+ *val = isys->icache_prefetch;
+ return 0;
+}
+
+static int ipu_isys_icache_prefetch_set(void *data, u64 val)
+{
+ struct ipu_isys *isys = data;
+
+ if (val != !!val)
+ return -EINVAL;
+
+ isys->icache_prefetch = val;
+
+ return 0;
+}
+
+static int isys_iwake_control_get(void *data, u64 *val)
+{
+ struct ipu_isys *isys = data;
+ struct isys_iwake_watermark *iwake_watermark = isys->iwake_watermark;
+
+ mutex_lock(&iwake_watermark->mutex);
+ *val = isys->iwake_watermark->force_iwake_disable;
+ mutex_unlock(&iwake_watermark->mutex);
+ return 0;
+}
+
+static int isys_iwake_control_set(void *data, u64 val)
+{
+ struct ipu_isys *isys = data;
+ struct isys_iwake_watermark *iwake_watermark;
+
+ if (val != !!val)
+ return -EINVAL;
+ /* If stream is open, refuse to set iwake */
+ if (isys->stream_opened)
+ return -EBUSY;
+
+ iwake_watermark = isys->iwake_watermark;
+ mutex_lock(&iwake_watermark->mutex);
+ isys->iwake_watermark->force_iwake_disable = !!val;
+ mutex_unlock(&iwake_watermark->mutex);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(isys_icache_prefetch_fops,
+ ipu_isys_icache_prefetch_get,
+ ipu_isys_icache_prefetch_set, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(isys_iwake_control_fops,
+ isys_iwake_control_get,
+ isys_iwake_control_set, "%llu\n");
+
+static int ipu_isys_init_debugfs(struct ipu_isys *isys)
+{
+ struct dentry *file;
+ struct dentry *dir;
+#ifdef IPU_ISYS_GPC
+ int ret;
+#endif
+
+ dir = debugfs_create_dir("isys", isys->adev->isp->ipu_dir);
+ if (IS_ERR(dir))
+ return -ENOMEM;
+
+ file = debugfs_create_file("icache_prefetch", 0600,
+ dir, isys, &isys_icache_prefetch_fops);
+ if (IS_ERR(file))
+ goto err;
+
+ file = debugfs_create_file("iwake_disable", 0600,
+ dir, isys, &isys_iwake_control_fops);
+ if (IS_ERR(file))
+ goto err;
+
+ isys->debugfsdir = dir;
+
+#ifdef IPU_ISYS_GPC
+ ret = ipu_isys_gpc_init_debugfs(isys);
+ if (ret)
+ return ret;
+#endif
+
+ return 0;
+err:
+ debugfs_remove_recursive(dir);
+ return -ENOMEM;
+}
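+
+/*
+ * Illustrative usage sketch (not part of the driver): the attributes above
+ * are created under the parent IPU debugfs directory, so with debugfs
+ * mounted at /sys/kernel/debug (the exact parent path is an assumption):
+ *
+ *	echo 1 > /sys/kernel/debug/<ipu>/isys/iwake_disable
+ *	cat /sys/kernel/debug/<ipu>/isys/icache_prefetch
+ *
+ * Both attributes accept only 0 or 1; any other value returns -EINVAL, and
+ * iwake_disable additionally returns -EBUSY while a stream is open.
+ */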
+#endif
+
+static int alloc_fw_msg_bufs(struct ipu_isys *isys, int amount)
+{
+ dma_addr_t dma_addr;
+ struct isys_fw_msgs *addr;
+ unsigned int i;
+ unsigned long flags;
+
+ for (i = 0; i < amount; i++) {
+ addr = dma_alloc_attrs(&isys->adev->dev,
+ sizeof(struct isys_fw_msgs),
+ &dma_addr, GFP_KERNEL,
+ 0);
+ if (!addr)
+ break;
+ addr->dma_addr = dma_addr;
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ list_add(&addr->head, &isys->framebuflist);
+ spin_unlock_irqrestore(&isys->listlock, flags);
+ }
+ if (i == amount)
+ return 0;
+ spin_lock_irqsave(&isys->listlock, flags);
+ while (!list_empty(&isys->framebuflist)) {
+ addr = list_first_entry(&isys->framebuflist,
+ struct isys_fw_msgs, head);
+ list_del(&addr->head);
+ spin_unlock_irqrestore(&isys->listlock, flags);
+ dma_free_attrs(&isys->adev->dev,
+ sizeof(struct isys_fw_msgs),
+ addr, addr->dma_addr,
+ 0);
+ spin_lock_irqsave(&isys->listlock, flags);
+ }
+ spin_unlock_irqrestore(&isys->listlock, flags);
+ return -ENOMEM;
+}
+
+struct isys_fw_msgs *ipu_get_fw_msg_buf(struct ipu_isys_pipeline *ip)
+{
+ struct ipu_isys_video *pipe_av =
+ container_of(ip, struct ipu_isys_video, ip);
+ struct ipu_isys *isys;
+ struct isys_fw_msgs *msg;
+ unsigned long flags;
+
+ isys = pipe_av->isys;
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ if (list_empty(&isys->framebuflist)) {
+ spin_unlock_irqrestore(&isys->listlock, flags);
+		dev_dbg(&isys->adev->dev, "Frame list empty - allocate more\n");
+
+ alloc_fw_msg_bufs(isys, 5);
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ if (list_empty(&isys->framebuflist)) {
+ spin_unlock_irqrestore(&isys->listlock, flags);
+			dev_err(&isys->adev->dev, "Frame list empty\n");
+ return NULL;
+ }
+ }
+ msg = list_last_entry(&isys->framebuflist, struct isys_fw_msgs, head);
+ list_move(&msg->head, &isys->framebuflist_fw);
+ spin_unlock_irqrestore(&isys->listlock, flags);
+ memset(&msg->fw_msg, 0, sizeof(msg->fw_msg));
+
+ return msg;
+}
+
+void ipu_cleanup_fw_msg_bufs(struct ipu_isys *isys)
+{
+ struct isys_fw_msgs *fwmsg, *fwmsg0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isys->listlock, flags);
+ list_for_each_entry_safe(fwmsg, fwmsg0, &isys->framebuflist_fw, head)
+ list_move(&fwmsg->head, &isys->framebuflist);
+ spin_unlock_irqrestore(&isys->listlock, flags);
+}
+
+void ipu_put_fw_mgs_buf(struct ipu_isys *isys, u64 data)
+{
+ struct isys_fw_msgs *msg;
+ unsigned long flags;
+ u64 *ptr = (u64 *)(unsigned long)data;
+
+ if (!ptr)
+ return;
+
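+	/*
+	 * data holds the address of the fw_msg union inside an isys_fw_msgs;
+	 * container_of() recovers the enclosing structure so the buffer can
+	 * be moved back to the free list.
+	 */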
+ spin_lock_irqsave(&isys->listlock, flags);
+ msg = container_of(ptr, struct isys_fw_msgs, fw_msg.dummy);
+ list_move(&msg->head, &isys->framebuflist);
+ spin_unlock_irqrestore(&isys->listlock, flags);
+}
+
+static int isys_probe(struct ipu_bus_device *adev)
+{
+ struct ipu_isys *isys;
+ struct ipu_device *isp = adev->isp;
+ const struct firmware *fw;
+ int rval = 0;
+
+ isys = devm_kzalloc(&adev->dev, sizeof(*isys), GFP_KERNEL);
+ if (!isys)
+ return -ENOMEM;
+
+ rval = ipu_mmu_hw_init(adev->mmu);
+ if (rval)
+ return rval;
+
+	/* By default, short packets are captured from the CSI-2 receiver. */
+ isys->short_packet_source = IPU_ISYS_SHORT_PACKET_FROM_RECEIVER;
+ isys->adev = adev;
+ isys->pdata = adev->pdata;
+
+ /* initial streamID for different sensor types */
+ if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) {
+ isys->sensor_info.vc1_data_start =
+ IPU6_FW_ISYS_VC1_SENSOR_DATA_START;
+ isys->sensor_info.vc1_data_end =
+ IPU6_FW_ISYS_VC1_SENSOR_DATA_END;
+ isys->sensor_info.vc0_data_start =
+ IPU6_FW_ISYS_VC0_SENSOR_DATA_START;
+ isys->sensor_info.vc0_data_end =
+ IPU6_FW_ISYS_VC0_SENSOR_DATA_END;
+ isys->sensor_info.vc1_pdaf_start =
+ IPU6_FW_ISYS_VC1_SENSOR_PDAF_START;
+ isys->sensor_info.vc1_pdaf_end =
+ IPU6_FW_ISYS_VC1_SENSOR_PDAF_END;
+ isys->sensor_info.sensor_metadata =
+ IPU6_FW_ISYS_SENSOR_METADATA;
+
+ isys->sensor_types[IPU_FW_ISYS_VC1_SENSOR_DATA] =
+ IPU6_FW_ISYS_VC1_SENSOR_DATA_START;
+ isys->sensor_types[IPU_FW_ISYS_VC1_SENSOR_PDAF] =
+ IPU6_FW_ISYS_VC1_SENSOR_PDAF_START;
+ isys->sensor_types[IPU_FW_ISYS_VC0_SENSOR_DATA] =
+ IPU6_FW_ISYS_VC0_SENSOR_DATA_START;
+ } else if (ipu_ver == IPU_VER_6SE) {
+ isys->sensor_info.vc1_data_start =
+ IPU6SE_FW_ISYS_VC1_SENSOR_DATA_START;
+ isys->sensor_info.vc1_data_end =
+ IPU6SE_FW_ISYS_VC1_SENSOR_DATA_END;
+ isys->sensor_info.vc0_data_start =
+ IPU6SE_FW_ISYS_VC0_SENSOR_DATA_START;
+ isys->sensor_info.vc0_data_end =
+ IPU6SE_FW_ISYS_VC0_SENSOR_DATA_END;
+ isys->sensor_info.vc1_pdaf_start =
+ IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_START;
+ isys->sensor_info.vc1_pdaf_end =
+ IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_END;
+ isys->sensor_info.sensor_metadata =
+ IPU6SE_FW_ISYS_SENSOR_METADATA;
+
+ isys->sensor_types[IPU_FW_ISYS_VC1_SENSOR_DATA] =
+ IPU6SE_FW_ISYS_VC1_SENSOR_DATA_START;
+ isys->sensor_types[IPU_FW_ISYS_VC1_SENSOR_PDAF] =
+ IPU6SE_FW_ISYS_VC1_SENSOR_PDAF_START;
+ isys->sensor_types[IPU_FW_ISYS_VC0_SENSOR_DATA] =
+ IPU6SE_FW_ISYS_VC0_SENSOR_DATA_START;
+ }
+
+ INIT_LIST_HEAD(&isys->requests);
+
+ spin_lock_init(&isys->lock);
+ spin_lock_init(&isys->power_lock);
+ isys->power = 0;
+
+ mutex_init(&isys->mutex);
+ mutex_init(&isys->stream_mutex);
+ mutex_init(&isys->lib_mutex);
+
+ spin_lock_init(&isys->listlock);
+ INIT_LIST_HEAD(&isys->framebuflist);
+ INIT_LIST_HEAD(&isys->framebuflist_fw);
+
+ dev_dbg(&adev->dev, "isys probe %p %p\n", adev, &adev->dev);
+ ipu_bus_set_drvdata(adev, isys);
+
+ isys->line_align = IPU_ISYS_2600_MEM_LINE_ALIGN;
+ isys->icache_prefetch = 0;
+
+#ifndef CONFIG_PM
+ isys_setup_hw(isys);
+#endif
+
+ if (!isp->secure_mode) {
+ fw = isp->cpd_fw;
+ rval = ipu_buttress_map_fw_image(adev, fw, &isys->fw_sgt);
+ if (rval)
+ goto release_firmware;
+
+ isys->pkg_dir =
+ ipu_cpd_create_pkg_dir(adev, isp->cpd_fw->data,
+ sg_dma_address(isys->fw_sgt.sgl),
+ &isys->pkg_dir_dma_addr,
+ &isys->pkg_dir_size);
+ if (!isys->pkg_dir) {
+ rval = -ENOMEM;
+ goto remove_shared_buffer;
+ }
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ /* Debug fs failure is not fatal. */
+ ipu_isys_init_debugfs(isys);
+#endif
+
+ ipu_trace_init(adev->isp, isys->pdata->base, &adev->dev,
+ isys_trace_blocks);
+
+ cpu_latency_qos_add_request(&isys->pm_qos, PM_QOS_DEFAULT_VALUE);
+ alloc_fw_msg_bufs(isys, 20);
+
+ rval = isys_register_devices(isys);
+ if (rval)
+ goto out_remove_pkg_dir_shared_buffer;
+ rval = isys_iwake_watermark_init(isys);
+ if (rval)
+ goto out_unregister_devices;
+
+ ipu_mmu_hw_cleanup(adev->mmu);
+
+ return 0;
+
+out_unregister_devices:
+ isys_iwake_watermark_cleanup(isys);
+ isys_unregister_devices(isys);
+out_remove_pkg_dir_shared_buffer:
+ if (!isp->secure_mode)
+ ipu_cpd_free_pkg_dir(adev, isys->pkg_dir,
+ isys->pkg_dir_dma_addr,
+ isys->pkg_dir_size);
+remove_shared_buffer:
+ if (!isp->secure_mode)
+ ipu_buttress_unmap_fw_image(adev, &isys->fw_sgt);
+release_firmware:
+ if (!isp->secure_mode)
+ release_firmware(isys->fw);
+ ipu_trace_uninit(&adev->dev);
+
+ mutex_destroy(&isys->mutex);
+ mutex_destroy(&isys->stream_mutex);
+
+ if (isys->short_packet_source == IPU_ISYS_SHORT_PACKET_FROM_TUNIT)
+ mutex_destroy(&isys->short_packet_tracing_mutex);
+
+ ipu_mmu_hw_cleanup(adev->mmu);
+
+ return rval;
+}
+
+struct fwmsg {
+ int type;
+ char *msg;
+ bool valid_ts;
+};
+
+static const struct fwmsg fw_msg[] = {
+ {IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE, "STREAM_OPEN_DONE", 0},
+ {IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK, "STREAM_CLOSE_ACK", 0},
+ {IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK, "STREAM_START_ACK", 0},
+ {IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK,
+ "STREAM_START_AND_CAPTURE_ACK", 0},
+ {IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK, "STREAM_STOP_ACK", 0},
+ {IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK, "STREAM_FLUSH_ACK", 0},
+ {IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY, "PIN_DATA_READY", 1},
+ {IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK, "STREAM_CAPTURE_ACK", 0},
+ {IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE,
+ "STREAM_START_AND_CAPTURE_DONE", 1},
+ {IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE, "STREAM_CAPTURE_DONE", 1},
+ {IPU_FW_ISYS_RESP_TYPE_FRAME_SOF, "FRAME_SOF", 1},
+ {IPU_FW_ISYS_RESP_TYPE_FRAME_EOF, "FRAME_EOF", 1},
+ {IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY, "STATS_READY", 1},
+ {-1, "UNKNOWN MESSAGE", 0},
+};
+
+static int resp_type_to_index(int type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_msg); i++)
+ if (fw_msg[i].type == type)
+ return i;
+
+ return i - 1;
+}
+
+int isys_isr_one(struct ipu_bus_device *adev)
+{
+ struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+ struct ipu_fw_isys_resp_info_abi resp_data;
+ struct ipu_fw_isys_resp_info_abi *resp;
+ struct ipu_isys_pipeline *pipe;
+ u64 ts;
+ unsigned int i;
+
+ if (!isys->fwcom)
+ return 0;
+
+ resp = ipu_fw_isys_get_resp(isys->fwcom, IPU_BASE_MSG_RECV_QUEUES,
+ &resp_data);
+ if (!resp)
+ return 1;
+
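+	/* Assemble the 64-bit timestamp from its two 32-bit halves. */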
+ ts = (u64)resp->timestamp[1] << 32 | resp->timestamp[0];
+
+ if (resp->error_info.error == IPU_FW_ISYS_ERROR_STREAM_IN_SUSPENSION)
+		/* Suspension is a special case: the firmware ran out of buffers */
+ dev_dbg(&adev->dev,
+ "hostlib: error resp %02d %s, stream %u, error SUSPENSION, details %d, timestamp 0x%16.16llx, pin %d\n",
+ resp->type,
+ fw_msg[resp_type_to_index(resp->type)].msg,
+ resp->stream_handle,
+ resp->error_info.error_details,
+ fw_msg[resp_type_to_index(resp->type)].valid_ts ?
+ ts : 0, resp->pin_id);
+ else if (resp->error_info.error)
+ dev_dbg(&adev->dev,
+ "hostlib: error resp %02d %s, stream %u, error %d, details %d, timestamp 0x%16.16llx, pin %d\n",
+ resp->type,
+ fw_msg[resp_type_to_index(resp->type)].msg,
+ resp->stream_handle,
+ resp->error_info.error, resp->error_info.error_details,
+ fw_msg[resp_type_to_index(resp->type)].valid_ts ?
+ ts : 0, resp->pin_id);
+ else
+ dev_dbg(&adev->dev,
+ "hostlib: resp %02d %s, stream %u, timestamp 0x%16.16llx, pin %d\n",
+ resp->type,
+ fw_msg[resp_type_to_index(resp->type)].msg,
+ resp->stream_handle,
+ fw_msg[resp_type_to_index(resp->type)].valid_ts ?
+ ts : 0, resp->pin_id);
+
+ if (resp->stream_handle >= IPU_ISYS_MAX_STREAMS) {
+ dev_err(&adev->dev, "bad stream handle %u\n",
+ resp->stream_handle);
+ goto leave;
+ }
+
+ pipe = isys->pipes[resp->stream_handle];
+ if (!pipe) {
+ dev_err(&adev->dev, "no pipeline for stream %u\n",
+ resp->stream_handle);
+ goto leave;
+ }
+ pipe->error = resp->error_info.error;
+
+ switch (resp->type) {
+ case IPU_FW_ISYS_RESP_TYPE_STREAM_OPEN_DONE:
+ ipu_put_fw_mgs_buf(ipu_bus_get_drvdata(adev), resp->buf_id);
+ complete(&pipe->stream_open_completion);
+ break;
+ case IPU_FW_ISYS_RESP_TYPE_STREAM_CLOSE_ACK:
+ complete(&pipe->stream_close_completion);
+ break;
+ case IPU_FW_ISYS_RESP_TYPE_STREAM_START_ACK:
+ complete(&pipe->stream_start_completion);
+ break;
+ case IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_ACK:
+ ipu_put_fw_mgs_buf(ipu_bus_get_drvdata(adev), resp->buf_id);
+ complete(&pipe->stream_start_completion);
+ break;
+ case IPU_FW_ISYS_RESP_TYPE_STREAM_STOP_ACK:
+ complete(&pipe->stream_stop_completion);
+ break;
+ case IPU_FW_ISYS_RESP_TYPE_STREAM_FLUSH_ACK:
+ complete(&pipe->stream_stop_completion);
+ break;
+ case IPU_FW_ISYS_RESP_TYPE_PIN_DATA_READY:
+ if (resp->pin_id < IPU_ISYS_OUTPUT_PINS &&
+ pipe->output_pins[resp->pin_id].pin_ready)
+ pipe->output_pins[resp->pin_id].pin_ready(pipe, resp);
+ else
+ dev_err(&adev->dev,
+ "%d:No data pin ready handler for pin id %d\n",
+ resp->stream_handle, resp->pin_id);
+ if (pipe->csi2)
+ ipu_isys_csi2_error(pipe->csi2);
+
+ break;
+ case IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_ACK:
+ break;
+ case IPU_FW_ISYS_RESP_TYPE_STREAM_START_AND_CAPTURE_DONE:
+ case IPU_FW_ISYS_RESP_TYPE_STREAM_CAPTURE_DONE:
+ if (pipe->interlaced) {
+ struct ipu_isys_buffer *ib, *ib_safe;
+ struct list_head list;
+ unsigned long flags;
+ unsigned int *ts = resp->timestamp;
+
+ if (pipe->isys->short_packet_source ==
+ IPU_ISYS_SHORT_PACKET_FROM_TUNIT)
+ pipe->cur_field =
+ ipu_isys_csi2_get_current_field(pipe, ts);
+
+ /*
+ * Move the pending buffers to a local temp list.
+ * Then we do not need to handle the lock during
+ * the loop.
+ */
+ spin_lock_irqsave(&pipe->short_packet_queue_lock,
+ flags);
+ list_cut_position(&list,
+ &pipe->pending_interlaced_bufs,
+ pipe->pending_interlaced_bufs.prev);
+ spin_unlock_irqrestore(&pipe->short_packet_queue_lock,
+ flags);
+
+ list_for_each_entry_safe(ib, ib_safe, &list, head) {
+ struct vb2_buffer *vb;
+
+ vb = ipu_isys_buffer_to_vb2_buffer(ib);
+ to_vb2_v4l2_buffer(vb)->field = pipe->cur_field;
+ list_del(&ib->head);
+
+ ipu_isys_queue_buf_done(ib);
+ }
+ }
+ for (i = 0; i < IPU_NUM_CAPTURE_DONE; i++)
+ if (pipe->capture_done[i])
+ pipe->capture_done[i] (pipe, resp);
+
+ break;
+ case IPU_FW_ISYS_RESP_TYPE_FRAME_SOF:
+ if (pipe->csi2)
+ ipu_isys_csi2_sof_event(pipe->csi2);
+
+ pipe->seq[pipe->seq_index].sequence =
+ atomic_read(&pipe->sequence) - 1;
+ pipe->seq[pipe->seq_index].timestamp = ts;
+ dev_dbg(&adev->dev,
+ "sof: handle %d: (index %u), timestamp 0x%16.16llx\n",
+ resp->stream_handle,
+ pipe->seq[pipe->seq_index].sequence, ts);
+ pipe->seq_index = (pipe->seq_index + 1)
+ % IPU_ISYS_MAX_PARALLEL_SOF;
+ break;
+ case IPU_FW_ISYS_RESP_TYPE_FRAME_EOF:
+ if (pipe->csi2)
+ ipu_isys_csi2_eof_event(pipe->csi2);
+
+ dev_dbg(&adev->dev,
+ "eof: handle %d: (index %u), timestamp 0x%16.16llx\n",
+ resp->stream_handle,
+ pipe->seq[pipe->seq_index].sequence, ts);
+ break;
+ case IPU_FW_ISYS_RESP_TYPE_STATS_DATA_READY:
+ break;
+ default:
+ dev_err(&adev->dev, "%d:unknown response type %u\n",
+ resp->stream_handle, resp->type);
+ break;
+ }
+
+leave:
+ ipu_fw_isys_put_resp(isys->fwcom, IPU_BASE_MSG_RECV_QUEUES);
+ return 0;
+}
+
+static struct ipu_bus_driver isys_driver = {
+ .probe = isys_probe,
+ .remove = isys_remove,
+ .isr = isys_isr,
+ .wanted = IPU_ISYS_NAME,
+ .drv = {
+ .name = IPU_ISYS_NAME,
+ .owner = THIS_MODULE,
+ .pm = ISYS_PM_OPS,
+ },
+};
+
+module_ipu_bus_driver(isys_driver);
+
+static const struct pci_device_id ipu_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6_PCI_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6SE_PCI_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_ADL_P_PCI_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_ADL_N_PCI_ID)},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, ipu_pci_tbl);
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
+MODULE_AUTHOR("Samu Onkalo <samu.onkalo@intel.com>");
+MODULE_AUTHOR("Jouni Högander <jouni.hogander@intel.com>");
+MODULE_AUTHOR("Jouni Ukkonen <jouni.ukkonen@intel.com>");
+MODULE_AUTHOR("Jianxu Zheng <jian.xu.zheng@intel.com>");
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Renwei Wu <renwei.wu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
+MODULE_AUTHOR("Zaikuo Wang <zaikuo.wang@intel.com>");
+MODULE_AUTHOR("Leifu Zhao <leifu.zhao@intel.com>");
+MODULE_AUTHOR("Xia Wu <xia.wu@intel.com>");
+MODULE_AUTHOR("Kun Jiang <kun.jiang@intel.com>");
+MODULE_AUTHOR("Yu Xia <yu.y.xia@intel.com>");
+MODULE_AUTHOR("Jerry Hu <jerry.w.hu@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ipu input system driver");
new file mode 100644
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_ISYS_H
+#define IPU_ISYS_H
+
+#include <linux/pm_qos.h>
+#include <linux/spinlock.h>
+
+#include <media/v4l2-device.h>
+#include <media/media-device.h>
+
+#include <uapi/linux/ipu-isys.h>
+
+#include "ipu.h"
+#include "ipu-isys-media.h"
+#include "ipu-isys-csi2.h"
+#include "ipu-isys-csi2-be.h"
+#include "ipu-isys-video.h"
+#include "ipu-pdata.h"
+#include "ipu-fw-isys.h"
+#include "ipu-platform-isys.h"
+
+#define IPU_ISYS_2600_MEM_LINE_ALIGN 64
+
+/* for TPG */
+#define IPU_ISYS_FREQ 533000000UL
+
+/*
+ * Current message queue configuration. These must be big enough
+ * so that they never get full. Queues are located in system memory.
+ */
+#define IPU_ISYS_SIZE_RECV_QUEUE 40
+#define IPU_ISYS_SIZE_SEND_QUEUE 40
+#define IPU_ISYS_SIZE_PROXY_RECV_QUEUE 5
+#define IPU_ISYS_SIZE_PROXY_SEND_QUEUE 5
+#define IPU_ISYS_NUM_RECV_QUEUE 1
+
+/*
+ * Device close takes some time from the last ack message to the actual
+ * stopping of the SP processor. As long as the SP processor runs, we can't
+ * proceed with cleaning up resources.
+ */
+#define IPU_ISYS_OPEN_TIMEOUT_US 1000
+#define IPU_ISYS_OPEN_RETRY 1000
+#define IPU_ISYS_TURNOFF_DELAY_US 1000
+#define IPU_ISYS_TURNOFF_TIMEOUT 1000
+#define IPU_LIB_CALL_TIMEOUT_JIFFIES \
+ msecs_to_jiffies(IPU_LIB_CALL_TIMEOUT_MS)
+
+#define IPU_ISYS_CSI2_LONG_PACKET_HEADER_SIZE 32
+#define IPU_ISYS_CSI2_LONG_PACKET_FOOTER_SIZE 32
+
+#define IPU_ISYS_MIN_WIDTH 1U
+#define IPU_ISYS_MIN_HEIGHT 1U
+#define IPU_ISYS_MAX_WIDTH 16384U
+#define IPU_ISYS_MAX_HEIGHT 16384U
+
+#define NR_OF_CSI2_BE_SOC_DEV 1
+
+/* the threshold granularity is 2KB on IPU6 */
+#define IPU6_SRAM_GRANULRITY_SHIFT 11
+#define IPU6_SRAM_GRANULRITY_SIZE 2048
+/* the threshold granularity is 1KB on IPU6SE */
+#define IPU6SE_SRAM_GRANULRITY_SHIFT 10
+#define IPU6SE_SRAM_GRANULRITY_SIZE 1024
+
+struct task_struct;
+
+struct ltr_did {
+ union {
+ u32 value;
+ struct {
+ u8 val0;
+ u8 val1;
+ u8 val2;
+ u8 val3;
+ } bits;
+ } lut_ltr;
+ union {
+ u32 value;
+ struct {
+ u8 th0;
+ u8 th1;
+ u8 th2;
+ u8 th3;
+ } bits;
+ } lut_fill_time;
+};
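+
+/*
+ * A hypothetical example of how the packed LUTs above are used (the register
+ * name, base and values here are made up for illustration): the u8 fields
+ * alias the 32-bit value, so a single write programs all four entries.
+ *
+ *	struct ltr_did did = { };
+ *
+ *	did.lut_ltr.bits.val0 = 8;
+ *	did.lut_ltr.bits.val1 = 16;
+ *	writel(did.lut_ltr.value, base + LTR_LUT_REG);
+ */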
+
+struct isys_iwake_watermark {
+ bool iwake_enabled;
+ bool force_iwake_disable;
+ u32 iwake_threshold;
+ u64 isys_pixelbuffer_datarate;
+ struct ltr_did ltrdid;
+ struct mutex mutex; /* protect whole struct */
+ struct ipu_isys *isys;
+ struct list_head video_list;
+};
+
+struct ipu_isys_sensor_info {
+ unsigned int vc1_data_start;
+ unsigned int vc1_data_end;
+ unsigned int vc0_data_start;
+ unsigned int vc0_data_end;
+ unsigned int vc1_pdaf_start;
+ unsigned int vc1_pdaf_end;
+ unsigned int sensor_metadata;
+};
+
+/*
+ * struct ipu_isys
+ *
+ * @media_dev: Media device
+ * @v4l2_dev: V4L2 device
+ * @adev: ISYS bus device
+ * @power: Is ISYS powered on or not?
+ * @isr_csi2_bits: Which CSI-2 IRQ bits does the ISR handle?
+ * @power_lock: Serialise access to power (power state in general)
+ * @csi2_rx_ctrl_cached: cached shared value between all CSI2 receivers
+ * @lock: serialise access to pipes
+ * @pipes: pipelines per stream ID
+ * @fwcom: fw communication layer private pointer
+ * or optional external library private pointer
+ * @line_align: line alignment in memory
+ * @reset_needed: Isys requires a d0i0->d0i3 transition
+ * @video_opened: total number of opened file handles on video nodes
+ * @mutex: serialise access isys video open/release related operations
+ * @stream_mutex: serialise stream start and stop, queueing requests
+ * @lib_mutex: optional external library mutex
+ * @pdata: platform data pointer
+ * @csi2: CSI-2 receivers
+ * @csi2_be: CSI-2 back-ends
+ * @fw: ISYS firmware binary (unsecure firmware)
+ * @fw_sgt: fw scatterlist
+ * @pkg_dir: host pointer to pkg_dir
+ * @pkg_dir_dma_addr: I/O virtual address for pkg_dir
+ * @pkg_dir_size: size of pkg_dir in bytes
+ * @short_packet_source: select short packet capture mode
+ */
+struct ipu_isys {
+ struct media_device media_dev;
+ struct v4l2_device v4l2_dev;
+ struct ipu_bus_device *adev;
+
+ int power;
+ spinlock_t power_lock; /* Serialise access to power */
+ u32 isr_csi2_bits;
+ u32 csi2_rx_ctrl_cached;
+ spinlock_t lock; /* Serialise access to pipes */
+ struct ipu_isys_pipeline *pipes[IPU_ISYS_MAX_STREAMS];
+ void *fwcom;
+ unsigned int line_align;
+ bool reset_needed;
+ bool icache_prefetch;
+ bool csi2_cse_ipc_not_supported;
+ unsigned int video_opened;
+ unsigned int stream_opened;
+ struct ipu_isys_sensor_info sensor_info;
+ unsigned int sensor_types[N_IPU_FW_ISYS_SENSOR_TYPE];
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfsdir;
+#endif
+ struct mutex mutex; /* Serialise isys video open/release related */
+ struct mutex stream_mutex; /* Stream start, stop, queueing reqs */
+ struct mutex lib_mutex; /* Serialise optional external library mutex */
+
+ struct ipu_isys_pdata *pdata;
+
+ struct ipu_isys_csi2 *csi2;
+ struct ipu_isys_csi2_be csi2_be;
+ struct ipu_isys_csi2_be_soc csi2_be_soc[NR_OF_CSI2_BE_SOC_DEV];
+ const struct firmware *fw;
+ struct sg_table fw_sgt;
+
+ u64 *pkg_dir;
+ dma_addr_t pkg_dir_dma_addr;
+ unsigned int pkg_dir_size;
+
+ struct list_head requests;
+ struct pm_qos_request pm_qos;
+ unsigned int short_packet_source;
+ struct ipu_isys_csi2_monitor_message *short_packet_trace_buffer;
+ dma_addr_t short_packet_trace_buffer_dma_addr;
+ unsigned int short_packet_tracing_count;
+ struct mutex short_packet_tracing_mutex; /* For tracing count */
+ u64 tsc_timer_base;
+ u64 tunit_timer_base;
+ spinlock_t listlock; /* Protect framebuflist */
+ struct list_head framebuflist;
+ struct list_head framebuflist_fw;
+ struct v4l2_async_notifier notifier;
+ struct isys_iwake_watermark *iwake_watermark;
+};
+
+void update_watermark_setting(struct ipu_isys *isys);
+
+struct isys_fw_msgs {
+ union {
+ u64 dummy;
+ struct ipu_fw_isys_frame_buff_set_abi frame;
+ struct ipu_fw_isys_stream_cfg_data_abi stream;
+ } fw_msg;
+ struct list_head head;
+ dma_addr_t dma_addr;
+};
+
+#define to_frame_msg_buf(a) (&(a)->fw_msg.frame)
+#define to_stream_cfg_msg_buf(a) (&(a)->fw_msg.stream)
+#define to_dma_addr(a) ((a)->dma_addr)
+
+struct isys_fw_msgs *ipu_get_fw_msg_buf(struct ipu_isys_pipeline *ip);
+void ipu_put_fw_mgs_buf(struct ipu_isys *isys, u64 data);
+void ipu_cleanup_fw_msg_bufs(struct ipu_isys *isys);
+
+extern const struct v4l2_ioctl_ops ipu_isys_ioctl_ops;
+
+void isys_setup_hw(struct ipu_isys *isys);
+int isys_isr_one(struct ipu_bus_device *adev);
+irqreturn_t isys_isr(struct ipu_bus_device *adev);
+#ifdef IPU_ISYS_GPC
+int ipu_isys_gpc_init_debugfs(struct ipu_isys *isys);
+#endif
+
+#endif /* IPU_ISYS_H */
new file mode 100644
@@ -0,0 +1,858 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2021 Intel Corporation
+
+#include <asm/cacheflush.h>
+
+#include <linux/device.h>
+#include <linux/iova.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+
+#include "ipu.h"
+#include "ipu-platform.h"
+#include "ipu-dma.h"
+#include "ipu-mmu.h"
+#include "ipu-platform-regs.h"
+
+#define ISP_PAGE_SHIFT 12
+#define ISP_PAGE_SIZE BIT(ISP_PAGE_SHIFT)
+#define ISP_PAGE_MASK (~(ISP_PAGE_SIZE - 1))
+
+#define ISP_L1PT_SHIFT 22
+#define ISP_L1PT_MASK (~((1U << ISP_L1PT_SHIFT) - 1))
+
+#define ISP_L2PT_SHIFT 12
+#define ISP_L2PT_MASK (~(ISP_L1PT_MASK | (~(ISP_PAGE_MASK))))
+
+#define ISP_L1PT_PTES 1024
+#define ISP_L2PT_PTES 1024
+
+#define ISP_PADDR_SHIFT 12
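+
+/*
+ * Worked example of the two-level lookup implied by the shifts above
+ * (illustrative only): for iova = 0x12345678,
+ *
+ *	l1 index    = iova >> ISP_L1PT_SHIFT                   = 0x048
+ *	l2 index    = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT = 0x345
+ *	page offset = iova & ~ISP_PAGE_MASK                    = 0x678
+ *
+ * so each L1 entry covers a 4 MiB range and each L2 entry one 4 KiB page.
+ */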
+
+#define REG_TLB_INVALIDATE 0x0000
+
+#define REG_L1_PHYS 0x0004 /* 27-bit pfn */
+#define REG_INFO 0x0008
+
+/* The range of stream ID i in L1 cache is from 0 to 15 */
+#define MMUV2_REG_L1_STREAMID(i) (0x0c + ((i) * 4))
+
+/* The range of stream ID i in L2 cache is from 0 to 15 */
+#define MMUV2_REG_L2_STREAMID(i) (0x4c + ((i) * 4))
+
+#define TBL_PHYS_ADDR(a) ((phys_addr_t)(a) << ISP_PADDR_SHIFT)
+
+static void tlb_invalidate(struct ipu_mmu *mmu)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mmu->ready_lock, flags);
+ if (!mmu->ready) {
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+ return;
+ }
+
+ for (i = 0; i < mmu->nr_mmus; i++) {
+ /*
+ * To avoid the HW bug induced dead lock in some of the IPU
+ * MMUs on successive invalidate calls, we need to first do a
+ * read to the page table base before writing the invalidate
+ * register. MMUs which need to implement this WA, will have
+ * the insert_read_before_invalidate flags set as true.
+ * Disregard the return value of the read.
+ */
+ if (mmu->mmu_hw[i].insert_read_before_invalidate)
+ readl(mmu->mmu_hw[i].base + REG_L1_PHYS);
+
+ writel(0xffffffff, mmu->mmu_hw[i].base +
+ REG_TLB_INVALIDATE);
+ /*
+		 * The TLB invalidation is a "single cycle" operation (in IOMMU
+		 * clock cycles). wmb() ensures the invalidate write is issued
+		 * before the CPU makes any further updates to the IOMMU page
+		 * table.
+ */
+ wmb();
+ }
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+}
+
+#ifdef DEBUG
+static void page_table_dump(struct ipu_mmu_info *mmu_info)
+{
+ u32 l1_idx;
+
+ dev_dbg(mmu_info->dev, "begin IOMMU page table dump\n");
+
+ for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
+ u32 l2_idx;
+ u32 iova = (phys_addr_t)l1_idx << ISP_L1PT_SHIFT;
+
+ if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval)
+ continue;
+ dev_dbg(mmu_info->dev,
+ "l1 entry %u; iovas 0x%8.8x-0x%8.8x, at %p\n",
+			l1_idx, iova, iova + (1U << ISP_L1PT_SHIFT),
+ (void *)TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]));
+
+ for (l2_idx = 0; l2_idx < ISP_L2PT_PTES; l2_idx++) {
+ u32 *l2_pt = mmu_info->l2_pts[l1_idx];
+ u32 iova2 = iova + (l2_idx << ISP_L2PT_SHIFT);
+
+ if (l2_pt[l2_idx] == mmu_info->dummy_page_pteval)
+ continue;
+
+ dev_dbg(mmu_info->dev,
+ "\tl2 entry %u; iova 0x%8.8x, phys %p\n",
+ l2_idx, iova2,
+ (void *)TBL_PHYS_ADDR(l2_pt[l2_idx]));
+ }
+ }
+
+ dev_dbg(mmu_info->dev, "end IOMMU page table dump\n");
+}
+#endif /* DEBUG */
+
+static dma_addr_t map_single(struct ipu_mmu_info *mmu_info, void *ptr)
+{
+ dma_addr_t dma;
+
+ dma = dma_map_single(mmu_info->dev, ptr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(mmu_info->dev, dma))
+ return 0;
+
+ return dma;
+}
+
+static int get_dummy_page(struct ipu_mmu_info *mmu_info)
+{
+ dma_addr_t dma;
+ void *pt = (void *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+
+ if (!pt)
+ return -ENOMEM;
+
+ dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt);
+
+ dma = map_single(mmu_info, pt);
+ if (!dma) {
+ dev_err(mmu_info->dev, "Failed to map dummy page\n");
+ goto err_free_page;
+ }
+
+ mmu_info->dummy_page = pt;
+ mmu_info->dummy_page_pteval = dma >> ISP_PAGE_SHIFT;
+
+ return 0;
+
+err_free_page:
+ free_page((unsigned long)pt);
+ return -ENOMEM;
+}
+
+static void free_dummy_page(struct ipu_mmu_info *mmu_info)
+{
+ dma_unmap_single(mmu_info->dev,
+ TBL_PHYS_ADDR(mmu_info->dummy_page_pteval),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->dummy_page);
+}
+
+static int alloc_dummy_l2_pt(struct ipu_mmu_info *mmu_info)
+{
+ dma_addr_t dma;
+ u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ int i;
+
+ if (!pt)
+ return -ENOMEM;
+
+ dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt);
+
+ dma = map_single(mmu_info, pt);
+ if (!dma) {
+ dev_err(mmu_info->dev, "Failed to map l2pt page\n");
+ goto err_free_page;
+ }
+
+ for (i = 0; i < ISP_L2PT_PTES; i++)
+ pt[i] = mmu_info->dummy_page_pteval;
+
+ mmu_info->dummy_l2_pt = pt;
+ mmu_info->dummy_l2_pteval = dma >> ISP_PAGE_SHIFT;
+
+ return 0;
+
+err_free_page:
+ free_page((unsigned long)pt);
+ return -ENOMEM;
+}
+
+static void free_dummy_l2_pt(struct ipu_mmu_info *mmu_info)
+{
+ dma_unmap_single(mmu_info->dev,
+ TBL_PHYS_ADDR(mmu_info->dummy_l2_pteval),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->dummy_l2_pt);
+}
+
+static u32 *alloc_l1_pt(struct ipu_mmu_info *mmu_info)
+{
+ dma_addr_t dma;
+ u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ int i;
+
+ if (!pt)
+ return NULL;
+
+ dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt);
+
+ for (i = 0; i < ISP_L1PT_PTES; i++)
+ pt[i] = mmu_info->dummy_l2_pteval;
+
+ dma = map_single(mmu_info, pt);
+ if (!dma) {
+ dev_err(mmu_info->dev, "Failed to map l1pt page\n");
+ goto err_free_page;
+ }
+
+ mmu_info->l1_pt_dma = dma >> ISP_PADDR_SHIFT;
+ dev_dbg(mmu_info->dev, "l1 pt %p mapped at %llx\n", pt, dma);
+
+ return pt;
+
+err_free_page:
+ free_page((unsigned long)pt);
+ return NULL;
+}
+
+static u32 *alloc_l2_pt(struct ipu_mmu_info *mmu_info)
+{
+ u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ int i;
+
+ if (!pt)
+ return NULL;
+
+ dev_dbg(mmu_info->dev, "%s get_zeroed_page() == %p\n", __func__, pt);
+
+	for (i = 0; i < ISP_L2PT_PTES; i++)
+ pt[i] = mmu_info->dummy_page_pteval;
+
+ return pt;
+}
+
+static int l2_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size)
+{
+ u32 l1_idx = iova >> ISP_L1PT_SHIFT;
+ u32 l1_entry;
+ u32 *l2_pt, *l2_virt;
+ u32 iova_start = iova;
+ unsigned int l2_idx;
+ unsigned long flags;
+ dma_addr_t dma;
+
+ dev_dbg(mmu_info->dev,
+ "mapping l2 page table for l1 index %u (iova %8.8x)\n",
+ l1_idx, (u32)iova);
+
+ spin_lock_irqsave(&mmu_info->lock, flags);
+ l1_entry = mmu_info->l1_pt[l1_idx];
+ if (l1_entry == mmu_info->dummy_l2_pteval) {
+ l2_virt = mmu_info->l2_pts[l1_idx];
+ if (likely(!l2_virt)) {
+ l2_virt = alloc_l2_pt(mmu_info);
+ if (!l2_virt) {
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+ return -ENOMEM;
+ }
+ }
+
+ dma = map_single(mmu_info, l2_virt);
+ if (!dma) {
+ dev_err(mmu_info->dev, "Failed to map l2pt page\n");
+ free_page((unsigned long)l2_virt);
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+ return -EINVAL;
+ }
+
+ l1_entry = dma >> ISP_PADDR_SHIFT;
+
+ dev_dbg(mmu_info->dev, "page for l1_idx %u %p allocated\n",
+ l1_idx, l2_virt);
+ mmu_info->l1_pt[l1_idx] = l1_entry;
+ mmu_info->l2_pts[l1_idx] = l2_virt;
+ clflush_cache_range(&mmu_info->l1_pt[l1_idx],
+ sizeof(mmu_info->l1_pt[l1_idx]));
+ }
+
+ l2_pt = mmu_info->l2_pts[l1_idx];
+
+ dev_dbg(mmu_info->dev, "l2_pt at %p with dma 0x%x\n", l2_pt, l1_entry);
+
+ paddr = ALIGN(paddr, ISP_PAGE_SIZE);
+
+ l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+
+ dev_dbg(mmu_info->dev, "l2_idx %u, phys 0x%8.8x\n", l2_idx,
+ l2_pt[l2_idx]);
+ if (l2_pt[l2_idx] != mmu_info->dummy_page_pteval) {
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+ return -EINVAL;
+ }
+
+ l2_pt[l2_idx] = paddr >> ISP_PADDR_SHIFT;
+
+ clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+ dev_dbg(mmu_info->dev, "l2 index %u mapped as 0x%8.8x\n", l2_idx,
+ l2_pt[l2_idx]);
+
+ return 0;
+}
+
+static int __ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size)
+{
+ u32 iova_start = round_down(iova, ISP_PAGE_SIZE);
+ u32 iova_end = ALIGN(iova + size, ISP_PAGE_SIZE);
+
+ dev_dbg(mmu_info->dev,
+ "mapping iova 0x%8.8x--0x%8.8x, size %zu at paddr 0x%10.10llx\n",
+ iova_start, iova_end, size, paddr);
+
+ return l2_map(mmu_info, iova_start, paddr, size);
+}
+
+static size_t l2_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t dummy, size_t size)
+{
+ u32 l1_idx = iova >> ISP_L1PT_SHIFT;
+ u32 *l2_pt;
+ u32 iova_start = iova;
+ unsigned int l2_idx;
+ size_t unmapped = 0;
+ unsigned long flags;
+
+ dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
+ l1_idx, iova);
+
+ spin_lock_irqsave(&mmu_info->lock, flags);
+ if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+ dev_err(mmu_info->dev,
+ "unmap iova 0x%8.8lx l1 idx %u which was not mapped\n",
+ iova, l1_idx);
+ return 0;
+ }
+
+ for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+ (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
+ < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
+ l2_pt = mmu_info->l2_pts[l1_idx];
+ dev_dbg(mmu_info->dev,
+ "unmap l2 index %u with pteval 0x%10.10llx\n",
+ l2_idx, TBL_PHYS_ADDR(l2_pt[l2_idx]));
+ l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
+
+ clflush_cache_range(&l2_pt[l2_idx], sizeof(l2_pt[l2_idx]));
+ unmapped++;
+ }
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+ return unmapped << ISP_PAGE_SHIFT;
+}
+
+static size_t __ipu_mmu_unmap(struct ipu_mmu_info *mmu_info,
+ unsigned long iova, size_t size)
+{
+ return l2_unmap(mmu_info, iova, 0, size);
+}
+
+static int allocate_trash_buffer(struct ipu_mmu *mmu)
+{
+ unsigned int n_pages = PAGE_ALIGN(IPU_MMUV2_TRASH_RANGE) >> PAGE_SHIFT;
+ struct iova *iova;
+ u32 iova_addr;
+ unsigned int i;
+ dma_addr_t dma;
+ int ret;
+
+ /* Allocate 8MB in iova range */
+ iova = alloc_iova(&mmu->dmap->iovad, n_pages,
+ mmu->dmap->mmu_info->aperture_end >> PAGE_SHIFT, 0);
+ if (!iova) {
+ dev_err(mmu->dev, "cannot allocate iova range for trash\n");
+ return -ENOMEM;
+ }
+
+ dma = dma_map_page(mmu->dmap->mmu_info->dev, mmu->trash_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(mmu->dmap->mmu_info->dev, dma)) {
+ dev_err(mmu->dmap->mmu_info->dev, "Failed to map trash page\n");
+ ret = -ENOMEM;
+ goto out_free_iova;
+ }
+
+ mmu->pci_trash_page = dma;
+
+ /*
+ * Map the 8MB iova address range to the same physical trash page
+ * mmu->trash_page which is already reserved at the probe
+ */
+ iova_addr = iova->pfn_lo;
+ for (i = 0; i < n_pages; i++) {
+ ret = ipu_mmu_map(mmu->dmap->mmu_info, iova_addr << PAGE_SHIFT,
+ mmu->pci_trash_page, PAGE_SIZE);
+ if (ret) {
+ dev_err(mmu->dev,
+ "mapping trash buffer range failed\n");
+ goto out_unmap;
+ }
+
+ iova_addr++;
+ }
+
+ mmu->iova_trash_page = iova->pfn_lo << PAGE_SHIFT;
+ dev_dbg(mmu->dev, "iova trash buffer for MMUID: %d is %u\n",
+ mmu->mmid, (unsigned int)mmu->iova_trash_page);
+ return 0;
+
+out_unmap:
+ ipu_mmu_unmap(mmu->dmap->mmu_info, iova->pfn_lo << PAGE_SHIFT,
+ (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT);
+ dma_unmap_page(mmu->dmap->mmu_info->dev, mmu->pci_trash_page,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+out_free_iova:
+ __free_iova(&mmu->dmap->iovad, iova);
+ return ret;
+}
+
+int ipu_mmu_hw_init(struct ipu_mmu *mmu)
+{
+ unsigned int i;
+ unsigned long flags;
+ struct ipu_mmu_info *mmu_info;
+
+ dev_dbg(mmu->dev, "mmu hw init\n");
+
+ mmu_info = mmu->dmap->mmu_info;
+
+	/* Initialise each MMU HW block */
+ for (i = 0; i < mmu->nr_mmus; i++) {
+ struct ipu_mmu_hw *mmu_hw = &mmu->mmu_hw[i];
+ unsigned int j;
+ u16 block_addr;
+
+ /* Write page table address per MMU */
+ writel((phys_addr_t)mmu_info->l1_pt_dma,
+ mmu->mmu_hw[i].base + REG_L1_PHYS);
+
+ /* Set info bits per MMU */
+ writel(mmu->mmu_hw[i].info_bits,
+ mmu->mmu_hw[i].base + REG_INFO);
+
+ /* Configure MMU TLB stream configuration for L1 */
+ for (j = 0, block_addr = 0; j < mmu_hw->nr_l1streams;
+ block_addr += mmu->mmu_hw[i].l1_block_sz[j], j++) {
+ if (block_addr > IPU_MAX_LI_BLOCK_ADDR) {
+ dev_err(mmu->dev, "invalid L1 configuration\n");
+ return -EINVAL;
+ }
+
+			/* Write the block start address for each stream */
+ writel(block_addr, mmu_hw->base +
+ mmu_hw->l1_stream_id_reg_offset + 4 * j);
+ }
+
+ /* Configure MMU TLB stream configuration for L2 */
+ for (j = 0, block_addr = 0; j < mmu_hw->nr_l2streams;
+ block_addr += mmu->mmu_hw[i].l2_block_sz[j], j++) {
+ if (block_addr > IPU_MAX_L2_BLOCK_ADDR) {
+ dev_err(mmu->dev, "invalid L2 configuration\n");
+ return -EINVAL;
+ }
+
+ writel(block_addr, mmu_hw->base +
+ mmu_hw->l2_stream_id_reg_offset + 4 * j);
+ }
+ }
+
+ if (!mmu->trash_page) {
+ int ret;
+
+ mmu->trash_page = alloc_page(GFP_KERNEL);
+ if (!mmu->trash_page) {
+ dev_err(mmu->dev, "insufficient memory for trash buffer\n");
+ return -ENOMEM;
+ }
+
+ ret = allocate_trash_buffer(mmu);
+ if (ret) {
+ __free_page(mmu->trash_page);
+ mmu->trash_page = NULL;
+ dev_err(mmu->dev, "trash buffer allocation failed\n");
+ return ret;
+ }
+ }
+
+ spin_lock_irqsave(&mmu->ready_lock, flags);
+ mmu->ready = true;
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_mmu_hw_init);
+
+static struct ipu_mmu_info *ipu_mmu_alloc(struct ipu_device *isp)
+{
+ struct ipu_mmu_info *mmu_info;
+ int ret;
+
+ mmu_info = kzalloc(sizeof(*mmu_info), GFP_KERNEL);
+ if (!mmu_info)
+ return NULL;
+
+ mmu_info->aperture_start = 0;
+ mmu_info->aperture_end = DMA_BIT_MASK(isp->secure_mode ?
+ IPU_MMU_ADDRESS_BITS :
+ IPU_MMU_ADDRESS_BITS_NON_SECURE);
+ mmu_info->pgsize_bitmap = SZ_4K;
+ mmu_info->dev = &isp->pdev->dev;
+
+ ret = get_dummy_page(mmu_info);
+ if (ret)
+ goto err_free_info;
+
+ ret = alloc_dummy_l2_pt(mmu_info);
+ if (ret)
+ goto err_free_dummy_page;
+
+	mmu_info->l2_pts = vzalloc(ISP_L1PT_PTES * sizeof(*mmu_info->l2_pts));
+ if (!mmu_info->l2_pts)
+ goto err_free_dummy_l2_pt;
+
+ /*
+	 * We always DMA-map the L1 page table (a single page), as well as
+	 * the L2 page tables.
+ */
+ mmu_info->l1_pt = alloc_l1_pt(mmu_info);
+ if (!mmu_info->l1_pt)
+ goto err_free_l2_pts;
+
+ spin_lock_init(&mmu_info->lock);
+
+ dev_dbg(mmu_info->dev, "domain initialised\n");
+
+ return mmu_info;
+
+err_free_l2_pts:
+ vfree(mmu_info->l2_pts);
+err_free_dummy_l2_pt:
+ free_dummy_l2_pt(mmu_info);
+err_free_dummy_page:
+ free_dummy_page(mmu_info);
+err_free_info:
+ kfree(mmu_info);
+
+ return NULL;
+}
+
+int ipu_mmu_hw_cleanup(struct ipu_mmu *mmu)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mmu->ready_lock, flags);
+ mmu->ready = false;
+ spin_unlock_irqrestore(&mmu->ready_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_mmu_hw_cleanup);
+
+static struct ipu_dma_mapping *alloc_dma_mapping(struct ipu_device *isp)
+{
+ struct ipu_dma_mapping *dmap;
+
+ dmap = kzalloc(sizeof(*dmap), GFP_KERNEL);
+ if (!dmap)
+ return NULL;
+
+ dmap->mmu_info = ipu_mmu_alloc(isp);
+ if (!dmap->mmu_info) {
+ kfree(dmap);
+ return NULL;
+ }
+ init_iova_domain(&dmap->iovad, SZ_4K, 1);
+ dmap->mmu_info->dmap = dmap;
+
+ kref_init(&dmap->ref);
+
+ dev_dbg(&isp->pdev->dev, "alloc mapping\n");
+
+ iova_cache_get();
+
+ return dmap;
+}
+
+phys_addr_t ipu_mmu_iova_to_phys(struct ipu_mmu_info *mmu_info,
+ dma_addr_t iova)
+{
+ unsigned long flags;
+ u32 *l2_pt;
+ phys_addr_t phy_addr;
+
+ spin_lock_irqsave(&mmu_info->lock, flags);
+ l2_pt = mmu_info->l2_pts[iova >> ISP_L1PT_SHIFT];
+ phy_addr = (phys_addr_t)l2_pt[(iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT];
+ phy_addr <<= ISP_PAGE_SHIFT;
+ spin_unlock_irqrestore(&mmu_info->lock, flags);
+
+ return phy_addr;
+}
+
+/*
+ * The following functions are adapted from the generic IOMMU code in
+ * drivers/iommu/iommu.c; see iommu_pgsize(), iommu_unmap() and iommu_map().
+ */
+static size_t ipu_mmu_pgsize(unsigned long pgsize_bitmap,
+ unsigned long addr_merge, size_t size)
+{
+ unsigned int pgsize_idx;
+ size_t pgsize;
+
+ /* Max page size that still fits into 'size' */
+ pgsize_idx = __fls(size);
+
+ /* need to consider alignment requirements ? */
+ if (likely(addr_merge)) {
+ /* Max page size allowed by address */
+ unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+ pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+ }
+
+ /* build a mask of acceptable page sizes */
+ pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+ /* throw away page sizes not supported by the hardware */
+ pgsize &= pgsize_bitmap;
+
+ /* make sure we're still sane */
+ WARN_ON(!pgsize);
+
+ /* pick the biggest page */
+ pgsize_idx = __fls(pgsize);
+ pgsize = 1UL << pgsize_idx;
+
+ return pgsize;
+}
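+
+/*
+ * Worked example (illustrative): with pgsize_bitmap = SZ_4K every request
+ * collapses to 4 KiB pages. For addr_merge = 0x3000 and size = 0x5000:
+ * pgsize_idx = min(__fls(0x5000), __ffs(0x3000)) = min(14, 12) = 12,
+ * mask = 0x1fff, 0x1fff & SZ_4K = SZ_4K, hence pgsize = 4 KiB.
+ */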
+
+/* drivers/iommu/iommu.c:iommu_unmap() */
+size_t ipu_mmu_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
+ size_t size)
+{
+ size_t unmapped_page, unmapped = 0;
+ unsigned int min_pagesz;
+
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);
+
+ /*
+ * The virtual address, as well as the size of the mapping, must be
+ * aligned (at least) to the size of the smallest page supported
+ * by the hardware
+ */
+ if (!IS_ALIGNED(iova | size, min_pagesz)) {
+		dev_err(mmu_info->dev,
+			"unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+ iova, size, min_pagesz);
+ return -EINVAL;
+ }
+
+ /*
+ * Keep iterating until we either unmap 'size' bytes (or more)
+ * or we hit an area that isn't mapped.
+ */
+ while (unmapped < size) {
+ size_t pgsize = ipu_mmu_pgsize(mmu_info->pgsize_bitmap,
+ iova, size - unmapped);
+
+ unmapped_page = __ipu_mmu_unmap(mmu_info, iova, pgsize);
+ if (!unmapped_page)
+ break;
+
+ dev_dbg(mmu_info->dev, "unmapped: iova 0x%lx size 0x%zx\n",
+ iova, unmapped_page);
+
+ iova += unmapped_page;
+ unmapped += unmapped_page;
+ }
+
+ return unmapped;
+}
+
+/* drivers/iommu/iommu.c:iommu_map() */
+int ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size)
+{
+ unsigned long orig_iova = iova;
+ unsigned int min_pagesz;
+ size_t orig_size = size;
+ int ret = 0;
+
+ if (mmu_info->pgsize_bitmap == 0UL)
+ return -ENODEV;
+
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);
+
+ /*
+ * both the virtual address and the physical one, as well as
+ * the size of the mapping, must be aligned (at least) to the
+ * size of the smallest page supported by the hardware
+ */
+ if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+ dev_err(mmu_info->dev,
+ "unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
+ iova, &paddr, size, min_pagesz);
+ return -EINVAL;
+ }
+
+ dev_dbg(mmu_info->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
+ iova, &paddr, size);
+
+ while (size) {
+ size_t pgsize = ipu_mmu_pgsize(mmu_info->pgsize_bitmap,
+ iova | paddr, size);
+
+ dev_dbg(mmu_info->dev,
+ "mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
+ iova, &paddr, pgsize);
+
+ ret = __ipu_mmu_map(mmu_info, iova, paddr, pgsize);
+ if (ret)
+ break;
+
+ iova += pgsize;
+ paddr += pgsize;
+ size -= pgsize;
+ }
+
+ /* unroll mapping in case something went wrong */
+ if (ret)
+ ipu_mmu_unmap(mmu_info, orig_iova, orig_size - size);
+
+ return ret;
+}
+
+static void ipu_mmu_destroy(struct ipu_mmu *mmu)
+{
+ struct ipu_dma_mapping *dmap = mmu->dmap;
+ struct ipu_mmu_info *mmu_info = dmap->mmu_info;
+ struct iova *iova;
+ u32 l1_idx;
+
+ if (mmu->iova_trash_page) {
+ iova = find_iova(&dmap->iovad,
+ mmu->iova_trash_page >> PAGE_SHIFT);
+ if (iova) {
+ /* unmap and free the trash buffer iova */
+ ipu_mmu_unmap(mmu_info, iova->pfn_lo << PAGE_SHIFT,
+ (iova->pfn_hi - iova->pfn_lo + 1) <<
+ PAGE_SHIFT);
+ __free_iova(&dmap->iovad, iova);
+ } else {
+ dev_err(mmu->dev, "trash buffer iova not found.\n");
+ }
+
+ mmu->iova_trash_page = 0;
+ dma_unmap_page(mmu_info->dev, mmu->pci_trash_page,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ mmu->pci_trash_page = 0;
+ __free_page(mmu->trash_page);
+ }
+
+ for (l1_idx = 0; l1_idx < ISP_L1PT_PTES; l1_idx++) {
+ if (mmu_info->l1_pt[l1_idx] != mmu_info->dummy_l2_pteval) {
+ dma_unmap_single(mmu_info->dev,
+ TBL_PHYS_ADDR(mmu_info->l1_pt[l1_idx]),
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->l2_pts[l1_idx]);
+ }
+ }
+
+ free_dummy_page(mmu_info);
+ dma_unmap_single(mmu_info->dev, mmu_info->l1_pt_dma << ISP_PADDR_SHIFT,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)mmu_info->dummy_l2_pt);
+ free_page((unsigned long)mmu_info->l1_pt);
+ kfree(mmu_info);
+}
+
+struct ipu_mmu *ipu_mmu_init(struct device *dev,
+ void __iomem *base, int mmid,
+ const struct ipu_hw_variants *hw)
+{
+ struct ipu_mmu *mmu;
+ struct ipu_mmu_pdata *pdata;
+ struct ipu_device *isp = pci_get_drvdata(to_pci_dev(dev));
+ unsigned int i;
+
+ if (hw->nr_mmus > IPU_MMU_MAX_DEVICES)
+ return ERR_PTR(-EINVAL);
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < hw->nr_mmus; i++) {
+ struct ipu_mmu_hw *pdata_mmu = &pdata->mmu_hw[i];
+ const struct ipu_mmu_hw *src_mmu = &hw->mmu_hw[i];
+
+ if (src_mmu->nr_l1streams > IPU_MMU_MAX_TLB_L1_STREAMS ||
+ src_mmu->nr_l2streams > IPU_MMU_MAX_TLB_L2_STREAMS)
+ return ERR_PTR(-EINVAL);
+
+ *pdata_mmu = *src_mmu;
+ pdata_mmu->base = base + src_mmu->offset;
+ }
+
+ mmu = devm_kzalloc(dev, sizeof(*mmu), GFP_KERNEL);
+ if (!mmu)
+ return ERR_PTR(-ENOMEM);
+
+ mmu->mmid = mmid;
+ mmu->mmu_hw = pdata->mmu_hw;
+ mmu->nr_mmus = hw->nr_mmus;
+ mmu->tlb_invalidate = tlb_invalidate;
+ mmu->ready = false;
+ INIT_LIST_HEAD(&mmu->vma_list);
+ spin_lock_init(&mmu->ready_lock);
+
+ mmu->dmap = alloc_dma_mapping(isp);
+ if (!mmu->dmap) {
+ dev_err(dev, "can't alloc dma mapping\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return mmu;
+}
+
+void ipu_mmu_cleanup(struct ipu_mmu *mmu)
+{
+ struct ipu_dma_mapping *dmap = mmu->dmap;
+
+ ipu_mmu_destroy(mmu);
+ mmu->dmap = NULL;
+ iova_cache_put();
+ put_iova_domain(&dmap->iovad);
+ kfree(dmap);
+}
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
+MODULE_AUTHOR("Samu Onkalo <samu.onkalo@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ipu mmu driver");
new file mode 100644
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2021 Intel Corporation */
+
+#ifndef IPU_MMU_H
+#define IPU_MMU_H
+
+#include <linux/dma-mapping.h>
+
+#include "ipu.h"
+#include "ipu-pdata.h"
+
+#define ISYS_MMID 1
+#define PSYS_MMID 0
+
+/*
+ * @pgtbl: virtual address of the l1 page table (one page)
+ */
+struct ipu_mmu_info {
+ struct device *dev;
+
+ u32 __iomem *l1_pt;
+ u32 l1_pt_dma;
+ u32 **l2_pts;
+
+ u32 *dummy_l2_pt;
+ u32 dummy_l2_pteval;
+ void *dummy_page;
+ u32 dummy_page_pteval;
+
+ dma_addr_t aperture_start;
+ dma_addr_t aperture_end;
+ unsigned long pgsize_bitmap;
+
+ spinlock_t lock; /* Serialize access to users */
+ struct ipu_dma_mapping *dmap;
+};
+
+/*
+ * @pgtbl: physical address of the l1 page table
+ */
+struct ipu_mmu {
+ struct list_head node;
+
+ struct ipu_mmu_hw *mmu_hw;
+ unsigned int nr_mmus;
+ int mmid;
+
+ phys_addr_t pgtbl;
+ struct device *dev;
+
+ struct ipu_dma_mapping *dmap;
+ struct list_head vma_list;
+
+ struct page *trash_page;
+ dma_addr_t pci_trash_page; /* IOVA from PCI DMA services (parent) */
+ dma_addr_t iova_trash_page; /* IOVA for IPU child nodes to use */
+
+ bool ready;
+ spinlock_t ready_lock; /* Serialize access to bool ready */
+
+ void (*tlb_invalidate)(struct ipu_mmu *mmu);
+};
+
+struct ipu_mmu *ipu_mmu_init(struct device *dev,
+ void __iomem *base, int mmid,
+ const struct ipu_hw_variants *hw);
+void ipu_mmu_cleanup(struct ipu_mmu *mmu);
+int ipu_mmu_hw_init(struct ipu_mmu *mmu);
+int ipu_mmu_hw_cleanup(struct ipu_mmu *mmu);
+int ipu_mmu_map(struct ipu_mmu_info *mmu_info, unsigned long iova,
+ phys_addr_t paddr, size_t size);
+size_t ipu_mmu_unmap(struct ipu_mmu_info *mmu_info, unsigned long iova,
+ size_t size);
+phys_addr_t ipu_mmu_iova_to_phys(struct ipu_mmu_info *mmu_info,
+ dma_addr_t iova);
+#endif
new file mode 100644
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2021 Intel Corporation */
+
+#ifndef IPU_PDATA_H
+#define IPU_PDATA_H
+
+#define IPU_MMU_NAME IPU_NAME "-mmu"
+#define IPU_ISYS_CSI2_NAME IPU_NAME "-csi2"
+#define IPU_ISYS_NAME IPU_NAME "-isys"
+#define IPU_PSYS_NAME IPU_NAME "-psys"
+#define IPU_BUTTRESS_NAME IPU_NAME "-buttress"
+
+#define IPU_MMU_MAX_DEVICES 4
+#define IPU_MMU_ADDRESS_BITS 32
+/* The firmware is accessible within the first 2 GiB only in non-secure mode. */
+#define IPU_MMU_ADDRESS_BITS_NON_SECURE 31
+
+#define IPU_MMU_MAX_TLB_L1_STREAMS 32
+#define IPU_MMU_MAX_TLB_L2_STREAMS 32
+#define IPU_MAX_LI_BLOCK_ADDR 128
+#define IPU_MAX_L2_BLOCK_ADDR 64
+
+#define IPU_ISYS_MAX_CSI2_LEGACY_PORTS 4
+#define IPU_ISYS_MAX_CSI2_COMBO_PORTS 2
+
+#define IPU_MAX_FRAME_COUNTER 0xff
+
+/*
+ * To maximize IOSF utilization, the IPU needs to send requests in bursts.
+ * At the DMA interface with the buttress, there are CDC FIFOs with burst
+ * collection capability. The CDC FIFO burst collectors have a configurable
+ * threshold, set based on the outcome of performance measurements.
+ *
+ * isys has 3 ports with an IOSF interface, for VC0, VC1 and VC2.
+ * psys has 4 ports with an IOSF interface, for VC0, VC1w, VC1r and VC2.
+ *
+ * The threshold values are pre-defined, arrived at after performance
+ * evaluations on each type of IPU.
+ */
+#define IPU_MAX_VC_IOSF_PORTS 4
+
+/*
+ * The IPU must configure the correct arbitration mechanism for the IOSF VC
+ * requests. There are two options for each of VC0 and VC1: 0 means
+ * rearbitrate on stall and 1 means stall until the request is completed.
+ */
+#define IPU_BTRS_ARB_MODE_TYPE_REARB 0
+#define IPU_BTRS_ARB_MODE_TYPE_STALL 1
+
+/* Currently chosen arbitration mechanism for VC0 */
+#define IPU_BTRS_ARB_STALL_MODE_VC0 \
+ IPU_BTRS_ARB_MODE_TYPE_REARB
+
+/* Currently chosen arbitration mechanism for VC1 */
+#define IPU_BTRS_ARB_STALL_MODE_VC1 \
+ IPU_BTRS_ARB_MODE_TYPE_REARB
+
+struct ipu_isys_subdev_pdata;
+
+/*
+ * MMU Invalidation HW bug workaround by ZLW mechanism
+ *
+ * Old IPU MMUV2 has a bug in the invalidation mechanism which might result in
+ * a wrong translation or a replication of the translation, causing data
+ * corruption. So we cannot directly use the MMUV2 invalidation registers
+ * to invalidate the MMU. Instead, whenever an invalidate is called, we need to
+ * clear the TLB by evicting all valid translations: we fill it with trash
+ * buffer translations (the trash buffer is guaranteed not to be used by any
+ * other process). ZLW is used to fill the L1 and L2 caches with the trash
+ * buffer translations. ZLW, or Zero Length Write, is a pre-fetch mechanism
+ * that brings pages into the L1 and L2 caches in advance without triggering
+ * any memory operations.
+ *
+ * In MMUV2, L1 -> 16 streams and 64 blocks, maximum 16 blocks per stream.
+ * One L1 block has 16 entries, hence points to 16 * 4K pages.
+ * L2 -> 16 streams and 32 blocks, 2 blocks per stream.
+ * One L2 block maps to 1024 L1 entries, hence points to a 4MB address range,
+ * so with 2 blocks per L2 stream, one stream points to an 8MB range.
+ *
+ * As we need to clear the caches and 8MB is the biggest cache size, we need
+ * a trash buffer which covers an 8MB address range. As these trash buffers
+ * are never used for any memory transactions, we need only a minimal amount
+ * of physical memory. So we reserve an 8MB IOVA address range but only one
+ * page of physical memory, and map every page of that 8MB IOVA range to the
+ * same physical page.
+ */
+/* One L2 entry maps 1024 L1 entries and one L1 entry per page */
+#define IPU_MMUV2_L2_RANGE (1024 * PAGE_SIZE)
+/* Max L2 blocks per stream */
+#define IPU_MMUV2_MAX_L2_BLOCKS 2
+/* Max L1 blocks per stream */
+#define IPU_MMUV2_MAX_L1_BLOCKS 16
+#define IPU_MMUV2_TRASH_RANGE (IPU_MMUV2_L2_RANGE * \
+ IPU_MMUV2_MAX_L2_BLOCKS)
+/* Entries per L1 block */
+#define MMUV2_ENTRIES_PER_L1_BLOCK 16
+#define MMUV2_TRASH_L1_BLOCK_OFFSET (MMUV2_ENTRIES_PER_L1_BLOCK * \
+ PAGE_SIZE)
+#define MMUV2_TRASH_L2_BLOCK_OFFSET IPU_MMUV2_L2_RANGE
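+
+/*
+ * With 4 KiB pages the arithmetic above works out as follows:
+ *	IPU_MMUV2_L2_RANGE    = 1024 * 4 KiB          = 4 MiB
+ *	IPU_MMUV2_TRASH_RANGE = 4 MiB * 2 (L2 blocks) = 8 MiB
+ * which is the 8MB IOVA range reserved by allocate_trash_buffer(), all
+ * backed by the single physical trash page.
+ */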
+
+/*
+ * In some of the IPU MMUs, there is provision to configure L1 and L2 page
+ * table caches. Both the L1 and L2 caches are divided into multiple sections
+ * called streams, with a maximum of 16 streams per cache. Each of these
+ * sections is subdivided into multiple blocks. When nr_l1streams = 0 and
+ * nr_l2streams = 0, the MMU is of type MMU_V1 and does not support
+ * L1/L2 page table caches.
+ *
+ * L1 stream per block sizes are configurable and varies per usecase.
+ * L2 has constant block sizes - 2 blocks per stream.
+ *
+ * MMU1 supports pre-fetching of pages to reduce cache lookup misses. To
+ * enable pre-fetching, the MMU1 AT (Address Translator) device registers
+ * need to be configured.
+ *
+ * There are four types of memory accesses which requires ZLW configuration.
+ * ZLW(Zero Length Write) is a mechanism to enable VT-d pre-fetching on IOMMU.
+ *
+ * 1. Sequential Access or 1D mode
+ * Set ZLW_EN -> 1
+ * set ZLW_PAGE_CROSS_1D -> 1
+ *	Set ZLW_N to "N" pages so that the ZLW will be inserted N pages
+ *	ahead, where N is pre-defined and hardcoded in the platform data
+ * N is pre-defined and hardcoded in the platform data
+ * Set ZLW_2D -> 0
+ *
+ * 2. ZLW 2D mode
+ * Set ZLW_EN -> 1
+ * set ZLW_PAGE_CROSS_1D -> 1,
+ * Set ZLW_N -> 0
+ * Set ZLW_2D -> 1
+ *
+ * 3. ZLW Enable (no 1D or 2D mode)
+ * Set ZLW_EN -> 1
+ * set ZLW_PAGE_CROSS_1D -> 0,
+ * Set ZLW_N -> 0
+ * Set ZLW_2D -> 0
+ *
+ * 4. ZLW disable
+ * Set ZLW_EN -> 0
+ * set ZLW_PAGE_CROSS_1D -> 0,
+ * Set ZLW_N -> 0
+ * Set ZLW_2D -> 0
+ *
+ * To configure the ZLW for the above memory accesses, four registers are
+ * available. Hence, to track these four settings, we have the following
+ * entries in struct ipu_mmu_hw. Each of these entries is per stream and
+ * available only for the L1 streams.
+ *
+ * a. l1_zlw_en -> To track zlw enabled per stream (ZLW_EN)
+ * b. l1_zlw_1d_mode -> Track 1D mode per stream. ZLW inserted at page boundary
+ * c. l1_ins_zlw_ahead_pages -> To track how far in advance the ZLW needs to
+ *			be inserted, i.e. insert the ZLW request N pages ahead.
+ * d. l1_zlw_2d_mode -> To track 2D mode per stream (ZLW_2D)
+ *
+ *
+ * Currently L1/L2 streams, blocks, AT ZLW configurations etc. are pre-defined
+ * as per the usecase specific calculations. Any change to this pre-defined
+ * table has to happen in sync with IPU FW.
+ */
+struct ipu_mmu_hw {
+ union {
+ unsigned long offset;
+ void __iomem *base;
+ };
+ unsigned int info_bits;
+ u8 nr_l1streams;
+ /*
+	 * L1 has a variable number of blocks per stream - a total of 64
+	 * blocks and a maximum of 16 blocks per stream. The layout is
+	 * configured via the block start address per stream; the start
+	 * address is calculated from the block sizes.
+ */
+ u8 l1_block_sz[IPU_MMU_MAX_TLB_L1_STREAMS];
+ /* Whether ZLW is enabled in each stream */
+ bool l1_zlw_en[IPU_MMU_MAX_TLB_L1_STREAMS];
+ bool l1_zlw_1d_mode[IPU_MMU_MAX_TLB_L1_STREAMS];
+ u8 l1_ins_zlw_ahead_pages[IPU_MMU_MAX_TLB_L1_STREAMS];
+ bool l1_zlw_2d_mode[IPU_MMU_MAX_TLB_L1_STREAMS];
+
+ u32 l1_stream_id_reg_offset;
+ u32 l2_stream_id_reg_offset;
+
+ u8 nr_l2streams;
+ /*
+ * L2 has a fixed 2 blocks per stream. The block address is
+ * calculated from the block size.
+ */
+ u8 l2_block_sz[IPU_MMU_MAX_TLB_L2_STREAMS];
+ /* flag to track whether a workaround is needed for the successive-invalidate HW bug */
+ bool insert_read_before_invalidate;
+};
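+
+/*
+ * Illustrative sketch (hypothetical values) of how a platform-data table
+ * could encode the "Sequential Access or 1D mode" ZLW setting from the
+ * comment above for one L1 stream s; real tables are use-case specific and
+ * must stay in sync with the IPU FW:
+ *
+ *	.l1_zlw_en[s]              = true,   ZLW_EN -> 1
+ *	.l1_zlw_1d_mode[s]         = true,   ZLW_PAGE_CROSS_1D -> 1
+ *	.l1_ins_zlw_ahead_pages[s] = 3,      ZLW_N -> N (N from platform data)
+ *	.l1_zlw_2d_mode[s]         = false,  ZLW_2D -> 0
+ */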
+
+struct ipu_mmu_pdata {
+ unsigned int nr_mmus;
+ struct ipu_mmu_hw mmu_hw[IPU_MMU_MAX_DEVICES];
+ int mmid;
+};
+
+struct ipu_isys_csi2_pdata {
+ void __iomem *base;
+};
+
+#define IPU_EV_AUTO 0xff
+
+struct ipu_isys_internal_csi2_pdata {
+ unsigned int nports;
+ unsigned int *offsets;
+};
+
+/*
+ * One place to handle all the IPU HW variations
+ */
+struct ipu_hw_variants {
+ unsigned long offset;
+ unsigned int nr_mmus;
+ struct ipu_mmu_hw mmu_hw[IPU_MMU_MAX_DEVICES];
+ u8 cdc_fifos;
+ u8 cdc_fifo_threshold[IPU_MAX_VC_IOSF_PORTS];
+ u32 dmem_offset;
+ u32 spc_offset; /* SPC offset from psys base */
+};
+
+struct ipu_isys_internal_pdata {
+ struct ipu_isys_internal_csi2_pdata csi2;
+ struct ipu_hw_variants hw_variant;
+ u32 num_parallel_streams;
+ u32 isys_dma_overshoot;
+};
+
+struct ipu_isys_pdata {
+ void __iomem *base;
+ const struct ipu_isys_internal_pdata *ipdata;
+};
+
+struct ipu_psys_internal_pdata {
+ struct ipu_hw_variants hw_variant;
+};
+
+struct ipu_psys_pdata {
+ void __iomem *base;
+ const struct ipu_psys_internal_pdata *ipdata;
+};
+
+#endif
new file mode 100644
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/compat.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+
+#include <uapi/linux/ipu-psys.h>
+
+#include "ipu-psys.h"
+
+static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret = -ENOTTY;
+
+ if (file->f_op->unlocked_ioctl)
+ ret = file->f_op->unlocked_ioctl(file, cmd, arg);
+
+ return ret;
+}
+
+struct ipu_psys_buffer32 {
+ u64 len;
+ union {
+ int fd;
+ compat_uptr_t userptr;
+ u64 reserved;
+ } base;
+ u32 data_offset;
+ u32 bytes_used;
+ u32 flags;
+ u32 reserved[2];
+} __packed;
+
+struct ipu_psys_command32 {
+ u64 issue_id;
+ u64 user_token;
+ u32 priority;
+ compat_uptr_t pg_manifest;
+ compat_uptr_t buffers;
+ int pg;
+ u32 pg_manifest_size;
+ u32 bufcount;
+ u32 min_psys_freq;
+ u32 frame_counter;
+ u32 reserved[2];
+} __packed;
+
+struct ipu_psys_manifest32 {
+ u32 index;
+ u32 size;
+ compat_uptr_t manifest;
+ u32 reserved[5];
+} __packed;
+
+static int
+get_ipu_psys_command32(struct ipu_psys_command *kp,
+ struct ipu_psys_command32 __user *up)
+{
+ compat_uptr_t pgm, bufs;
+ bool access_ok;
+
+ access_ok = access_ok(up, sizeof(struct ipu_psys_command32));
+ if (!access_ok || get_user(kp->issue_id, &up->issue_id) ||
+ get_user(kp->user_token, &up->user_token) ||
+ get_user(kp->priority, &up->priority) ||
+ get_user(pgm, &up->pg_manifest) ||
+ get_user(bufs, &up->buffers) ||
+ get_user(kp->pg, &up->pg) ||
+ get_user(kp->pg_manifest_size, &up->pg_manifest_size) ||
+ get_user(kp->bufcount, &up->bufcount) ||
+ get_user(kp->min_psys_freq, &up->min_psys_freq) ||
+ get_user(kp->frame_counter, &up->frame_counter))
+ return -EFAULT;
+
+ kp->pg_manifest = compat_ptr(pgm);
+ kp->buffers = compat_ptr(bufs);
+
+ return 0;
+}
+
+static int
+get_ipu_psys_buffer32(struct ipu_psys_buffer *kp,
+ struct ipu_psys_buffer32 __user *up)
+{
+ compat_uptr_t ptr;
+ bool access_ok;
+
+ access_ok = access_ok(up, sizeof(struct ipu_psys_buffer32));
+ if (!access_ok || get_user(kp->len, &up->len) ||
+ get_user(ptr, &up->base.userptr) ||
+ get_user(kp->data_offset, &up->data_offset) ||
+ get_user(kp->bytes_used, &up->bytes_used) ||
+ get_user(kp->flags, &up->flags))
+ return -EFAULT;
+
+ kp->base.userptr = compat_ptr(ptr);
+
+ return 0;
+}
+
+static int
+put_ipu_psys_buffer32(struct ipu_psys_buffer *kp,
+ struct ipu_psys_buffer32 __user *up)
+{
+ bool access_ok;
+
+ access_ok = access_ok(up, sizeof(struct ipu_psys_buffer32));
+ if (!access_ok || put_user(kp->len, &up->len) ||
+ put_user(kp->base.fd, &up->base.fd) ||
+ put_user(kp->data_offset, &up->data_offset) ||
+ put_user(kp->bytes_used, &up->bytes_used) ||
+ put_user(kp->flags, &up->flags))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int
+get_ipu_psys_manifest32(struct ipu_psys_manifest *kp,
+ struct ipu_psys_manifest32 __user *up)
+{
+ compat_uptr_t ptr;
+ bool access_ok;
+
+ access_ok = access_ok(up, sizeof(struct ipu_psys_manifest32));
+ if (!access_ok || get_user(kp->index, &up->index) ||
+ get_user(kp->size, &up->size) || get_user(ptr, &up->manifest))
+ return -EFAULT;
+
+ kp->manifest = compat_ptr(ptr);
+
+ return 0;
+}
+
+static int
+put_ipu_psys_manifest32(struct ipu_psys_manifest *kp,
+ struct ipu_psys_manifest32 __user *up)
+{
+ compat_uptr_t ptr = (u32)((unsigned long)kp->manifest);
+ bool access_ok;
+
+ access_ok = access_ok(up, sizeof(struct ipu_psys_manifest32));
+ if (!access_ok || put_user(kp->index, &up->index) ||
+ put_user(kp->size, &up->size) || put_user(ptr, &up->manifest))
+ return -EFAULT;
+
+ return 0;
+}
+
+#define IPU_IOC_GETBUF32 _IOWR('A', 4, struct ipu_psys_buffer32)
+#define IPU_IOC_PUTBUF32 _IOWR('A', 5, struct ipu_psys_buffer32)
+#define IPU_IOC_QCMD32 _IOWR('A', 6, struct ipu_psys_command32)
+#define IPU_IOC_CMD_CANCEL32 _IOWR('A', 8, struct ipu_psys_command32)
+#define IPU_IOC_GET_MANIFEST32 _IOWR('A', 9, struct ipu_psys_manifest32)
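+
+/*
+ * The 32-bit command values above differ from the native IPU_IOC_* codes
+ * only in the size field that _IOWR() encodes: the compat layouts use
+ * compat_uptr_t and are packed, so they are generally smaller than the
+ * native structures on a 64-bit kernel and must be translated to the
+ * native codes (below) before calling the real handler.
+ */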
+
+long ipu_psys_compat_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ union {
+ struct ipu_psys_buffer buf;
+ struct ipu_psys_command cmd;
+ struct ipu_psys_event ev;
+ struct ipu_psys_manifest m;
+ } karg;
+ int compatible_arg = 1;
+ int err = 0;
+ void __user *up = compat_ptr(arg);
+
+ switch (cmd) {
+ case IPU_IOC_GETBUF32:
+ cmd = IPU_IOC_GETBUF;
+ break;
+ case IPU_IOC_PUTBUF32:
+ cmd = IPU_IOC_PUTBUF;
+ break;
+ case IPU_IOC_QCMD32:
+ cmd = IPU_IOC_QCMD;
+ break;
+ case IPU_IOC_GET_MANIFEST32:
+ cmd = IPU_IOC_GET_MANIFEST;
+ break;
+ }
+
+ switch (cmd) {
+ case IPU_IOC_GETBUF:
+ case IPU_IOC_PUTBUF:
+ err = get_ipu_psys_buffer32(&karg.buf, up);
+ compatible_arg = 0;
+ break;
+ case IPU_IOC_QCMD:
+ err = get_ipu_psys_command32(&karg.cmd, up);
+ compatible_arg = 0;
+ break;
+ case IPU_IOC_GET_MANIFEST:
+ err = get_ipu_psys_manifest32(&karg.m, up);
+ compatible_arg = 0;
+ break;
+ }
+ if (err)
+ return err;
+
+ if (compatible_arg) {
+ err = native_ioctl(file, cmd, (unsigned long)up);
+ } else {
+ mm_segment_t old_fs = force_uaccess_begin();
+
+ err = native_ioctl(file, cmd, (unsigned long)&karg);
+ force_uaccess_end(old_fs);
+ }
+
+ if (err)
+ return err;
+
+ switch (cmd) {
+ case IPU_IOC_GETBUF:
+ err = put_ipu_psys_buffer32(&karg.buf, up);
+ break;
+ case IPU_IOC_GET_MANIFEST:
+ err = put_ipu_psys_manifest32(&karg.m, up);
+ break;
+ }
+ return err;
+}
new file mode 100644
@@ -0,0 +1,1618 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/firmware.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/init_task.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/version.h>
+#include <linux/poll.h>
+#include <uapi/linux/sched/types.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+
+#include <uapi/linux/ipu-psys.h>
+
+#include "ipu.h"
+#include "ipu-mmu.h"
+#include "ipu-bus.h"
+#include "ipu-platform.h"
+#include "ipu-buttress.h"
+#include "ipu-cpd.h"
+#include "ipu-fw-psys.h"
+#include "ipu-psys.h"
+#include "ipu-platform-psys.h"
+#include "ipu-platform-regs.h"
+#include "ipu-fw-com.h"
+
+static bool async_fw_init;
+module_param(async_fw_init, bool, 0664);
+MODULE_PARM_DESC(async_fw_init, "Enable asynchronous firmware initialization");
+
+#define IPU_PSYS_NUM_DEVICES 4
+#define IPU_PSYS_AUTOSUSPEND_DELAY 2000
+
+#ifdef CONFIG_PM
+static int psys_runtime_pm_resume(struct device *dev);
+static int psys_runtime_pm_suspend(struct device *dev);
+#else
+#define pm_runtime_dont_use_autosuspend(d)
+#define pm_runtime_use_autosuspend(d)
+#define pm_runtime_set_autosuspend_delay(d, f) 0
+#define pm_runtime_get_sync(d) 0
+#define pm_runtime_put(d) 0
+#define pm_runtime_put_sync(d) 0
+#define pm_runtime_put_noidle(d) 0
+#define pm_runtime_put_autosuspend(d) 0
+#endif
+
+static dev_t ipu_psys_dev_t;
+static DECLARE_BITMAP(ipu_psys_devices, IPU_PSYS_NUM_DEVICES);
+static DEFINE_MUTEX(ipu_psys_mutex);
+
+static struct fw_init_task {
+ struct delayed_work work;
+ struct ipu_psys *psys;
+} fw_init_task;
+
+static void ipu_psys_remove(struct ipu_bus_device *adev);
+
+static struct bus_type ipu_psys_bus = {
+ .name = IPU_PSYS_NAME,
+};
+
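+/*
+ * Take a free process-group buffer from the pool - an entry with
+ * pg_size == 0 is free - if one is large enough, or allocate a new DMA
+ * buffer and add it to the pool. Pool entries are only returned to the
+ * DMA API when the driver is removed (see ipu_psys_remove()).
+ */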
+struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size)
+{
+ struct ipu_psys_pg *kpg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&psys->pgs_lock, flags);
+ list_for_each_entry(kpg, &psys->pgs, list) {
+ if (!kpg->pg_size && kpg->size >= pg_size) {
+ kpg->pg_size = pg_size;
+ spin_unlock_irqrestore(&psys->pgs_lock, flags);
+ return kpg;
+ }
+ }
+ spin_unlock_irqrestore(&psys->pgs_lock, flags);
+ /* no sufficiently large buffer available, allocate a new one */
+ kpg = kzalloc(sizeof(*kpg), GFP_KERNEL);
+ if (!kpg)
+ return NULL;
+
+ kpg->pg = dma_alloc_attrs(&psys->adev->dev, pg_size,
+ &kpg->pg_dma_addr, GFP_KERNEL, 0);
+ if (!kpg->pg) {
+ kfree(kpg);
+ return NULL;
+ }
+
+ kpg->pg_size = pg_size;
+ kpg->size = pg_size;
+ spin_lock_irqsave(&psys->pgs_lock, flags);
+ list_add(&kpg->list, &psys->pgs);
+ spin_unlock_irqrestore(&psys->pgs_lock, flags);
+
+ return kpg;
+}
+
+static int ipu_psys_unmapbuf_locked(int fd, struct ipu_psys_fh *fh,
+ struct ipu_psys_kbuffer *kbuf);
+struct ipu_psys_kbuffer *ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd)
+{
+ struct ipu_psys_kbuffer *kbuf;
+
+ list_for_each_entry(kbuf, &fh->bufmap, list) {
+ if (kbuf->fd == fd)
+ return kbuf;
+ }
+
+ return NULL;
+}
+
+struct ipu_psys_kbuffer *
+ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr)
+{
+ struct ipu_psys_kbuffer *kbuffer;
+
+ list_for_each_entry(kbuffer, &fh->bufmap, list) {
+ if (kbuffer->kaddr == kaddr)
+ return kbuffer;
+ }
+
+ return NULL;
+}
+
+static int ipu_psys_get_userpages(struct ipu_dma_buf_attach *attach)
+{
+ struct vm_area_struct *vma;
+ unsigned long start, end;
+ int npages, array_size;
+ struct page **pages;
+ struct sg_table *sgt;
+ int nr = 0;
+ int ret = -ENOMEM;
+
+ start = (unsigned long)attach->userptr;
+ end = PAGE_ALIGN(start + attach->len);
+ npages = (end - (start & PAGE_MASK)) >> PAGE_SHIFT;
+ array_size = npages * sizeof(struct page *);
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return -ENOMEM;
+
+ if (attach->npages != 0) {
+ pages = attach->pages;
+ npages = attach->npages;
+ attach->vma_is_io = 1;
+ goto skip_pages;
+ }
+
+ pages = kvzalloc(array_size, GFP_KERNEL);
+ if (!pages)
+ goto free_sgt;
+
+ mmap_read_lock(current->mm);
+ vma = find_vma(current->mm, start);
+ if (!vma) {
+ ret = -EFAULT;
+ goto error_up_read;
+ }
+
+ /*
+ * For buffers from Gralloc, VM_PFNMAP is expected,
+ * but VM_IO is set instead. Possibly a bug in Gralloc.
+ */
+ attach->vma_is_io = vma->vm_flags & (VM_IO | VM_PFNMAP);
+
+ if (attach->vma_is_io) {
+ unsigned long io_start = start;
+
+ if (vma->vm_end < start + attach->len) {
+ dev_err(attach->dev,
+ "vma at %lu is too small for %llu bytes\n",
+ start, attach->len);
+ ret = -EFAULT;
+ goto error_up_read;
+ }
+
+ for (nr = 0; nr < npages; nr++, io_start += PAGE_SIZE) {
+ unsigned long pfn;
+
+ ret = follow_pfn(vma, io_start, &pfn);
+ if (ret)
+ goto error_up_read;
+ pages[nr] = pfn_to_page(pfn);
+ }
+ } else {
+ nr = get_user_pages(start & PAGE_MASK, npages,
+ FOLL_WRITE,
+ pages, NULL);
+ if (nr < npages)
+ goto error_up_read;
+ }
+ mmap_read_unlock(current->mm);
+
+ attach->pages = pages;
+ attach->npages = npages;
+
+skip_pages:
+ ret = sg_alloc_table_from_pages(sgt, pages, npages,
+ start & ~PAGE_MASK, attach->len,
+ GFP_KERNEL);
+ if (ret < 0)
+ goto error;
+
+ attach->sgt = sgt;
+
+ return 0;
+
+error_up_read:
+ mmap_read_unlock(current->mm);
+error:
+ if (!attach->vma_is_io)
+ while (nr > 0)
+ put_page(pages[--nr]);
+
+ if (array_size <= PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+free_sgt:
+ kfree(sgt);
+
+ dev_err(attach->dev, "failed to get userpages:%d\n", ret);
+
+ return ret;
+}
+
+static void ipu_psys_put_userpages(struct ipu_dma_buf_attach *attach)
+{
+ if (!attach || !attach->userptr || !attach->sgt)
+ return;
+
+ if (!attach->vma_is_io) {
+ int i = attach->npages;
+
+ while (--i >= 0) {
+ set_page_dirty_lock(attach->pages[i]);
+ put_page(attach->pages[i]);
+ }
+ }
+
+ kvfree(attach->pages);
+
+ sg_free_table(attach->sgt);
+ kfree(attach->sgt);
+ attach->sgt = NULL;
+}
+
+static int ipu_dma_buf_attach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *attach)
+{
+ struct ipu_psys_kbuffer *kbuf = dbuf->priv;
+ struct ipu_dma_buf_attach *ipu_attach;
+
+ ipu_attach = kzalloc(sizeof(*ipu_attach), GFP_KERNEL);
+ if (!ipu_attach)
+ return -ENOMEM;
+
+ ipu_attach->len = kbuf->len;
+ ipu_attach->userptr = kbuf->userptr;
+
+ attach->priv = ipu_attach;
+ return 0;
+}
+
+static void ipu_dma_buf_detach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *attach)
+{
+ struct ipu_dma_buf_attach *ipu_attach = attach->priv;
+
+ kfree(ipu_attach);
+ attach->priv = NULL;
+}
+
+static struct sg_table *ipu_dma_buf_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct ipu_dma_buf_attach *ipu_attach = attach->priv;
+ unsigned long attrs;
+ int ret;
+
+ ret = ipu_psys_get_userpages(ipu_attach);
+ if (ret)
+ return NULL;
+
+ attrs = DMA_ATTR_SKIP_CPU_SYNC;
+ ret = dma_map_sg_attrs(attach->dev, ipu_attach->sgt->sgl,
+ ipu_attach->sgt->orig_nents, dir, attrs);
+ if (ret < ipu_attach->sgt->orig_nents) {
+ ipu_psys_put_userpages(ipu_attach);
+ dev_dbg(attach->dev, "buf map failed\n");
+
+ return ERR_PTR(-EIO);
+ }
+
+ /*
+ * Initial cache flush to avoid writing dirty pages for buffers which
+ * are later marked as IPU_BUFFER_FLAG_NO_FLUSH.
+ */
+ dma_sync_sg_for_device(attach->dev, ipu_attach->sgt->sgl,
+ ipu_attach->sgt->orig_nents, DMA_BIDIRECTIONAL);
+
+ return ipu_attach->sgt;
+}
+
+static void ipu_dma_buf_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *sg, enum dma_data_direction dir)
+{
+ struct ipu_dma_buf_attach *ipu_attach = attach->priv;
+
+ dma_unmap_sg(attach->dev, sg->sgl, sg->orig_nents, dir);
+ ipu_psys_put_userpages(ipu_attach);
+}
+
+static int ipu_dma_buf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)
+{
+ return -ENOTTY;
+}
+
+static void ipu_dma_buf_release(struct dma_buf *buf)
+{
+ struct ipu_psys_kbuffer *kbuf = buf->priv;
+
+ if (!kbuf)
+ return;
+
+ if (kbuf->db_attach) {
+ dev_dbg(kbuf->db_attach->dev,
+ "releasing buffer %d\n", kbuf->fd);
+ ipu_psys_put_userpages(kbuf->db_attach->priv);
+ }
+ kfree(kbuf);
+}
+
+static int ipu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
+ enum dma_data_direction dir)
+{
+ return -ENOTTY;
+}
+
+static int ipu_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+ struct dma_buf_attachment *attach;
+ struct ipu_dma_buf_attach *ipu_attach;
+
+ if (list_empty(&dmabuf->attachments))
+ return -EINVAL;
+
+ attach = list_last_entry(&dmabuf->attachments,
+ struct dma_buf_attachment, node);
+ ipu_attach = attach->priv;
+
+ if (!ipu_attach || !ipu_attach->pages || !ipu_attach->npages)
+ return -EINVAL;
+
+ map->vaddr = vm_map_ram(ipu_attach->pages, ipu_attach->npages, 0);
+ map->is_iomem = false;
+ if (!map->vaddr)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ipu_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+ struct dma_buf_attachment *attach;
+ struct ipu_dma_buf_attach *ipu_attach;
+
+ if (WARN_ON(list_empty(&dmabuf->attachments)))
+ return;
+
+ attach = list_last_entry(&dmabuf->attachments,
+ struct dma_buf_attachment, node);
+ ipu_attach = attach->priv;
+
+ if (WARN_ON(!ipu_attach || !ipu_attach->pages || !ipu_attach->npages))
+ return;
+
+ vm_unmap_ram(map->vaddr, ipu_attach->npages);
+}
+
+struct dma_buf_ops ipu_dma_buf_ops = {
+ .attach = ipu_dma_buf_attach,
+ .detach = ipu_dma_buf_detach,
+ .map_dma_buf = ipu_dma_buf_map,
+ .unmap_dma_buf = ipu_dma_buf_unmap,
+ .release = ipu_dma_buf_release,
+ .begin_cpu_access = ipu_dma_buf_begin_cpu_access,
+ .mmap = ipu_dma_buf_mmap,
+ .vmap = ipu_dma_buf_vmap,
+ .vunmap = ipu_dma_buf_vunmap,
+};
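+
+/*
+ * Rough lifetime of an exported userptr buffer under the ops above, as
+ * driven by the IPU_IOC_GETBUF/IPU_IOC_MAPBUF paths later in this file:
+ *
+ *	ipu_psys_getbuf()        -> dma_buf_export() + dma_buf_fd()
+ *	ipu_psys_mapbuf_locked() -> dma_buf_attach()         (.attach)
+ *	                         -> dma_buf_map_attachment() (.map_dma_buf,
+ *	                            pins the user pages, maps the sg-table)
+ *	                         -> dma_buf_vmap()           (.vmap)
+ *	ipu_psys_kbuf_unmap()    -> vunmap/unmap/detach/dma_buf_put()
+ */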
+
+static int ipu_psys_open(struct inode *inode, struct file *file)
+{
+ struct ipu_psys *psys = inode_to_ipu_psys(inode);
+ struct ipu_device *isp = psys->adev->isp;
+ struct ipu_psys_fh *fh;
+ int rval;
+
+ if (isp->flr_done)
+ return -EIO;
+
+ fh = kzalloc(sizeof(*fh), GFP_KERNEL);
+ if (!fh)
+ return -ENOMEM;
+
+ fh->psys = psys;
+
+ file->private_data = fh;
+
+ mutex_init(&fh->mutex);
+ INIT_LIST_HEAD(&fh->bufmap);
+ init_waitqueue_head(&fh->wait);
+
+ rval = ipu_psys_fh_init(fh);
+ if (rval)
+ goto open_failed;
+
+ mutex_lock(&psys->mutex);
+ list_add_tail(&fh->list, &psys->fhs);
+ mutex_unlock(&psys->mutex);
+
+ return 0;
+
+open_failed:
+ mutex_destroy(&fh->mutex);
+ kfree(fh);
+ return rval;
+}
+
+static inline void ipu_psys_kbuf_unmap(struct ipu_psys_kbuffer *kbuf)
+{
+ if (!kbuf)
+ return;
+
+ kbuf->valid = false;
+ if (kbuf->kaddr) {
+ struct dma_buf_map dmap;
+
+ dma_buf_map_set_vaddr(&dmap, kbuf->kaddr);
+ dma_buf_vunmap(kbuf->dbuf, &dmap);
+ }
+ if (kbuf->sgt)
+ dma_buf_unmap_attachment(kbuf->db_attach,
+ kbuf->sgt,
+ DMA_BIDIRECTIONAL);
+ if (kbuf->db_attach)
+ dma_buf_detach(kbuf->dbuf, kbuf->db_attach);
+ dma_buf_put(kbuf->dbuf);
+
+ kbuf->db_attach = NULL;
+ kbuf->dbuf = NULL;
+ kbuf->sgt = NULL;
+}
+
+static int ipu_psys_release(struct inode *inode, struct file *file)
+{
+ struct ipu_psys *psys = inode_to_ipu_psys(inode);
+ struct ipu_psys_fh *fh = file->private_data;
+ struct ipu_psys_kbuffer *kbuf, *kbuf0;
+ struct dma_buf_attachment *db_attach;
+
+ mutex_lock(&fh->mutex);
+ /* clean up buffers */
+ if (!list_empty(&fh->bufmap)) {
+ list_for_each_entry_safe(kbuf, kbuf0, &fh->bufmap, list) {
+ list_del(&kbuf->list);
+ db_attach = kbuf->db_attach;
+
+ /* Unmap and release buffers */
+ if (kbuf->dbuf && db_attach) {
+ ipu_psys_kbuf_unmap(kbuf);
+ } else {
+ if (db_attach)
+ ipu_psys_put_userpages(db_attach->priv);
+ kfree(kbuf);
+ }
+ }
+ }
+ mutex_unlock(&fh->mutex);
+
+ mutex_lock(&psys->mutex);
+ list_del(&fh->list);
+ mutex_unlock(&psys->mutex);
+ ipu_psys_fh_deinit(fh);
+
+ mutex_lock(&psys->mutex);
+ if (list_empty(&psys->fhs))
+ psys->power_gating = 0;
+ mutex_unlock(&psys->mutex);
+ mutex_destroy(&fh->mutex);
+ kfree(fh);
+
+ return 0;
+}
+
+static int ipu_psys_getbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh)
+{
+ struct ipu_psys_kbuffer *kbuf;
+ struct ipu_psys *psys = fh->psys;
+
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct dma_buf *dbuf;
+ int ret;
+
+ if (!buf->base.userptr) {
+ dev_err(&psys->adev->dev, "Buffer allocation not supported\n");
+ return -EINVAL;
+ }
+
+ kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ kbuf->len = buf->len;
+ kbuf->userptr = buf->base.userptr;
+ kbuf->flags = buf->flags;
+
+ exp_info.ops = &ipu_dma_buf_ops;
+ exp_info.size = kbuf->len;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = kbuf;
+
+ dbuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dbuf)) {
+ kfree(kbuf);
+ return PTR_ERR(dbuf);
+ }
+
+ ret = dma_buf_fd(dbuf, 0);
+ if (ret < 0) {
+ kfree(kbuf);
+ dma_buf_put(dbuf);
+ return ret;
+ }
+
+ kbuf->fd = ret;
+ buf->base.fd = ret;
+ buf->flags &= ~IPU_BUFFER_FLAG_USERPTR;
+ buf->flags |= IPU_BUFFER_FLAG_DMA_HANDLE;
+ kbuf->flags = buf->flags;
+
+ mutex_lock(&fh->mutex);
+ list_add(&kbuf->list, &fh->bufmap);
+ mutex_unlock(&fh->mutex);
+
+ dev_dbg(&psys->adev->dev, "IOC_GETBUF: userptr %p size %llu to fd %d",
+ buf->base.userptr, buf->len, buf->base.fd);
+
+ return 0;
+}
+
+static int ipu_psys_putbuf(struct ipu_psys_buffer *buf, struct ipu_psys_fh *fh)
+{
+ return 0;
+}
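+
+/*
+ * Minimal userspace sketch of the buffer export/map flow (error handling
+ * omitted; field names from uapi/linux/ipu-psys.h):
+ *
+ *	struct ipu_psys_buffer buf = {
+ *		.len = size,
+ *		.base.userptr = ptr,
+ *	};
+ *	ioctl(psys_fd, IPU_IOC_GETBUF, &buf);	exports ptr as buf.base.fd
+ *	ioctl(psys_fd, IPU_IOC_MAPBUF, (unsigned long)buf.base.fd);
+ */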
+
+int ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh,
+ struct ipu_psys_kbuffer *kbuf)
+{
+ struct ipu_psys *psys = fh->psys;
+ struct dma_buf *dbuf;
+ struct dma_buf_map dmap;
+ int ret;
+
+ dbuf = dma_buf_get(fd);
+ if (IS_ERR(dbuf))
+ return -EINVAL;
+
+ if (!kbuf) {
+ /*
+ * This fd wasn't generated by ipu_psys_getbuf; it is a new
+ * fd. Create a new kbuf item for this fd and add it to the
+ * bufmap list.
+ */
+ kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto mapbuf_fail;
+ }
+
+ list_add(&kbuf->list, &fh->bufmap);
+ }
+
+ /* fd valid and found, need remap */
+ if (kbuf->dbuf && (kbuf->dbuf != dbuf || kbuf->len != dbuf->size)) {
+ dev_dbg(&psys->adev->dev,
+ "dmabuf fd %d with kbuf %p changed, need remap.\n",
+ fd, kbuf);
+ ret = ipu_psys_unmapbuf_locked(fd, fh, kbuf);
+ if (ret)
+ goto mapbuf_fail;
+
+ kbuf = ipu_psys_lookup_kbuffer(fh, fd);
+ /* changed external dmabuf */
+ if (!kbuf) {
+ kbuf = kzalloc(sizeof(*kbuf), GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto mapbuf_fail;
+ }
+ list_add(&kbuf->list, &fh->bufmap);
+ }
+ }
+
+ if (kbuf->sgt) {
+ dev_dbg(&psys->adev->dev, "fd %d has been mapped!\n", fd);
+ dma_buf_put(dbuf);
+ goto mapbuf_end;
+ }
+
+ kbuf->dbuf = dbuf;
+
+ if (kbuf->len == 0)
+ kbuf->len = kbuf->dbuf->size;
+
+ kbuf->fd = fd;
+
+ kbuf->db_attach = dma_buf_attach(kbuf->dbuf, &psys->adev->dev);
+ if (IS_ERR(kbuf->db_attach)) {
+ ret = PTR_ERR(kbuf->db_attach);
+ dev_dbg(&psys->adev->dev, "dma buf attach failed\n");
+ goto kbuf_map_fail;
+ }
+
+ kbuf->sgt = dma_buf_map_attachment(kbuf->db_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(kbuf->sgt)) {
+ ret = -EINVAL;
+ kbuf->sgt = NULL;
+ dev_dbg(&psys->adev->dev, "dma buf map attachment failed\n");
+ goto kbuf_map_fail;
+ }
+
+ kbuf->dma_addr = sg_dma_address(kbuf->sgt->sgl);
+
+ ret = dma_buf_vmap(kbuf->dbuf, &dmap);
+ if (ret) {
+ dev_dbg(&psys->adev->dev, "dma buf vmap failed\n");
+ goto kbuf_map_fail;
+ }
+ kbuf->kaddr = dmap.vaddr;
+
+ dev_dbg(&psys->adev->dev, "%s kbuf %p fd %d with len %llu mapped\n",
+ __func__, kbuf, fd, kbuf->len);
+mapbuf_end:
+
+ kbuf->valid = true;
+
+ return 0;
+
+kbuf_map_fail:
+ ipu_psys_kbuf_unmap(kbuf);
+
+ list_del(&kbuf->list);
+ if (!kbuf->userptr)
+ kfree(kbuf);
+ return ret;
+
+mapbuf_fail:
+ dma_buf_put(dbuf);
+
+ dev_err(&psys->adev->dev, "%s failed for fd %d\n", __func__, fd);
+ return ret;
+}
+
+static long ipu_psys_mapbuf(int fd, struct ipu_psys_fh *fh)
+{
+ long ret;
+ struct ipu_psys_kbuffer *kbuf;
+
+ mutex_lock(&fh->mutex);
+ kbuf = ipu_psys_lookup_kbuffer(fh, fd);
+ ret = ipu_psys_mapbuf_locked(fd, fh, kbuf);
+ mutex_unlock(&fh->mutex);
+
+ dev_dbg(&fh->psys->adev->dev, "IOC_MAPBUF ret %ld\n", ret);
+
+ return ret;
+}
+
+static int ipu_psys_unmapbuf_locked(int fd, struct ipu_psys_fh *fh,
+ struct ipu_psys_kbuffer *kbuf)
+{
+ struct ipu_psys *psys = fh->psys;
+
+ if (!kbuf || fd != kbuf->fd) {
+ dev_err(&psys->adev->dev, "invalid kbuffer\n");
+ return -EINVAL;
+ }
+
+ /* From now on it is not safe to use this kbuffer */
+ ipu_psys_kbuf_unmap(kbuf);
+
+ list_del(&kbuf->list);
+
+ if (!kbuf->userptr)
+ kfree(kbuf);
+
+ dev_dbg(&psys->adev->dev, "%s fd %d unmapped\n", __func__, fd);
+
+ return 0;
+}
+
+static long ipu_psys_unmapbuf(int fd, struct ipu_psys_fh *fh)
+{
+ struct ipu_psys_kbuffer *kbuf;
+ long ret;
+
+ mutex_lock(&fh->mutex);
+ kbuf = ipu_psys_lookup_kbuffer(fh, fd);
+ if (!kbuf) {
+ dev_err(&fh->psys->adev->dev,
+ "buffer with fd %d not found\n", fd);
+ mutex_unlock(&fh->mutex);
+ return -EINVAL;
+ }
+ ret = ipu_psys_unmapbuf_locked(fd, fh, kbuf);
+ mutex_unlock(&fh->mutex);
+
+ dev_dbg(&fh->psys->adev->dev, "IOC_UNMAPBUF\n");
+
+ return ret;
+}
+
+static unsigned int ipu_psys_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct ipu_psys_fh *fh = file->private_data;
+ struct ipu_psys *psys = fh->psys;
+ unsigned int res = 0;
+
+ dev_dbg(&psys->adev->dev, "ipu psys poll\n");
+
+ poll_wait(file, &fh->wait, wait);
+
+ if (ipu_get_completed_kcmd(fh))
+ res = POLLIN;
+
+ dev_dbg(&psys->adev->dev, "ipu psys poll res %u\n", res);
+
+ return res;
+}
+
+static long ipu_get_manifest(struct ipu_psys_manifest *manifest,
+ struct ipu_psys_fh *fh)
+{
+ struct ipu_psys *psys = fh->psys;
+ struct ipu_device *isp = psys->adev->isp;
+ struct ipu_cpd_client_pkg_hdr *client_pkg;
+ u32 entries;
+ void *host_fw_data;
+ dma_addr_t dma_fw_data;
+ u32 client_pkg_offset;
+
+ host_fw_data = (void *)isp->cpd_fw->data;
+ dma_fw_data = sg_dma_address(psys->fw_sgt.sgl);
+
+ entries = ipu_cpd_pkg_dir_get_num_entries(psys->pkg_dir);
+ if (!manifest || manifest->index >= entries) {
+ dev_err(&psys->adev->dev, "invalid argument\n");
+ return -EINVAL;
+ }
+
+ if (!ipu_cpd_pkg_dir_get_size(psys->pkg_dir, manifest->index) ||
+ ipu_cpd_pkg_dir_get_type(psys->pkg_dir, manifest->index) <
+ IPU_CPD_PKG_DIR_CLIENT_PG_TYPE) {
+ dev_dbg(&psys->adev->dev, "invalid pkg dir entry\n");
+ return -ENOENT;
+ }
+
+ client_pkg_offset = ipu_cpd_pkg_dir_get_address(psys->pkg_dir,
+ manifest->index);
+ client_pkg_offset -= dma_fw_data;
+
+ client_pkg = host_fw_data + client_pkg_offset;
+ manifest->size = client_pkg->pg_manifest_size;
+
+ if (!manifest->manifest)
+ return 0;
+
+ if (copy_to_user(manifest->manifest,
+ (uint8_t *)client_pkg + client_pkg->pg_manifest_offs,
+ manifest->size)) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
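+
+/*
+ * Note the two-step calling convention above: a first IPU_IOC_GET_MANIFEST
+ * call with .manifest == NULL only fills in .size, letting userspace
+ * allocate a buffer of that size and repeat the call to receive the
+ * manifest contents.
+ */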
+
+static long ipu_psys_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ union {
+ struct ipu_psys_buffer buf;
+ struct ipu_psys_command cmd;
+ struct ipu_psys_event ev;
+ struct ipu_psys_capability caps;
+ struct ipu_psys_manifest m;
+ } karg;
+ struct ipu_psys_fh *fh = file->private_data;
+ long err = 0;
+ void __user *up = (void __user *)arg;
+ bool copy = (cmd != IPU_IOC_MAPBUF && cmd != IPU_IOC_UNMAPBUF);
+
+ if (copy) {
+ if (_IOC_SIZE(cmd) > sizeof(karg))
+ return -ENOTTY;
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ err = copy_from_user(&karg, up, _IOC_SIZE(cmd));
+ if (err)
+ return -EFAULT;
+ }
+ }
+
+ switch (cmd) {
+ case IPU_IOC_MAPBUF:
+ err = ipu_psys_mapbuf(arg, fh);
+ break;
+ case IPU_IOC_UNMAPBUF:
+ err = ipu_psys_unmapbuf(arg, fh);
+ break;
+ case IPU_IOC_QUERYCAP:
+ karg.caps = fh->psys->caps;
+ break;
+ case IPU_IOC_GETBUF:
+ err = ipu_psys_getbuf(&karg.buf, fh);
+ break;
+ case IPU_IOC_PUTBUF:
+ err = ipu_psys_putbuf(&karg.buf, fh);
+ break;
+ case IPU_IOC_QCMD:
+ err = ipu_psys_kcmd_new(&karg.cmd, fh);
+ break;
+ case IPU_IOC_DQEVENT:
+ err = ipu_ioctl_dqevent(&karg.ev, fh, file->f_flags);
+ break;
+ case IPU_IOC_GET_MANIFEST:
+ err = ipu_get_manifest(&karg.m, fh);
+ break;
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ if (copy && _IOC_DIR(cmd) & _IOC_READ)
+ if (copy_to_user(up, &karg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static const struct file_operations ipu_psys_fops = {
+ .open = ipu_psys_open,
+ .release = ipu_psys_release,
+ .unlocked_ioctl = ipu_psys_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ipu_psys_compat_ioctl32,
+#endif
+ .poll = ipu_psys_poll,
+ .owner = THIS_MODULE,
+};
+
+static void ipu_psys_dev_release(struct device *dev)
+{
+}
+
+#ifdef CONFIG_PM
+static int psys_runtime_pm_resume(struct device *dev)
+{
+ struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+ struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
+ unsigned long flags;
+ int retval;
+
+ if (!psys)
+ return 0;
+
+ /*
+ * In runtime autosuspend mode, if the psys is already powered on,
+ * there is no need to resume it again.
+ */
+ spin_lock_irqsave(&psys->ready_lock, flags);
+ if (psys->ready) {
+ spin_unlock_irqrestore(&psys->ready_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&psys->ready_lock, flags);
+
+ retval = ipu_mmu_hw_init(adev->mmu);
+ if (retval)
+ return retval;
+
+ if (async_fw_init && !psys->fwcom) {
+ dev_err(dev,
+ "%s: asynchronous firmware init not finished, skipping\n",
+ __func__);
+ return 0;
+ }
+
+ if (!ipu_buttress_auth_done(adev->isp)) {
+ dev_dbg(dev, "%s: not yet authenticated, skipping\n", __func__);
+ return 0;
+ }
+
+ ipu_psys_setup_hw(psys);
+
+ ipu_psys_subdomains_power(psys, 1);
+ ipu_trace_restore(&psys->adev->dev);
+
+ ipu_configure_spc(adev->isp,
+ &psys->pdata->ipdata->hw_variant,
+ IPU_CPD_PKG_DIR_PSYS_SERVER_IDX,
+ psys->pdata->base, psys->pkg_dir,
+ psys->pkg_dir_dma_addr);
+
+ retval = ipu_fw_psys_open(psys);
+ if (retval) {
+ dev_err(&psys->adev->dev, "Failed to open abi.\n");
+ return retval;
+ }
+
+ spin_lock_irqsave(&psys->ready_lock, flags);
+ psys->ready = 1;
+ spin_unlock_irqrestore(&psys->ready_lock, flags);
+
+ return 0;
+}
+
+static int psys_runtime_pm_suspend(struct device *dev)
+{
+ struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+ struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
+ unsigned long flags;
+ int rval;
+
+ if (!psys)
+ return 0;
+
+ if (!psys->ready)
+ return 0;
+
+ spin_lock_irqsave(&psys->ready_lock, flags);
+ psys->ready = 0;
+ spin_unlock_irqrestore(&psys->ready_lock, flags);
+
+ /*
+ * We can log the failure, but it is better not to return an error:
+ * at suspend we are progressing towards the psys power-gated state,
+ * and any hang / failure inside psys will be forgotten soon.
+ */
+ rval = ipu_fw_psys_close(psys);
+ if (rval)
+ dev_err(dev, "Device close failure: %d\n", rval);
+
+ ipu_psys_subdomains_power(psys, 0);
+
+ ipu_mmu_hw_cleanup(adev->mmu);
+
+ return 0;
+}
+
+/*
+ * The following PM callbacks are needed to enable runtime PM on IPU PCI
+ * device resume; otherwise runtime PM can't work when the PCI device
+ * resumes from the S3 state.
+ */
+static int psys_resume(struct device *dev)
+{
+ return 0;
+}
+
+static int psys_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops psys_pm_ops = {
+ .runtime_suspend = psys_runtime_pm_suspend,
+ .runtime_resume = psys_runtime_pm_resume,
+ .suspend = psys_suspend,
+ .resume = psys_resume,
+};
+
+#define PSYS_PM_OPS (&psys_pm_ops)
+#else
+#define PSYS_PM_OPS NULL
+#endif
+
+static int cpd_fw_reload(struct ipu_device *isp)
+{
+ struct ipu_psys *psys = ipu_bus_get_drvdata(isp->psys);
+ int rval;
+
+ if (!isp->secure_mode) {
+ dev_warn(&isp->pdev->dev,
+ "CPD firmware reload was only supported for secure mode.\n");
+ return -EINVAL;
+ }
+
+ if (isp->cpd_fw) {
+ ipu_cpd_free_pkg_dir(isp->psys, psys->pkg_dir,
+ psys->pkg_dir_dma_addr,
+ psys->pkg_dir_size);
+
+ ipu_buttress_unmap_fw_image(isp->psys, &psys->fw_sgt);
+ release_firmware(isp->cpd_fw);
+ isp->cpd_fw = NULL;
+ dev_info(&isp->pdev->dev, "Old FW removed\n");
+ }
+
+ rval = request_cpd_fw(&isp->cpd_fw, isp->cpd_fw_name,
+ &isp->pdev->dev);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "Requesting firmware(%s) failed\n",
+ isp->cpd_fw_name);
+ return rval;
+ }
+
+ rval = ipu_cpd_validate_cpd_file(isp, isp->cpd_fw->data,
+ isp->cpd_fw->size);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "Failed to validate cpd file\n");
+ goto out_release_firmware;
+ }
+
+ rval = ipu_buttress_map_fw_image(isp->psys, isp->cpd_fw, &psys->fw_sgt);
+ if (rval)
+ goto out_release_firmware;
+
+ psys->pkg_dir = ipu_cpd_create_pkg_dir(isp->psys,
+ isp->cpd_fw->data,
+ sg_dma_address(psys->fw_sgt.sgl),
+ &psys->pkg_dir_dma_addr,
+ &psys->pkg_dir_size);
+
+ if (!psys->pkg_dir) {
+ rval = -EINVAL;
+ goto out_unmap_fw_image;
+ }
+
+ isp->pkg_dir = psys->pkg_dir;
+ isp->pkg_dir_dma_addr = psys->pkg_dir_dma_addr;
+ isp->pkg_dir_size = psys->pkg_dir_size;
+
+ if (!isp->secure_mode)
+ return 0;
+
+ rval = ipu_fw_authenticate(isp, 1);
+ if (rval)
+ goto out_free_pkg_dir;
+
+ return 0;
+
+out_free_pkg_dir:
+ ipu_cpd_free_pkg_dir(isp->psys, psys->pkg_dir,
+ psys->pkg_dir_dma_addr, psys->pkg_dir_size);
+out_unmap_fw_image:
+ ipu_buttress_unmap_fw_image(isp->psys, &psys->fw_sgt);
+out_release_firmware:
+ release_firmware(isp->cpd_fw);
+ isp->cpd_fw = NULL;
+
+ return rval;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int ipu_psys_icache_prefetch_sp_get(void *data, u64 *val)
+{
+ struct ipu_psys *psys = data;
+
+ *val = psys->icache_prefetch_sp;
+ return 0;
+}
+
+static int ipu_psys_icache_prefetch_sp_set(void *data, u64 val)
+{
+ struct ipu_psys *psys = data;
+
+ if (val != !!val)
+ return -EINVAL;
+
+ psys->icache_prefetch_sp = val;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_sp_fops,
+ ipu_psys_icache_prefetch_sp_get,
+ ipu_psys_icache_prefetch_sp_set, "%llu\n");
+
+static int ipu_psys_icache_prefetch_isp_get(void *data, u64 *val)
+{
+ struct ipu_psys *psys = data;
+
+ *val = psys->icache_prefetch_isp;
+ return 0;
+}
+
+static int ipu_psys_icache_prefetch_isp_set(void *data, u64 val)
+{
+ struct ipu_psys *psys = data;
+
+ if (val != !!val)
+ return -EINVAL;
+
+ psys->icache_prefetch_isp = val;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(psys_icache_prefetch_isp_fops,
+ ipu_psys_icache_prefetch_isp_get,
+ ipu_psys_icache_prefetch_isp_set, "%llu\n");
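+
+/*
+ * The two attributes above surface under debugfs once
+ * ipu_psys_init_debugfs() below has run; an illustrative shell session
+ * (the exact parent directory depends on isp->ipu_dir):
+ *
+ *	echo 1 > /sys/kernel/debug/.../psys/icache_prefetch_sp
+ *	cat /sys/kernel/debug/.../psys/icache_prefetch_isp
+ *
+ * Only 0 and 1 are accepted; anything else is rejected with -EINVAL.
+ */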
+
+static int ipu_psys_init_debugfs(struct ipu_psys *psys)
+{
+ struct dentry *file;
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("psys", psys->adev->isp->ipu_dir);
+ if (IS_ERR(dir))
+ return -ENOMEM;
+
+ file = debugfs_create_file("icache_prefetch_sp", 0600,
+ dir, psys, &psys_icache_prefetch_sp_fops);
+ if (IS_ERR(file))
+ goto err;
+
+ file = debugfs_create_file("icache_prefetch_isp", 0600,
+ dir, psys, &psys_icache_prefetch_isp_fops);
+ if (IS_ERR(file))
+ goto err;
+
+ psys->debugfsdir = dir;
+
+#ifdef IPU_PSYS_GPC
+ if (ipu_psys_gpc_init_debugfs(psys))
+ return -ENOMEM;
+#endif
+
+ return 0;
+err:
+ debugfs_remove_recursive(dir);
+ return -ENOMEM;
+}
+#endif
+
+static int ipu_psys_sched_cmd(void *ptr)
+{
+ struct ipu_psys *psys = ptr;
+ size_t pending = 0;
+
+ while (1) {
+ wait_event_interruptible(psys->sched_cmd_wq,
+ (kthread_should_stop() ||
+ (pending =
+ atomic_read(&psys->wakeup_count))));
+
+ if (kthread_should_stop())
+ break;
+
+ if (pending == 0)
+ continue;
+
+ mutex_lock(&psys->mutex);
+ atomic_set(&psys->wakeup_count, 0);
+ ipu_psys_run_next(psys);
+ mutex_unlock(&psys->mutex);
+ }
+
+ return 0;
+}
+
+static void start_sp(struct ipu_bus_device *adev)
+{
+ struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
+ void __iomem *spc_regs_base = psys->pdata->base +
+ psys->pdata->ipdata->hw_variant.spc_offset;
+ u32 val = 0;
+
+ val |= IPU_PSYS_SPC_STATUS_START |
+ IPU_PSYS_SPC_STATUS_RUN |
+ IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
+ val |= psys->icache_prefetch_sp ?
+ IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH : 0;
+ writel(val, spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL);
+}
+
+static int query_sp(struct ipu_bus_device *adev)
+{
+ struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
+ void __iomem *spc_regs_base = psys->pdata->base +
+ psys->pdata->ipdata->hw_variant.spc_offset;
+ u32 val = readl(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL);
+
+ /* return true when READY == 1, START == 0 */
+ val &= IPU_PSYS_SPC_STATUS_READY | IPU_PSYS_SPC_STATUS_START;
+
+ return val == IPU_PSYS_SPC_STATUS_READY;
+}
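+
+/*
+ * start_sp()/query_sp() form the cell boot handshake used by the fw-com
+ * setup below: START | RUN | ICACHE_INVALIDATE is written to kick the SPC,
+ * and the cell counts as booted once READY is set with START cleared again.
+ */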
+
+static int ipu_psys_fw_init(struct ipu_psys *psys)
+{
+ unsigned int size;
+ struct ipu_fw_syscom_queue_config *queue_cfg;
+ struct ipu_fw_syscom_queue_config fw_psys_event_queue_cfg[] = {
+ {
+ IPU_FW_PSYS_EVENT_QUEUE_SIZE,
+ sizeof(struct ipu_fw_psys_event)
+ }
+ };
+ struct ipu_fw_psys_srv_init server_init = {
+ .ddr_pkg_dir_address = 0,
+ .host_ddr_pkg_dir = NULL,
+ .pkg_dir_size = 0,
+ .icache_prefetch_sp = psys->icache_prefetch_sp,
+ .icache_prefetch_isp = psys->icache_prefetch_isp,
+ };
+ struct ipu_fw_com_cfg fwcom = {
+ .num_output_queues = IPU_FW_PSYS_N_PSYS_EVENT_QUEUE_ID,
+ .output = fw_psys_event_queue_cfg,
+ .specific_addr = &server_init,
+ .specific_size = sizeof(server_init),
+ .cell_start = start_sp,
+ .cell_ready = query_sp,
+ .buttress_boot_offset = SYSCOM_BUTTRESS_FW_PARAMS_PSYS_OFFSET,
+ };
+ int i;
+
+ size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+ if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP)
+ size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+
+ queue_cfg = devm_kzalloc(&psys->adev->dev, sizeof(*queue_cfg) * size,
+ GFP_KERNEL);
+ if (!queue_cfg)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i++) {
+ queue_cfg[i].queue_size = IPU_FW_PSYS_CMD_QUEUE_SIZE;
+ queue_cfg[i].token_size = sizeof(struct ipu_fw_psys_cmd);
+ }
+
+ fwcom.input = queue_cfg;
+ fwcom.num_input_queues = size;
+ fwcom.dmem_addr = psys->pdata->ipdata->hw_variant.dmem_offset;
+
+ psys->fwcom = ipu_fw_com_prepare(&fwcom, psys->adev, psys->pdata->base);
+ if (!psys->fwcom) {
+ dev_err(&psys->adev->dev, "psys fw com prepare failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void run_fw_init_work(struct work_struct *work)
+{
+ struct fw_init_task *task = (struct fw_init_task *)work;
+ struct ipu_psys *psys = task->psys;
+ int rval;
+
+ rval = ipu_psys_fw_init(psys);
+
+ if (rval) {
+ dev_err(&psys->adev->dev, "FW init failed(%d)\n", rval);
+ ipu_psys_remove(psys->adev);
+ } else {
+ dev_info(&psys->adev->dev, "FW init done\n");
+ }
+}
+
+static int ipu_psys_probe(struct ipu_bus_device *adev)
+{
+ struct ipu_device *isp = adev->isp;
+ struct ipu_psys_pg *kpg, *kpg0;
+ struct ipu_psys *psys;
+ unsigned int minor;
+ int i, rval = -E2BIG;
+
+ rval = ipu_mmu_hw_init(adev->mmu);
+ if (rval)
+ return rval;
+
+ mutex_lock(&ipu_psys_mutex);
+
+ minor = find_next_zero_bit(ipu_psys_devices, IPU_PSYS_NUM_DEVICES, 0);
+ if (minor == IPU_PSYS_NUM_DEVICES) {
+ dev_err(&adev->dev, "too many devices\n");
+ goto out_unlock;
+ }
+
+ psys = devm_kzalloc(&adev->dev, sizeof(*psys), GFP_KERNEL);
+ if (!psys) {
+ rval = -ENOMEM;
+ goto out_unlock;
+ }
+
+ psys->adev = adev;
+ psys->pdata = adev->pdata;
+ psys->icache_prefetch_sp = 0;
+
+ psys->power_gating = 0;
+
+ ipu_trace_init(adev->isp, psys->pdata->base, &adev->dev,
+ psys_trace_blocks);
+
+ cdev_init(&psys->cdev, &ipu_psys_fops);
+ psys->cdev.owner = ipu_psys_fops.owner;
+
+ rval = cdev_add(&psys->cdev, MKDEV(MAJOR(ipu_psys_dev_t), minor), 1);
+ if (rval) {
+ dev_err(&adev->dev, "cdev_add failed (%d)\n", rval);
+ goto out_unlock;
+ }
+
+ set_bit(minor, ipu_psys_devices);
+
+ spin_lock_init(&psys->ready_lock);
+ spin_lock_init(&psys->pgs_lock);
+ psys->ready = 0;
+ psys->timeout = IPU_PSYS_CMD_TIMEOUT_MS;
+
+ mutex_init(&psys->mutex);
+ INIT_LIST_HEAD(&psys->fhs);
+ INIT_LIST_HEAD(&psys->pgs);
+ INIT_LIST_HEAD(&psys->started_kcmds_list);
+
+ init_waitqueue_head(&psys->sched_cmd_wq);
+ atomic_set(&psys->wakeup_count, 0);
+ /*
+ * Create a thread to schedule commands sent to the IPU firmware.
+ * The thread reduces the coupling between the command scheduler
+ * and the queueing of commands from user space to the driver.
+ */
+ psys->sched_cmd_thread = kthread_run(ipu_psys_sched_cmd, psys,
+ "psys_sched_cmd");
+
+ if (IS_ERR(psys->sched_cmd_thread)) {
+ psys->sched_cmd_thread = NULL;
+ mutex_destroy(&psys->mutex);
+ goto out_unlock;
+ }
+
+ ipu_bus_set_drvdata(adev, psys);
+
+ rval = ipu_psys_resource_pool_init(&psys->resource_pool_running);
+ if (rval < 0) {
+ dev_err(&psys->dev,
+ "unable to alloc process group resources\n");
+ goto out_mutex_destroy;
+ }
+
+ ipu6_psys_hw_res_variant_init();
+ psys->pkg_dir = isp->pkg_dir;
+ psys->pkg_dir_dma_addr = isp->pkg_dir_dma_addr;
+ psys->pkg_dir_size = isp->pkg_dir_size;
+ psys->fw_sgt = isp->fw_sgt;
+
+ /* allocate and map memory for process groups */
+ for (i = 0; i < IPU_PSYS_PG_POOL_SIZE; i++) {
+ kpg = kzalloc(sizeof(*kpg), GFP_KERNEL);
+ if (!kpg)
+ goto out_free_pgs;
+ kpg->pg = dma_alloc_attrs(&adev->dev,
+ IPU_PSYS_PG_MAX_SIZE,
+ &kpg->pg_dma_addr,
+ GFP_KERNEL, 0);
+ if (!kpg->pg) {
+ kfree(kpg);
+ goto out_free_pgs;
+ }
+ kpg->size = IPU_PSYS_PG_MAX_SIZE;
+ list_add(&kpg->list, &psys->pgs);
+ }
+
+ psys->caps.pg_count = ipu_cpd_pkg_dir_get_num_entries(psys->pkg_dir);
+
+ dev_info(&adev->dev, "pkg_dir entry count:%d\n", psys->caps.pg_count);
+ if (async_fw_init) {
+ INIT_DELAYED_WORK((struct delayed_work *)&fw_init_task,
+ run_fw_init_work);
+ fw_init_task.psys = psys;
+ schedule_delayed_work((struct delayed_work *)&fw_init_task, 0);
+ } else {
+ rval = ipu_psys_fw_init(psys);
+ }
+
+ if (rval) {
+ dev_err(&adev->dev, "FW init failed(%d)\n", rval);
+ goto out_free_pgs;
+ }
+
+ psys->dev.parent = &adev->dev;
+ psys->dev.bus = &ipu_psys_bus;
+ psys->dev.devt = MKDEV(MAJOR(ipu_psys_dev_t), minor);
+ psys->dev.release = ipu_psys_dev_release;
+ dev_set_name(&psys->dev, "ipu-psys%d", minor);
+ rval = device_register(&psys->dev);
+ if (rval < 0) {
+ dev_err(&psys->dev, "psys device_register failed\n");
+ goto out_release_fw_com;
+ }
+
+ /* Add the hw stepping information to caps */
+ strlcpy(psys->caps.dev_model, IPU_MEDIA_DEV_MODEL_NAME,
+ sizeof(psys->caps.dev_model));
+
+ pm_runtime_set_autosuspend_delay(&psys->adev->dev,
+ IPU_PSYS_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&psys->adev->dev);
+ pm_runtime_mark_last_busy(&psys->adev->dev);
+
+ mutex_unlock(&ipu_psys_mutex);
+
+#ifdef CONFIG_DEBUG_FS
+ /* A debugfs failure is not fatal. */
+ ipu_psys_init_debugfs(psys);
+#endif
+
+ adev->isp->cpd_fw_reload = &cpd_fw_reload;
+
+ dev_info(&adev->dev, "psys probe minor: %d\n", minor);
+
+ ipu_mmu_hw_cleanup(adev->mmu);
+
+ return 0;
+
+out_release_fw_com:
+ ipu_fw_com_release(psys->fwcom, 1);
+out_free_pgs:
+ list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) {
+ dma_free_attrs(&adev->dev, kpg->size, kpg->pg,
+ kpg->pg_dma_addr, 0);
+ kfree(kpg);
+ }
+
+ ipu_psys_resource_pool_cleanup(&psys->resource_pool_running);
+out_mutex_destroy:
+ mutex_destroy(&psys->mutex);
+ cdev_del(&psys->cdev);
+ if (psys->sched_cmd_thread) {
+ kthread_stop(psys->sched_cmd_thread);
+ psys->sched_cmd_thread = NULL;
+ }
+out_unlock:
+ /* Safe to call even if the init is not called */
+ ipu_trace_uninit(&adev->dev);
+ mutex_unlock(&ipu_psys_mutex);
+
+ ipu_mmu_hw_cleanup(adev->mmu);
+
+ return rval;
+}
+
+static void ipu_psys_remove(struct ipu_bus_device *adev)
+{
+ struct ipu_device *isp = adev->isp;
+ struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
+ struct ipu_psys_pg *kpg, *kpg0;
+
+#ifdef CONFIG_DEBUG_FS
+ if (isp->ipu_dir)
+ debugfs_remove_recursive(psys->debugfsdir);
+#endif
+
+ flush_workqueue(IPU_PSYS_WORK_QUEUE);
+
+ if (psys->sched_cmd_thread) {
+ kthread_stop(psys->sched_cmd_thread);
+ psys->sched_cmd_thread = NULL;
+ }
+
+ pm_runtime_dont_use_autosuspend(&psys->adev->dev);
+
+ mutex_lock(&ipu_psys_mutex);
+
+ list_for_each_entry_safe(kpg, kpg0, &psys->pgs, list) {
+ dma_free_attrs(&adev->dev, kpg->size, kpg->pg,
+ kpg->pg_dma_addr, 0);
+ kfree(kpg);
+ }
+
+ if (psys->fwcom && ipu_fw_com_release(psys->fwcom, 1))
+ dev_err(&adev->dev, "fw com release failed.\n");
+
+ kfree(psys->server_init);
+ kfree(psys->syscom_config);
+
+ ipu_trace_uninit(&adev->dev);
+
+ ipu_psys_resource_pool_cleanup(&psys->resource_pool_running);
+
+ device_unregister(&psys->dev);
+
+ clear_bit(MINOR(psys->cdev.dev), ipu_psys_devices);
+ cdev_del(&psys->cdev);
+
+ mutex_unlock(&ipu_psys_mutex);
+
+ mutex_destroy(&psys->mutex);
+
+ dev_info(&adev->dev, "removed\n");
+}
+
+static irqreturn_t psys_isr_threaded(struct ipu_bus_device *adev)
+{
+ struct ipu_psys *psys = ipu_bus_get_drvdata(adev);
+ void __iomem *base = psys->pdata->base;
+ u32 status;
+ int r;
+
+ mutex_lock(&psys->mutex);
+#ifdef CONFIG_PM
+ r = pm_runtime_get_if_in_use(&psys->adev->dev);
+ if (!r || WARN_ON_ONCE(r < 0)) {
+ mutex_unlock(&psys->mutex);
+ return IRQ_NONE;
+ }
+#endif
+
+ status = readl(base + IPU_REG_PSYS_GPDEV_IRQ_STATUS);
+ writel(status, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR);
+
+ if (status & IPU_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0)) {
+ writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0));
+ ipu_psys_handle_events(psys);
+ }
+
+ pm_runtime_mark_last_busy(&psys->adev->dev);
+ pm_runtime_put_autosuspend(&psys->adev->dev);
+ mutex_unlock(&psys->mutex);
+
+ return status ? IRQ_HANDLED : IRQ_NONE;
+}
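+
+/*
+ * The pm_runtime_get_if_in_use() call above is what keeps this threaded ISR
+ * from racing with runtime suspend: if the device is not in active use, the
+ * interrupt is reported as IRQ_NONE and no device registers are touched.
+ */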
+
+static struct ipu_bus_driver ipu_psys_driver = {
+ .probe = ipu_psys_probe,
+ .remove = ipu_psys_remove,
+ .isr_threaded = psys_isr_threaded,
+ .wanted = IPU_PSYS_NAME,
+ .drv = {
+ .name = IPU_PSYS_NAME,
+ .owner = THIS_MODULE,
+ .pm = PSYS_PM_OPS,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+static int __init ipu_psys_init(void)
+{
+ int rval = alloc_chrdev_region(&ipu_psys_dev_t, 0,
+ IPU_PSYS_NUM_DEVICES, IPU_PSYS_NAME);
+ if (rval) {
+ pr_err("can't alloc psys chrdev region (%d)\n", rval);
+ return rval;
+ }
+
+ rval = bus_register(&ipu_psys_bus);
+ if (rval) {
+ pr_warn("can't register psys bus (%d)\n", rval);
+ goto out_bus_register;
+ }
+
+ ipu_bus_register_driver(&ipu_psys_driver);
+
+ return rval;
+
+out_bus_register:
+ unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES);
+
+ return rval;
+}
+
+static void __exit ipu_psys_exit(void)
+{
+ ipu_bus_unregister_driver(&ipu_psys_driver);
+ bus_unregister(&ipu_psys_bus);
+ unregister_chrdev_region(ipu_psys_dev_t, IPU_PSYS_NUM_DEVICES);
+}
+
+static const struct pci_device_id ipu_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6_PCI_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6SE_PCI_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_ADL_P_PCI_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_ADL_N_PCI_ID)},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, ipu_pci_tbl);
+
+module_init(ipu_psys_init);
+module_exit(ipu_psys_exit);
+
+MODULE_AUTHOR("Antti Laakso <antti.laakso@intel.com>");
+MODULE_AUTHOR("Bin Han <bin.b.han@intel.com>");
+MODULE_AUTHOR("Renwei Wu <renwei.wu@intel.com>");
+MODULE_AUTHOR("Jianxu Zheng <jian.xu.zheng@intel.com>");
+MODULE_AUTHOR("Xia Wu <xia.wu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_AUTHOR("Zaikuo Wang <zaikuo.wang@intel.com>");
+MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ipu processing system driver");
new file mode 100644
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_PSYS_H
+#define IPU_PSYS_H
+
+#include <linux/cdev.h>
+#include <linux/workqueue.h>
+
+#include "ipu.h"
+#include "ipu-pdata.h"
+#include "ipu-fw-psys.h"
+#include "ipu-platform-psys.h"
+
+#define IPU_PSYS_PG_POOL_SIZE 16
+#define IPU_PSYS_PG_MAX_SIZE 8192
+#define IPU_MAX_PSYS_CMD_BUFFERS 32
+#define IPU_PSYS_EVENT_CMD_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS
+#define IPU_PSYS_EVENT_FRAGMENT_COMPLETE IPU_FW_PSYS_EVENT_TYPE_SUCCESS
+#define IPU_PSYS_CLOSE_TIMEOUT_US 50
+#define IPU_PSYS_CLOSE_TIMEOUT (100000 / IPU_PSYS_CLOSE_TIMEOUT_US)
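+/*
+ * I.e. the close handshake may be retried IPU_PSYS_CLOSE_TIMEOUT =
+ * 100000 / 50 = 2000 times at 50 us intervals, a total budget of ~100 ms.
+ */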
+#define IPU_PSYS_WORK_QUEUE system_power_efficient_wq
+#define IPU_MAX_RESOURCES 128
+
+/* Opaque structure. Do not access fields. */
+struct ipu_resource {
+ u32 id;
+ int elements; /* Number of elements available to allocation */
+ unsigned long *bitmap; /* Allocation bitmap, a bit for each element */
+};
+
+enum ipu_resource_type {
+ IPU_RESOURCE_DEV_CHN = 0,
+ IPU_RESOURCE_EXT_MEM,
+ IPU_RESOURCE_DFM
+};
+
+/* Allocation of resource(s) */
+/* Opaque structure. Do not access fields. */
+struct ipu_resource_alloc {
+ enum ipu_resource_type type;
+ struct ipu_resource *resource;
+ int elements;
+ int pos;
+};
+
+/*
+ * This struct represents all of the currently allocated
+ * resources of the IPU model. It is also used for allocating
+ * resources for the next set of PGs to be run on the IPU
+ * (i.e. those PGs which are not yet running and which don't
+ * yet reserve real IPU resources).
+ * Larger arrays are used to cover the existing resource quantities.
+ */
+
+/* resource sizes may need to be expanded for a new resource model */
+struct ipu_psys_resource_pool {
+ u32 cells; /* Bitmask of cells allocated */
+ struct ipu_resource dev_channels[16];
+ struct ipu_resource ext_memory[32];
+ struct ipu_resource dfms[16];
+ DECLARE_BITMAP(cmd_queues, 32);
+ /* Protects cmd_queues bitmap */
+ spinlock_t queues_lock;
+};
+
+/*
+ * This struct keeps track of the resources allocated for a specific PG.
+ * It is used for freeing up resources from struct ipu_psys_resources
+ * when the PG is released from the IPU (or the model of the IPU).
+ */
+struct ipu_psys_resource_alloc {
+ u32 cells; /* Bitmask of cells needed */
+ struct ipu_resource_alloc
+ resource_alloc[IPU_MAX_RESOURCES];
+ int resources;
+};
+
+struct task_struct;
+struct ipu_psys {
+ struct ipu_psys_capability caps;
+ struct cdev cdev;
+ struct device dev;
+
+ struct mutex mutex; /* Protects various psys state */
+ int ready; /* psys fw status */
+ bool icache_prefetch_sp;
+ bool icache_prefetch_isp;
+ spinlock_t ready_lock; /* protect psys firmware state */
+ spinlock_t pgs_lock; /* Protect pgs list access */
+ struct list_head fhs;
+ struct list_head pgs;
+ struct list_head started_kcmds_list;
+ struct ipu_psys_pdata *pdata;
+ struct ipu_bus_device *adev;
+ struct ia_css_syscom_context *dev_ctx;
+ struct ia_css_syscom_config *syscom_config;
+ struct ia_css_psys_server_init *server_init;
+ struct task_struct *sched_cmd_thread;
+ wait_queue_head_t sched_cmd_wq;
+ atomic_t wakeup_count; /* Psys schedule thread wakeup count */
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfsdir;
+#endif
+
+ /* Resources needed to be managed for process groups */
+ struct ipu_psys_resource_pool resource_pool_running;
+
+ const struct firmware *fw;
+ struct sg_table fw_sgt;
+ u64 *pkg_dir;
+ dma_addr_t pkg_dir_dma_addr;
+ unsigned int pkg_dir_size;
+ unsigned long timeout;
+
+ int active_kcmds, started_kcmds;
+ void *fwcom;
+
+ int power_gating;
+};
+
+struct ipu_psys_fh {
+ struct ipu_psys *psys;
+ struct mutex mutex; /* Protects bufmap & kcmds fields */
+ struct list_head list;
+ struct list_head bufmap;
+ wait_queue_head_t wait;
+ struct ipu_psys_scheduler sched;
+};
+
+struct ipu_psys_pg {
+ struct ipu_fw_psys_process_group *pg;
+ size_t size;
+ size_t pg_size;
+ dma_addr_t pg_dma_addr;
+ struct list_head list;
+ struct ipu_psys_resource_alloc resource_alloc;
+};
+
+struct ipu_psys_kcmd {
+ struct ipu_psys_fh *fh;
+ struct list_head list;
+ struct ipu_psys_buffer_set *kbuf_set;
+ enum ipu_psys_cmd_state state;
+ void *pg_manifest;
+ size_t pg_manifest_size;
+ struct ipu_psys_kbuffer **kbufs;
+ struct ipu_psys_buffer *buffers;
+ size_t nbuffers;
+ struct ipu_fw_psys_process_group *pg_user;
+ struct ipu_psys_pg *kpg;
+ u64 user_token;
+ u64 issue_id;
+ u32 priority;
+ u32 kernel_enable_bitmap[4];
+ u32 terminal_enable_bitmap[4];
+ u32 routing_enable_bitmap[4];
+ u32 rbm[5];
+ struct ipu_buttress_constraint constraint;
+ struct ipu_psys_event ev;
+ struct timer_list watchdog;
+};
+
+struct ipu_dma_buf_attach {
+ struct device *dev;
+ u64 len;
+ void *userptr;
+ struct sg_table *sgt;
+ bool vma_is_io;
+ struct page **pages;
+ size_t npages;
+};
+
+struct ipu_psys_kbuffer {
+ u64 len;
+ void *userptr;
+ u32 flags;
+ int fd;
+ void *kaddr;
+ struct list_head list;
+ dma_addr_t dma_addr;
+ struct sg_table *sgt;
+ struct dma_buf_attachment *db_attach;
+ struct dma_buf *dbuf;
+ bool valid; /* True when buffer is usable */
+};
+
+#define inode_to_ipu_psys(inode) \
+ container_of((inode)->i_cdev, struct ipu_psys, cdev)
+
+#ifdef CONFIG_COMPAT
+long ipu_psys_compat_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg);
+#endif
+
+void ipu_psys_setup_hw(struct ipu_psys *psys);
+void ipu_psys_subdomains_power(struct ipu_psys *psys, bool on);
+void ipu_psys_handle_events(struct ipu_psys *psys);
+int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh);
+void ipu_psys_run_next(struct ipu_psys *psys);
+struct ipu_psys_pg *__get_pg_buf(struct ipu_psys *psys, size_t pg_size);
+struct ipu_psys_kbuffer *
+ipu_psys_lookup_kbuffer(struct ipu_psys_fh *fh, int fd);
+int ipu_psys_mapbuf_locked(int fd, struct ipu_psys_fh *fh,
+ struct ipu_psys_kbuffer *kbuf);
+struct ipu_psys_kbuffer *
+ipu_psys_lookup_kbuffer_by_kaddr(struct ipu_psys_fh *fh, void *kaddr);
+#ifdef IPU_PSYS_GPC
+int ipu_psys_gpc_init_debugfs(struct ipu_psys *psys);
+#endif
+int ipu_psys_resource_pool_init(struct ipu_psys_resource_pool *pool);
+void ipu_psys_resource_pool_cleanup(struct ipu_psys_resource_pool *pool);
+struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh);
+long ipu_ioctl_dqevent(struct ipu_psys_event *event,
+ struct ipu_psys_fh *fh, unsigned int f_flags);
+
+#endif /* IPU_PSYS_H */
new file mode 100644
@@ -0,0 +1,869 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2014 - 2021 Intel Corporation
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include "ipu.h"
+#include "ipu-platform-regs.h"
+#include "ipu-trace.h"
+
+struct trace_register_range {
+ u32 start;
+ u32 end;
+};
+
+#define MEMORY_RING_BUFFER_SIZE (SZ_1M * 32)
+#define TRACE_MESSAGE_SIZE 16
+/*
+ * It appears that the trace unit sometimes writes outside the given buffer.
+ * To avoid memory corruption, one extra page is reserved at the end
+ * of the buffer. The extra area is also read, since it may contain
+ * valid data.
+ */
+#define MEMORY_RING_BUFFER_GUARD PAGE_SIZE
+#define MEMORY_RING_BUFFER_OVERREAD MEMORY_RING_BUFFER_GUARD
+#define MAX_TRACE_REGISTERS 200
+#define TRACE_CONF_DUMP_BUFFER_SIZE (MAX_TRACE_REGISTERS * 2 * 32)
+#define TRACE_CONF_DATA_MAX_LEN (1024 * 4)
+#define WPT_TRACE_CONF_DATA_MAX_LEN (1024 * 64)
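+
+/*
+ * With 16-byte trace messages the 32 MiB ring thus holds
+ * SZ_1M * 32 / 16 = 2097152 messages before wrapping; the guard page on
+ * top of that is only ever read, never handed to the trace unit.
+ */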
+
+struct config_value {
+ u32 reg;
+ u32 value;
+};
+
+struct ipu_trace_buffer {
+ dma_addr_t dma_handle;
+ void *memory_buffer;
+};
+
+struct ipu_subsystem_wptrace_config {
+ bool open;
+ char *conf_dump_buffer;
+ int size_conf_dump;
+ unsigned int fill_level;
+ struct config_value config[MAX_TRACE_REGISTERS];
+};
+
+struct ipu_subsystem_trace_config {
+ u32 offset;
+ void __iomem *base;
+ struct ipu_trace_buffer memory; /* ring buffer */
+ struct device *dev;
+ struct ipu_trace_block *blocks;
+ unsigned int fill_level; /* Number of regs in the config table below */
+ bool running;
+ /* Cached register values */
+ struct config_value config[MAX_TRACE_REGISTERS];
+ /* watchpoint trace info */
+ struct ipu_subsystem_wptrace_config wpt;
+};
+
+struct ipu_trace {
+ struct mutex lock; /* Protect ipu trace operations */
+ bool open;
+ char *conf_dump_buffer;
+ int size_conf_dump;
+
+ struct ipu_subsystem_trace_config isys;
+ struct ipu_subsystem_trace_config psys;
+};
+
+static void __ipu_trace_restore(struct device *dev)
+{
+ struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+ struct ipu_device *isp = adev->isp;
+ struct ipu_trace *trace = isp->trace;
+ struct config_value *config;
+ struct ipu_subsystem_trace_config *sys = adev->trace_cfg;
+ struct ipu_trace_block *blocks;
+ u32 mapped_trace_buffer;
+ void __iomem *addr = NULL;
+ int i;
+
+ if (trace->open) {
+ dev_info(dev, "Trace control file open. Skipping update\n");
+ return;
+ }
+
+ if (!sys)
+ return;
+
+ /* leave if no trace configuration for this subsystem */
+ if (sys->fill_level == 0)
+ return;
+
+ /* Find trace unit base address */
+ blocks = sys->blocks;
+ while (blocks->type != IPU_TRACE_BLOCK_END) {
+ if (blocks->type == IPU_TRACE_BLOCK_TUN) {
+ addr = sys->base + blocks->offset;
+ break;
+ }
+ blocks++;
+ }
+ if (!addr)
+ return;
+
+ if (!sys->memory.memory_buffer) {
+ sys->memory.memory_buffer =
+ dma_alloc_coherent(dev, MEMORY_RING_BUFFER_SIZE +
+ MEMORY_RING_BUFFER_GUARD,
+ &sys->memory.dma_handle,
+ GFP_KERNEL);
+ }
+
+ if (!sys->memory.memory_buffer) {
+ dev_err(dev, "No memory for tracing. Trace unit disabled\n");
+ return;
+ }
+
+ config = sys->config;
+ mapped_trace_buffer = sys->memory.dma_handle;
+
+ /* ring buffer base */
+ writel(mapped_trace_buffer, addr + TRACE_REG_TUN_DRAM_BASE_ADDR);
+
+ /* ring buffer end */
+ writel(mapped_trace_buffer + MEMORY_RING_BUFFER_SIZE -
+ TRACE_MESSAGE_SIZE, addr + TRACE_REG_TUN_DRAM_END_ADDR);
+
+ /* Infobits for ddr trace */
+ writel(IPU_INFO_REQUEST_DESTINATION_PRIMARY,
+ addr + TRACE_REG_TUN_DDR_INFO_VAL);
+
+ /* Find trace timer reset address */
+ addr = NULL;
+ blocks = sys->blocks;
+ while (blocks->type != IPU_TRACE_BLOCK_END) {
+ if (blocks->type == IPU_TRACE_TIMER_RST) {
+ addr = sys->base + blocks->offset;
+ break;
+ }
+ blocks++;
+ }
+ if (!addr) {
+ dev_err(dev, "No trace reset addr\n");
+ return;
+ }
+
+ /* Remove reset from trace timers */
+ writel(TRACE_REG_GPREG_TRACE_TIMER_RST_OFF, addr);
+
+ /* Register config received from userspace */
+ for (i = 0; i < sys->fill_level; i++) {
+ dev_dbg(dev,
+ "Trace restore: reg 0x%08x, value 0x%08x\n",
+ config[i].reg, config[i].value);
+ writel(config[i].value, isp->base + config[i].reg);
+ }
+
+ /* Register wpt config received from userspace, and only psys has wpt */
+ config = sys->wpt.config;
+ for (i = 0; i < sys->wpt.fill_level; i++) {
+ dev_dbg(dev, "Trace restore: reg 0x%08x, value 0x%08x\n",
+ config[i].reg, config[i].value);
+ writel(config[i].value, isp->base + config[i].reg);
+ }
+ sys->running = true;
+}
+
+void ipu_trace_restore(struct device *dev)
+{
+ struct ipu_trace *trace = to_ipu_bus_device(dev)->isp->trace;
+
+ if (!trace)
+ return;
+
+ mutex_lock(&trace->lock);
+ __ipu_trace_restore(dev);
+ mutex_unlock(&trace->lock);
+}
+EXPORT_SYMBOL_GPL(ipu_trace_restore);
+
+static void __ipu_trace_stop(struct device *dev)
+{
+ struct ipu_subsystem_trace_config *sys =
+ to_ipu_bus_device(dev)->trace_cfg;
+ struct ipu_trace_block *blocks;
+
+ if (!sys)
+ return;
+
+ if (!sys->running)
+ return;
+ sys->running = false;
+
+ /* Turn off all the gpc blocks */
+ blocks = sys->blocks;
+ while (blocks->type != IPU_TRACE_BLOCK_END) {
+ if (blocks->type == IPU_TRACE_BLOCK_GPC) {
+ writel(0, sys->base + blocks->offset +
+ TRACE_REG_GPC_OVERALL_ENABLE);
+ }
+ blocks++;
+ }
+
+ /* Turn off all the trace monitors */
+ blocks = sys->blocks;
+ while (blocks->type != IPU_TRACE_BLOCK_END) {
+ if (blocks->type == IPU_TRACE_BLOCK_TM) {
+ writel(0, sys->base + blocks->offset +
+ TRACE_REG_TM_TRACE_ENABLE_NPK);
+
+ writel(0, sys->base + blocks->offset +
+ TRACE_REG_TM_TRACE_ENABLE_DDR);
+ }
+ blocks++;
+ }
+
+ /* Turn off trace units */
+ blocks = sys->blocks;
+ while (blocks->type != IPU_TRACE_BLOCK_END) {
+ if (blocks->type == IPU_TRACE_BLOCK_TUN) {
+ writel(0, sys->base + blocks->offset +
+ TRACE_REG_TUN_DDR_ENABLE);
+ writel(0, sys->base + blocks->offset +
+ TRACE_REG_TUN_NPK_ENABLE);
+ }
+ blocks++;
+ }
+}
+
+void ipu_trace_stop(struct device *dev)
+{
+ struct ipu_trace *trace = to_ipu_bus_device(dev)->isp->trace;
+
+ if (!trace)
+ return;
+
+ mutex_lock(&trace->lock);
+ __ipu_trace_stop(dev);
+ mutex_unlock(&trace->lock);
+}
+EXPORT_SYMBOL_GPL(ipu_trace_stop);
+
+static int update_register_cache(struct ipu_device *isp, u32 reg, u32 value)
+{
+ struct ipu_trace *dctrl = isp->trace;
+ struct ipu_subsystem_trace_config *sys;
+ int rval = -EINVAL;
+
+ if (dctrl->isys.offset == dctrl->psys.offset) {
+ /* For the IPU with uniform address space */
+ if (reg >= IPU_ISYS_OFFSET &&
+ reg < IPU_ISYS_OFFSET + TRACE_REG_MAX_ISYS_OFFSET)
+ sys = &dctrl->isys;
+ else if (reg >= IPU_PSYS_OFFSET &&
+ reg < IPU_PSYS_OFFSET + TRACE_REG_MAX_PSYS_OFFSET)
+ sys = &dctrl->psys;
+ else
+ goto error;
+ } else {
+ if (dctrl->isys.offset &&
+ reg >= dctrl->isys.offset &&
+ reg < dctrl->isys.offset + TRACE_REG_MAX_ISYS_OFFSET)
+ sys = &dctrl->isys;
+ else if (dctrl->psys.offset &&
+ reg >= dctrl->psys.offset &&
+ reg < dctrl->psys.offset + TRACE_REG_MAX_PSYS_OFFSET)
+ sys = &dctrl->psys;
+ else
+ goto error;
+ }
+
+ if (sys->fill_level < MAX_TRACE_REGISTERS) {
+ dev_dbg(sys->dev,
+ "Trace reg addr 0x%08x value 0x%08x\n", reg, value);
+ sys->config[sys->fill_level].reg = reg;
+ sys->config[sys->fill_level].value = value;
+ sys->fill_level++;
+ } else {
+ rval = -ENOMEM;
+ goto error;
+ }
+ return 0;
+error:
+ dev_info(&isp->pdev->dev,
+ "Trace register 0x%08x ignored (invalid address or config table full)\n",
+ reg);
+ return rval;
+}
+
+static void traceconf_dump(struct ipu_device *isp)
+{
+ struct ipu_subsystem_trace_config *sys[2] = {
+ &isp->trace->isys,
+ &isp->trace->psys
+ };
+ int i, j, rem_size;
+ char *out;
+
+ isp->trace->size_conf_dump = 0;
+ out = isp->trace->conf_dump_buffer;
+ rem_size = TRACE_CONF_DUMP_BUFFER_SIZE;
+
+ for (j = 0; j < ARRAY_SIZE(sys); j++) {
+ for (i = 0; i < sys[j]->fill_level && rem_size > 0; i++) {
+ int bytes_print;
+ int n = snprintf(out, rem_size, "0x%08x = 0x%08x\n",
+ sys[j]->config[i].reg,
+ sys[j]->config[i].value);
+
+ bytes_print = min(n, rem_size - 1);
+ rem_size -= bytes_print;
+ out += bytes_print;
+ }
+ }
+ isp->trace->size_conf_dump = out - isp->trace->conf_dump_buffer;
+}
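+
+/*
+ * Each cached register is dumped above as one "address = value" text
+ * line, so reading the traceconf file yields output such as (values
+ * illustrative only):
+ *
+ * 0x001d0900 = 0x00000001
+ */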
+
+static void clear_trace_buffer(struct ipu_subsystem_trace_config *sys)
+{
+ if (!sys->memory.memory_buffer)
+ return;
+
+ memset(sys->memory.memory_buffer, 0, MEMORY_RING_BUFFER_SIZE +
+ MEMORY_RING_BUFFER_OVERREAD);
+
+ dma_sync_single_for_device(sys->dev,
+ sys->memory.dma_handle,
+ MEMORY_RING_BUFFER_SIZE +
+ MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE);
+}
+
+static int traceconf_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct ipu_device *isp;
+
+ if (!inode->i_private)
+ return -EACCES;
+
+ isp = inode->i_private;
+
+ ret = mutex_trylock(&isp->trace->lock);
+ if (!ret)
+ return -EBUSY;
+
+ if (isp->trace->open) {
+ mutex_unlock(&isp->trace->lock);
+ return -EBUSY;
+ }
+
+ file->private_data = isp;
+ isp->trace->open = 1;
+ if (file->f_mode & FMODE_WRITE) {
+ /* TBD: Allocate temp buffer for processing.
+ * Push validated buffer to active config
+ */
+
+ /* Forget old config if opened for write */
+ isp->trace->isys.fill_level = 0;
+ isp->trace->psys.fill_level = 0;
+ isp->trace->psys.wpt.fill_level = 0;
+ }
+
+ if (file->f_mode & FMODE_READ) {
+ isp->trace->conf_dump_buffer =
+ vzalloc(TRACE_CONF_DUMP_BUFFER_SIZE);
+ if (!isp->trace->conf_dump_buffer) {
+ isp->trace->open = 0;
+ mutex_unlock(&isp->trace->lock);
+ return -ENOMEM;
+ }
+ traceconf_dump(isp);
+ }
+ mutex_unlock(&isp->trace->lock);
+ return 0;
+}
+
+static ssize_t traceconf_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct ipu_device *isp = file->private_data;
+
+ return simple_read_from_buffer(buf, len, ppos,
+ isp->trace->conf_dump_buffer,
+ isp->trace->size_conf_dump);
+}
+
+static ssize_t traceconf_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ int i;
+ struct ipu_device *isp = file->private_data;
+ ssize_t bytes = 0;
+ char *ipu_trace_buffer = NULL;
+ size_t buffer_size = 0;
+ u32 ipu_trace_number = 0;
+ struct config_value *cfg_buffer = NULL;
+
+ if ((*ppos < 0) || (len > TRACE_CONF_DATA_MAX_LEN) ||
+ (len < sizeof(ipu_trace_number))) {
+ dev_info(&isp->pdev->dev,
+ "length is error, len:%ld, loff:%lld\n",
+ len, *ppos);
+ return -EINVAL;
+ }
+
+ ipu_trace_buffer = vzalloc(len);
+ if (!ipu_trace_buffer)
+ return -ENOMEM;
+
+ bytes = copy_from_user(ipu_trace_buffer, buf, len);
+ if (bytes != 0) {
+ vfree(ipu_trace_buffer);
+ return -EFAULT;
+ }
+
+ memcpy(&ipu_trace_number, ipu_trace_buffer, sizeof(u32));
+ buffer_size = ipu_trace_number * sizeof(struct config_value);
+ if ((buffer_size + sizeof(ipu_trace_number)) != len) {
+ dev_info(&isp->pdev->dev,
+ "File size is not right, len:%ld, buffer_size:%zu\n",
+ len, buffer_size);
+ vfree(ipu_trace_buffer);
+ return -EFAULT;
+ }
+
+ mutex_lock(&isp->trace->lock);
+ cfg_buffer = (struct config_value *)(ipu_trace_buffer + sizeof(u32));
+ for (i = 0; i < ipu_trace_number; i++) {
+ update_register_cache(isp, cfg_buffer[i].reg,
+ cfg_buffer[i].value);
+ }
+ mutex_unlock(&isp->trace->lock);
+ vfree(ipu_trace_buffer);
+
+ return len;
+}
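+
+/*
+ * The blob parsed above is a u32 register count followed by that many
+ * {reg, value} pairs, delivered in a single write(). A hedged userspace
+ * sketch, assuming struct config_value is two packed u32s and that the
+ * debugfs path matches the directory created for this device:
+ *
+ * struct { uint32_t n, reg, val; } blob = { 1, 0x001d0900, 0x1 };
+ * int fd = open("/sys/kernel/debug/<ipu-dir>/traceconf", O_WRONLY);
+ * write(fd, &blob, sizeof(blob));
+ */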
+
+static int traceconf_release(struct inode *inode, struct file *file)
+{
+ struct ipu_device *isp = file->private_data;
+ struct device *psys_dev = isp->psys ? &isp->psys->dev : NULL;
+ struct device *isys_dev = isp->isys ? &isp->isys->dev : NULL;
+ int pm_rval = -EINVAL;
+
+ /*
+ * Turn the devices on outside the trace->lock mutex: a PM transition
+ * may call a function that tries to take the same lock. Also do this
+ * before trace->open is set back to 0, to avoid a double restore (one
+ * here and one in the PM transition). We cannot rely purely on the
+ * restore done by the PM callbacks, since trace configuration can
+ * happen in any phase relative to other activity.
+ */
+
+ if (file->f_mode & FMODE_WRITE) {
+ if (isys_dev)
+ pm_rval = pm_runtime_get_sync(isys_dev);
+
+ if (pm_rval >= 0) {
+ /* ISYS ok or missing */
+ if (psys_dev)
+ pm_rval = pm_runtime_get_sync(psys_dev);
+
+ if (pm_rval < 0) {
+ pm_runtime_put_noidle(psys_dev);
+ if (isys_dev)
+ pm_runtime_put(isys_dev);
+ }
+ } else if (isys_dev) {
+ pm_runtime_put_noidle(isys_dev);
+ }
+ }
+
+ mutex_lock(&isp->trace->lock);
+ isp->trace->open = 0;
+ vfree(isp->trace->conf_dump_buffer);
+ isp->trace->conf_dump_buffer = NULL;
+
+ if (pm_rval >= 0) {
+ /* Update new cfg to HW */
+ if (isys_dev) {
+ __ipu_trace_stop(isys_dev);
+ clear_trace_buffer(isp->isys->trace_cfg);
+ __ipu_trace_restore(isys_dev);
+ }
+
+ if (psys_dev) {
+ __ipu_trace_stop(psys_dev);
+ clear_trace_buffer(isp->psys->trace_cfg);
+ __ipu_trace_restore(psys_dev);
+ }
+ }
+
+ mutex_unlock(&isp->trace->lock);
+
+ if (pm_rval >= 0) {
+ /* Again - this must be done with trace->lock not taken */
+ if (psys_dev)
+ pm_runtime_put(psys_dev);
+ if (isys_dev)
+ pm_runtime_put(isys_dev);
+ }
+ return 0;
+}
+
+static const struct file_operations ipu_traceconf_fops = {
+ .owner = THIS_MODULE,
+ .open = traceconf_open,
+ .release = traceconf_release,
+ .read = traceconf_read,
+ .write = traceconf_write,
+ .llseek = no_llseek,
+};
+
+static void wptraceconf_dump(struct ipu_device *isp)
+{
+ struct ipu_subsystem_wptrace_config *sys = &isp->trace->psys.wpt;
+ int i, rem_size;
+ char *out;
+
+ sys->size_conf_dump = 0;
+ out = sys->conf_dump_buffer;
+ rem_size = TRACE_CONF_DUMP_BUFFER_SIZE;
+
+ for (i = 0; i < sys->fill_level && rem_size > 0; i++) {
+ int bytes_print;
+ int n = snprintf(out, rem_size, "0x%08x = 0x%08x\n",
+ sys->config[i].reg,
+ sys->config[i].value);
+
+ bytes_print = min(n, rem_size - 1);
+ rem_size -= bytes_print;
+ out += bytes_print;
+ }
+ sys->size_conf_dump = out - sys->conf_dump_buffer;
+}
+
+static int wptraceconf_open(struct inode *inode, struct file *file)
+{
+ int ret;
+ struct ipu_device *isp;
+
+ if (!inode->i_private)
+ return -EACCES;
+
+ isp = inode->i_private;
+ ret = mutex_trylock(&isp->trace->lock);
+ if (!ret)
+ return -EBUSY;
+
+ if (isp->trace->psys.wpt.open) {
+ mutex_unlock(&isp->trace->lock);
+ return -EBUSY;
+ }
+
+ file->private_data = isp;
+ isp->trace->psys.wpt.open = 1;
+ if (file->f_mode & FMODE_WRITE) {
+ /* TBD: Allocate temp buffer for processing.
+ * Push validated buffer to active config
+ */
+ /* Forget old config if opened for write */
+ isp->trace->psys.wpt.fill_level = 0;
+ }
+
+ if (file->f_mode & FMODE_READ) {
+ isp->trace->psys.wpt.conf_dump_buffer =
+ vzalloc(TRACE_CONF_DUMP_BUFFER_SIZE);
+ if (!isp->trace->psys.wpt.conf_dump_buffer) {
+ isp->trace->psys.wpt.open = 0;
+ mutex_unlock(&isp->trace->lock);
+ return -ENOMEM;
+ }
+ wptraceconf_dump(isp);
+ }
+ mutex_unlock(&isp->trace->lock);
+ return 0;
+}
+
+static ssize_t wptraceconf_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct ipu_device *isp = file->private_data;
+
+ return simple_read_from_buffer(buf, len, ppos,
+ isp->trace->psys.wpt.conf_dump_buffer,
+ isp->trace->psys.wpt.size_conf_dump);
+}
+
+static ssize_t wptraceconf_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ int i;
+ struct ipu_device *isp = file->private_data;
+ ssize_t bytes = 0;
+ char *wpt_info_buffer = NULL;
+ size_t buffer_size = 0;
+ u32 wp_node_number = 0;
+ struct config_value *wpt_buffer = NULL;
+ struct ipu_subsystem_wptrace_config *wpt = &isp->trace->psys.wpt;
+
+ if ((*ppos < 0) || (len > WPT_TRACE_CONF_DATA_MAX_LEN) ||
+ (len < sizeof(wp_node_number))) {
+ dev_info(&isp->pdev->dev,
+ "length is error, len:%ld, loff:%lld\n",
+ len, *ppos);
+ return -EINVAL;
+ }
+
+ wpt_info_buffer = vzalloc(len);
+ if (!wpt_info_buffer)
+ return -ENOMEM;
+
+ bytes = copy_from_user(wpt_info_buffer, buf, len);
+ if (bytes != 0) {
+ vfree(wpt_info_buffer);
+ return -EFAULT;
+ }
+
+ memcpy(&wp_node_number, wpt_info_buffer, sizeof(u32));
+ buffer_size = wp_node_number * sizeof(struct config_value);
+ if ((buffer_size + sizeof(wp_node_number)) != len) {
+ dev_info(&isp->pdev->dev,
+ "File size is not right, len:%ld, buffer_size:%zu\n",
+ len, buffer_size);
+ vfree(wpt_info_buffer);
+ return -EFAULT;
+ }
+
+ mutex_lock(&isp->trace->lock);
+ wpt_buffer = (struct config_value *)(wpt_info_buffer + sizeof(u32));
+ for (i = 0; i < wp_node_number; i++) {
+ if (wpt->fill_level < MAX_TRACE_REGISTERS) {
+ wpt->config[wpt->fill_level].reg = wpt_buffer[i].reg;
+ wpt->config[wpt->fill_level].value =
+ wpt_buffer[i].value;
+ wpt->fill_level++;
+ } else {
+ dev_info(&isp->pdev->dev,
+ "Trace config table full, ignoring reg 0x%08x\n",
+ wpt_buffer[i].reg);
+ break;
+ }
+ }
+ mutex_unlock(&isp->trace->lock);
+ vfree(wpt_info_buffer);
+
+ return len;
+}
+
+static int wptraceconf_release(struct inode *inode, struct file *file)
+{
+ struct ipu_device *isp = file->private_data;
+
+ mutex_lock(&isp->trace->lock);
+ isp->trace->psys.wpt.open = 0;
+ vfree(isp->trace->psys.wpt.conf_dump_buffer);
+ isp->trace->psys.wpt.conf_dump_buffer = NULL;
+ mutex_unlock(&isp->trace->lock);
+
+ return 0;
+}
+
+static const struct file_operations ipu_wptraceconf_fops = {
+ .owner = THIS_MODULE,
+ .open = wptraceconf_open,
+ .release = wptraceconf_release,
+ .read = wptraceconf_read,
+ .write = wptraceconf_write,
+ .llseek = no_llseek,
+};
+
+static int gettrace_open(struct inode *inode, struct file *file)
+{
+ struct ipu_subsystem_trace_config *sys = inode->i_private;
+
+ if (!sys)
+ return -EACCES;
+
+ if (!sys->memory.memory_buffer)
+ return -EACCES;
+
+ dma_sync_single_for_cpu(sys->dev,
+ sys->memory.dma_handle,
+ MEMORY_RING_BUFFER_SIZE +
+ MEMORY_RING_BUFFER_GUARD, DMA_FROM_DEVICE);
+
+ file->private_data = sys;
+ return 0;
+};
+
+static ssize_t gettrace_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct ipu_subsystem_trace_config *sys = file->private_data;
+
+ return simple_read_from_buffer(buf, len, ppos,
+ sys->memory.memory_buffer,
+ MEMORY_RING_BUFFER_SIZE +
+ MEMORY_RING_BUFFER_OVERREAD);
+}
+
+static ssize_t gettrace_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct ipu_subsystem_trace_config *sys = file->private_data;
+ static const char str[] = "clear";
+ char buffer[sizeof(str)] = { 0 };
+ ssize_t ret;
+
+ ret = simple_write_to_buffer(buffer, sizeof(buffer), ppos, buf, len);
+ if (ret < 0)
+ return ret;
+
+ if (ret < sizeof(str) - 1)
+ return -EINVAL;
+
+ if (!strncmp(str, buffer, sizeof(str) - 1)) {
+ clear_trace_buffer(sys);
+ return len;
+ }
+
+ return -EINVAL;
+}
+
+static int gettrace_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static const struct file_operations ipu_gettrace_fops = {
+ .owner = THIS_MODULE,
+ .open = gettrace_open,
+ .release = gettrace_release,
+ .read = gettrace_read,
+ .write = gettrace_write,
+ .llseek = no_llseek,
+};
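+
+/*
+ * A hedged usage sketch for the gettrace files registered in
+ * ipu_trace_debugfs_add() below (debugfs mount point assumed, run as
+ * root): a plain read captures the raw ring buffer, and writing the
+ * string "clear" zeroes it:
+ *
+ * cat /sys/kernel/debug/<ipu-dir>/getisystrace > isys-trace.bin
+ * echo clear > /sys/kernel/debug/<ipu-dir>/getisystrace
+ */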
+
+int ipu_trace_init(struct ipu_device *isp, void __iomem *base,
+ struct device *dev, struct ipu_trace_block *blocks)
+{
+ struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+ struct ipu_trace *trace = isp->trace;
+ struct ipu_subsystem_trace_config *sys;
+ int ret = 0;
+
+ if (!isp->trace)
+ return 0;
+
+ mutex_lock(&isp->trace->lock);
+
+ if (dev == &isp->isys->dev) {
+ sys = &trace->isys;
+ } else if (dev == &isp->psys->dev) {
+ sys = &trace->psys;
+ } else {
+ ret = -EINVAL;
+ goto leave;
+ }
+
+ adev->trace_cfg = sys;
+ sys->dev = dev;
+ sys->offset = base - isp->base; /* sub system offset */
+ sys->base = base;
+ sys->blocks = blocks;
+
+leave:
+ mutex_unlock(&isp->trace->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipu_trace_init);
+
+void ipu_trace_uninit(struct device *dev)
+{
+ struct ipu_bus_device *adev = to_ipu_bus_device(dev);
+ struct ipu_device *isp = adev->isp;
+ struct ipu_trace *trace = isp->trace;
+ struct ipu_subsystem_trace_config *sys = adev->trace_cfg;
+
+ if (!trace || !sys)
+ return;
+
+ mutex_lock(&trace->lock);
+
+ if (sys->memory.memory_buffer)
+ dma_free_coherent(sys->dev,
+ MEMORY_RING_BUFFER_SIZE +
+ MEMORY_RING_BUFFER_GUARD,
+ sys->memory.memory_buffer,
+ sys->memory.dma_handle);
+
+ sys->dev = NULL;
+ sys->memory.memory_buffer = NULL;
+
+ mutex_unlock(&trace->lock);
+}
+EXPORT_SYMBOL_GPL(ipu_trace_uninit);
+
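+/*
+ * The function below registers the trace control files. The resulting
+ * layout under the per-device debugfs directory is:
+ *
+ * traceconf rw: register config blob in, text dump out
+ * wptraceconf rw: psys watchpoint config blob in, text dump out
+ * getisystrace r: isys trace ring buffer ("clear" resets it)
+ * getpsystrace r: psys trace ring buffer ("clear" resets it)
+ */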
+int ipu_trace_debugfs_add(struct ipu_device *isp, struct dentry *dir)
+{
+ struct dentry *files[4];
+ int i = 0;
+
+ files[i] = debugfs_create_file("traceconf", 0644,
+ dir, isp, &ipu_traceconf_fops);
+ if (!files[i])
+ return -ENOMEM;
+ i++;
+
+ files[i] = debugfs_create_file("wptraceconf", 0644,
+ dir, isp, &ipu_wptraceconf_fops);
+ if (!files[i])
+ goto error;
+ i++;
+
+ files[i] = debugfs_create_file("getisystrace", 0444,
+ dir,
+ &isp->trace->isys, &ipu_gettrace_fops);
+
+ if (!files[i])
+ goto error;
+ i++;
+
+ files[i] = debugfs_create_file("getpsystrace", 0444,
+ dir,
+ &isp->trace->psys, &ipu_gettrace_fops);
+ if (!files[i])
+ goto error;
+
+ return 0;
+
+error:
+ for (; i > 0; i--)
+ debugfs_remove(files[i - 1]);
+ return -ENOMEM;
+}
+
+int ipu_trace_add(struct ipu_device *isp)
+{
+ isp->trace = devm_kzalloc(&isp->pdev->dev,
+ sizeof(struct ipu_trace), GFP_KERNEL);
+ if (!isp->trace)
+ return -ENOMEM;
+
+ mutex_init(&isp->trace->lock);
+
+ return 0;
+}
+
+void ipu_trace_release(struct ipu_device *isp)
+{
+ if (!isp->trace)
+ return;
+ mutex_destroy(&isp->trace->lock);
+}
+
+MODULE_AUTHOR("Samu Onkalo <samu.onkalo@intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ipu trace support");
new file mode 100644
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2014 - 2021 Intel Corporation */
+
+#ifndef IPU_TRACE_H
+#define IPU_TRACE_H
+#include <linux/debugfs.h>
+
+/* Trace unit register offsets */
+#define TRACE_REG_TUN_DDR_ENABLE 0x000
+#define TRACE_REG_TUN_NPK_ENABLE 0x004
+#define TRACE_REG_TUN_DDR_INFO_VAL 0x008
+#define TRACE_REG_TUN_NPK_ADDR 0x00C
+#define TRACE_REG_TUN_DRAM_BASE_ADDR 0x010
+#define TRACE_REG_TUN_DRAM_END_ADDR 0x014
+#define TRACE_REG_TUN_LOCAL_TIMER0 0x018
+#define TRACE_REG_TUN_LOCAL_TIMER1 0x01C
+#define TRACE_REG_TUN_WR_PTR 0x020
+#define TRACE_REG_TUN_RD_PTR 0x024
+
+/*
+ * The following registers are left out on purpose:
+ * TUN_LOCAL_TIMER0, TUN_LOCAL_TIMER1, TUN_DRAM_BASE_ADDR
+ * TUN_DRAM_END_ADDR, TUN_WR_PTR, TUN_RD_PTR
+ */
+
+/* Trace monitor register offsets */
+#define TRACE_REG_TM_TRACE_ADDR_A 0x0900
+#define TRACE_REG_TM_TRACE_ADDR_B 0x0904
+#define TRACE_REG_TM_TRACE_ADDR_C 0x0908
+#define TRACE_REG_TM_TRACE_ADDR_D 0x090c
+#define TRACE_REG_TM_TRACE_ENABLE_NPK 0x0910
+#define TRACE_REG_TM_TRACE_ENABLE_DDR 0x0914
+#define TRACE_REG_TM_TRACE_PER_PC 0x0918
+#define TRACE_REG_TM_TRACE_PER_BRANCH 0x091c
+#define TRACE_REG_TM_TRACE_HEADER 0x0920
+#define TRACE_REG_TM_TRACE_CFG 0x0924
+#define TRACE_REG_TM_TRACE_LOST_PACKETS 0x0928
+#define TRACE_REG_TM_TRACE_LP_CLEAR 0x092c
+#define TRACE_REG_TM_TRACE_LMRUN_MASK 0x0930
+#define TRACE_REG_TM_TRACE_LMRUN_PC_LOW 0x0934
+#define TRACE_REG_TM_TRACE_LMRUN_PC_HIGH 0x0938
+#define TRACE_REG_TM_TRACE_MMIO_SEL 0x093c
+#define TRACE_REG_TM_TRACE_MMIO_WP0_LOW 0x0940
+#define TRACE_REG_TM_TRACE_MMIO_WP1_LOW 0x0944
+#define TRACE_REG_TM_TRACE_MMIO_WP2_LOW 0x0948
+#define TRACE_REG_TM_TRACE_MMIO_WP3_LOW 0x094c
+#define TRACE_REG_TM_TRACE_MMIO_WP0_HIGH 0x0950
+#define TRACE_REG_TM_TRACE_MMIO_WP1_HIGH 0x0954
+#define TRACE_REG_TM_TRACE_MMIO_WP2_HIGH 0x0958
+#define TRACE_REG_TM_TRACE_MMIO_WP3_HIGH 0x095c
+#define TRACE_REG_TM_FWTRACE_FIRST 0x0A00
+#define TRACE_REG_TM_FWTRACE_MIDDLE 0x0A04
+#define TRACE_REG_TM_FWTRACE_LAST 0x0A08
+
+/*
+ * The following exist only in the (I)SP address space:
+ * TM_FWTRACE_FIRST, TM_FWTRACE_MIDDLE, TM_FWTRACE_LAST
+ */
+
+#define TRACE_REG_GPC_RESET 0x000
+#define TRACE_REG_GPC_OVERALL_ENABLE 0x004
+#define TRACE_REG_GPC_TRACE_HEADER 0x008
+#define TRACE_REG_GPC_TRACE_ADDRESS 0x00C
+#define TRACE_REG_GPC_TRACE_NPK_EN 0x010
+#define TRACE_REG_GPC_TRACE_DDR_EN 0x014
+#define TRACE_REG_GPC_TRACE_LPKT_CLEAR 0x018
+#define TRACE_REG_GPC_TRACE_LPKT 0x01C
+
+#define TRACE_REG_GPC_ENABLE_ID0 0x020
+#define TRACE_REG_GPC_ENABLE_ID1 0x024
+#define TRACE_REG_GPC_ENABLE_ID2 0x028
+#define TRACE_REG_GPC_ENABLE_ID3 0x02c
+
+#define TRACE_REG_GPC_VALUE_ID0 0x030
+#define TRACE_REG_GPC_VALUE_ID1 0x034
+#define TRACE_REG_GPC_VALUE_ID2 0x038
+#define TRACE_REG_GPC_VALUE_ID3 0x03c
+
+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID0 0x040
+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID1 0x044
+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID2 0x048
+#define TRACE_REG_GPC_CNT_INPUT_SELECT_ID3 0x04c
+
+#define TRACE_REG_GPC_CNT_START_SELECT_ID0 0x050
+#define TRACE_REG_GPC_CNT_START_SELECT_ID1 0x054
+#define TRACE_REG_GPC_CNT_START_SELECT_ID2 0x058
+#define TRACE_REG_GPC_CNT_START_SELECT_ID3 0x05c
+
+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID0 0x060
+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID1 0x064
+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID2 0x068
+#define TRACE_REG_GPC_CNT_STOP_SELECT_ID3 0x06c
+
+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID0 0x070
+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID1 0x074
+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID2 0x078
+#define TRACE_REG_GPC_CNT_MSG_SELECT_ID3 0x07c
+
+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID0 0x080
+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID1 0x084
+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID2 0x088
+#define TRACE_REG_GPC_CNT_MSG_PLOAD_SELECT_ID3 0x08c
+
+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID0 0x090
+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID1 0x094
+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID2 0x098
+#define TRACE_REG_GPC_IRQ_TRIGGER_VALUE_ID3 0x09c
+
+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID0 0x0a0
+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID1 0x0a4
+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID2 0x0a8
+#define TRACE_REG_GPC_IRQ_TIMER_SELECT_ID3 0x0ac
+
+#define TRACE_REG_GPC_IRQ_ENABLE_ID0 0x0b0
+#define TRACE_REG_GPC_IRQ_ENABLE_ID1 0x0b4
+#define TRACE_REG_GPC_IRQ_ENABLE_ID2 0x0b8
+#define TRACE_REG_GPC_IRQ_ENABLE_ID3 0x0bc
+
+struct ipu_trace;
+struct ipu_subsystem_trace_config;
+
+enum ipu_trace_block_type {
+ IPU_TRACE_BLOCK_TUN = 0, /* Trace unit */
+ IPU_TRACE_BLOCK_TM, /* Trace monitor */
+ IPU_TRACE_BLOCK_GPC, /* General purpose control */
+ IPU_TRACE_CSI2, /* CSI2 legacy receiver */
+ IPU_TRACE_CSI2_3PH, /* CSI2 combo receiver */
+ IPU_TRACE_SIG2CIOS,
+ IPU_TRACE_TIMER_RST, /* Trace reset control timer */
+ IPU_TRACE_BLOCK_END /* End of list */
+};
+
+struct ipu_trace_block {
+ u32 offset; /* Offset to block inside subsystem */
+ enum ipu_trace_block_type type;
+};
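+
+/*
+ * A subsystem describes its trace hardware with a table of blocks
+ * terminated by IPU_TRACE_BLOCK_END. A minimal sketch, with
+ * hypothetical offsets (not taken from real hardware):
+ *
+ * static struct ipu_trace_block my_trace_blocks[] = {
+ * { .offset = 0x0000, .type = IPU_TRACE_BLOCK_TUN },
+ * { .offset = 0x0900, .type = IPU_TRACE_BLOCK_TM },
+ * { .offset = 0x1000, .type = IPU_TRACE_TIMER_RST },
+ * { .type = IPU_TRACE_BLOCK_END }
+ * };
+ *
+ * The table is passed to ipu_trace_init() together with the subsystem
+ * iomem base.
+ */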
+
+int ipu_trace_add(struct ipu_device *isp);
+int ipu_trace_debugfs_add(struct ipu_device *isp, struct dentry *dir);
+void ipu_trace_release(struct ipu_device *isp);
+int ipu_trace_init(struct ipu_device *isp, void __iomem *base,
+ struct device *dev, struct ipu_trace_block *blocks);
+void ipu_trace_restore(struct device *dev);
+void ipu_trace_uninit(struct device *dev);
+void ipu_trace_stop(struct device *dev);
+#endif /* IPU_TRACE_H */
new file mode 100644
@@ -0,0 +1,823 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2013 - 2020 Intel Corporation
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+
+#include "ipu.h"
+#include "ipu-buttress.h"
+#include "ipu-platform.h"
+#include "ipu-platform-buttress-regs.h"
+#include "ipu-cpd.h"
+#include "ipu-pdata.h"
+#include "ipu-bus.h"
+#include "ipu-mmu.h"
+#include "ipu-platform-regs.h"
+#include "ipu-platform-isys-csi2-reg.h"
+#include "ipu-trace.h"
+
+#define IPU_PCI_BAR 0
+enum ipu_version ipu_ver;
+EXPORT_SYMBOL(ipu_ver);
+
+static struct ipu_bus_device *ipu_isys_init(struct pci_dev *pdev,
+ struct device *parent,
+ struct ipu_buttress_ctrl *ctrl,
+ void __iomem *base,
+ const struct ipu_isys_internal_pdata
+ *ipdata,
+ unsigned int nr)
+{
+ struct ipu_bus_device *isys;
+ struct ipu_isys_pdata *pdata;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->base = base;
+ pdata->ipdata = ipdata;
+
+ /* Use 250 MHz (ratio 0x0a x 25 MHz) for IPU6SE */
+ if (ipu_ver == IPU_VER_6SE)
+ ctrl->ratio = IPU6SE_IS_FREQ_CTL_DEFAULT_RATIO;
+
+ isys = ipu_bus_add_device(pdev, parent, pdata, ctrl,
+ IPU_ISYS_NAME, nr);
+ if (IS_ERR(isys))
+ return ERR_PTR(-ENOMEM);
+
+ isys->mmu = ipu_mmu_init(&pdev->dev, base, ISYS_MMID,
+ &ipdata->hw_variant);
+ if (IS_ERR(isys->mmu))
+ return ERR_PTR(-ENOMEM);
+
+ isys->mmu->dev = &isys->dev;
+
+ return isys;
+}
+
+static struct ipu_bus_device *ipu_psys_init(struct pci_dev *pdev,
+ struct device *parent,
+ struct ipu_buttress_ctrl *ctrl,
+ void __iomem *base,
+ const struct ipu_psys_internal_pdata
+ *ipdata, unsigned int nr)
+{
+ struct ipu_bus_device *psys;
+ struct ipu_psys_pdata *pdata;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->base = base;
+ pdata->ipdata = ipdata;
+
+ psys = ipu_bus_add_device(pdev, parent, pdata, ctrl,
+ IPU_PSYS_NAME, nr);
+ if (IS_ERR(psys))
+ return ERR_PTR(-ENOMEM);
+
+ psys->mmu = ipu_mmu_init(&pdev->dev, base, PSYS_MMID,
+ &ipdata->hw_variant);
+ if (IS_ERR(psys->mmu))
+ return ERR_PTR(-ENOMEM);
+
+ psys->mmu->dev = &psys->dev;
+
+ return psys;
+}
+
+int ipu_fw_authenticate(void *data, u64 val)
+{
+ struct ipu_device *isp = data;
+ int ret;
+
+ if (!isp->secure_mode)
+ return -EINVAL;
+
+ ret = ipu_buttress_reset_authentication(isp);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "Failed to reset authentication!\n");
+ return ret;
+ }
+
+ ret = pm_runtime_get_sync(&isp->psys->dev);
+ if (ret < 0) {
+ dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", ret);
+ pm_runtime_put_noidle(&isp->psys->dev);
+ return ret;
+ }
+
+ ret = ipu_buttress_authenticate(isp);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "FW authentication failed\n");
+ pm_runtime_put(&isp->psys->dev);
+ return ret;
+ }
+
+ pm_runtime_put(&isp->psys->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(ipu_fw_authenticate);
+DEFINE_SIMPLE_ATTRIBUTE(authenticate_fops, NULL, ipu_fw_authenticate, "%llu\n");
+
+#ifdef CONFIG_DEBUG_FS
+static int resume_ipu_bus_device(struct ipu_bus_device *adev)
+{
+ struct device *dev = &adev->dev;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (!pm || !pm->resume)
+ return -EIO;
+
+ return pm->resume(dev);
+}
+
+static int suspend_ipu_bus_device(struct ipu_bus_device *adev)
+{
+ struct device *dev = &adev->dev;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (!pm || !pm->suspend)
+ return -EIO;
+
+ return pm->suspend(dev);
+}
+
+static int force_suspend_get(void *data, u64 *val)
+{
+ struct ipu_device *isp = data;
+ struct ipu_buttress *b = &isp->buttress;
+
+ *val = b->force_suspend;
+ return 0;
+}
+
+static int force_suspend_set(void *data, u64 val)
+{
+ struct ipu_device *isp = data;
+ struct ipu_buttress *b = &isp->buttress;
+ int ret = 0;
+
+ if (val == b->force_suspend)
+ return 0;
+
+ if (val) {
+ b->force_suspend = 1;
+ ret = suspend_ipu_bus_device(isp->psys);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "Failed to suspend psys\n");
+ return ret;
+ }
+ ret = suspend_ipu_bus_device(isp->isys);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "Failed to suspend isys\n");
+ return ret;
+ }
+ ret = pci_set_power_state(isp->pdev, PCI_D3hot);
+ if (ret) {
+ dev_err(&isp->pdev->dev,
+ "Failed to suspend IUnit PCI device\n");
+ return ret;
+ }
+ } else {
+ ret = pci_set_power_state(isp->pdev, PCI_D0);
+ if (ret) {
+ dev_err(&isp->pdev->dev,
+ "Failed to resume IUnit PCI device\n");
+ return ret;
+ }
+ ret = resume_ipu_bus_device(isp->isys);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "Failed to resume isys\n");
+ return ret;
+ }
+ ret = resume_ipu_bus_device(isp->psys);
+ if (ret) {
+ dev_err(&isp->pdev->dev, "Failed to resume psys\n");
+ return ret;
+ }
+ b->force_suspend = 0;
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(force_suspend_fops, force_suspend_get,
+ force_suspend_set, "%llu\n");
+/*
+ * The debugfs interface for reloading the cpd firmware exists only for
+ * debugging purposes and must not be used while either isys or psys is
+ * in use.
+ */
+static int cpd_fw_reload(void *data, u64 val)
+{
+ struct ipu_device *isp = data;
+ int rval = -EINVAL;
+
+ if (isp->cpd_fw_reload)
+ rval = isp->cpd_fw_reload(isp);
+
+ return rval;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cpd_fw_fops, NULL, cpd_fw_reload, "%llu\n");
+
+static int ipu_init_debugfs(struct ipu_device *isp)
+{
+ struct dentry *file;
+ struct dentry *dir;
+
+ dir = debugfs_create_dir(pci_name(isp->pdev), NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ file = debugfs_create_file("force_suspend", 0700, dir, isp,
+ &force_suspend_fops);
+ if (!file)
+ goto err;
+ file = debugfs_create_file("authenticate", 0700, dir, isp,
+ &authenticate_fops);
+ if (!file)
+ goto err;
+
+ file = debugfs_create_file("cpd_fw_reload", 0700, dir, isp,
+ &cpd_fw_fops);
+ if (!file)
+ goto err;
+
+ if (ipu_trace_debugfs_add(isp, dir))
+ goto err;
+
+ isp->ipu_dir = dir;
+
+ if (ipu_buttress_debugfs_init(isp))
+ goto err;
+
+ return 0;
+err:
+ debugfs_remove_recursive(dir);
+ return -ENOMEM;
+}
+
+static void ipu_remove_debugfs(struct ipu_device *isp)
+{
+ /*
+ * The isys and psys debugfs dirs live under the ipu root dir and are
+ * removed with it; set the dentry to NULL to avoid duplicate removal.
+ */
+ debugfs_remove_recursive(isp->ipu_dir);
+ isp->ipu_dir = NULL;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int ipu_pci_config_setup(struct pci_dev *dev)
+{
+ u16 pci_command;
+ int rval;
+
+ pci_read_config_word(dev, PCI_COMMAND, &pci_command);
+ pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ pci_write_config_word(dev, PCI_COMMAND, pci_command);
+ if (ipu_ver == IPU_VER_6EP) {
+ /* Likely a no-op, since MSI is not enabled by default */
+ pci_disable_msi(dev);
+ return 0;
+ }
+
+ rval = pci_enable_msi(dev);
+ if (rval)
+ dev_err(&dev->dev, "Failed to enable msi (%d)\n", rval);
+
+ return rval;
+}
+
+static void ipu_configure_vc_mechanism(struct ipu_device *isp)
+{
+ u32 val = readl(isp->base + BUTTRESS_REG_BTRS_CTRL);
+
+ if (IPU_BTRS_ARB_STALL_MODE_VC0 == IPU_BTRS_ARB_MODE_TYPE_STALL)
+ val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;
+ else
+ val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0;
+
+ if (IPU_BTRS_ARB_STALL_MODE_VC1 == IPU_BTRS_ARB_MODE_TYPE_STALL)
+ val |= BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;
+ else
+ val &= ~BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1;
+
+ writel(val, isp->base + BUTTRESS_REG_BTRS_CTRL);
+}
+
+int request_cpd_fw(const struct firmware **firmware_p, const char *name,
+ struct device *device)
+{
+ const struct firmware *fw;
+ struct firmware *tmp;
+ int ret;
+
+ ret = request_firmware(&fw, name, device);
+ if (ret)
+ return ret;
+
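+ /*
+ * The downstream mapping code is assumed to build its scatterlist
+ * from vmalloc pages, so firmware that the core loaded into linear
+ * memory is copied into a vmalloc buffer here.
+ */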
+ if (is_vmalloc_addr(fw->data)) {
+ *firmware_p = fw;
+ } else {
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+ tmp->size = fw->size;
+ tmp->data = vmalloc(fw->size);
+ if (!tmp->data) {
+ kfree(tmp);
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+ memcpy((void *)tmp->data, fw->data, fw->size);
+ *firmware_p = tmp;
+ release_firmware(fw);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(request_cpd_fw);
+
+static int ipu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ipu_device *isp;
+ phys_addr_t phys;
+ void __iomem *const *iomap;
+ void __iomem *isys_base = NULL;
+ void __iomem *psys_base = NULL;
+ struct ipu_buttress_ctrl *isys_ctrl = NULL, *psys_ctrl = NULL;
+ unsigned int dma_mask = IPU_DMA_MASK;
+ int rval;
+
+ isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
+ if (!isp)
+ return -ENOMEM;
+
+ dev_set_name(&pdev->dev, "intel-ipu");
+ isp->pdev = pdev;
+ INIT_LIST_HEAD(&isp->devices);
+
+ rval = pcim_enable_device(pdev);
+ if (rval) {
+ dev_err(&pdev->dev, "Failed to enable CI ISP device (%d)\n",
+ rval);
+ return rval;
+ }
+
+ dev_info(&pdev->dev, "Device 0x%x (rev: 0x%x)\n",
+ pdev->device, pdev->revision);
+
+ phys = pci_resource_start(pdev, IPU_PCI_BAR);
+
+ rval = pcim_iomap_regions(pdev,
+ 1 << IPU_PCI_BAR,
+ pci_name(pdev));
+ if (rval) {
+ dev_err(&pdev->dev, "Failed to I/O memory remapping (%d)\n",
+ rval);
+ return rval;
+ }
+ dev_info(&pdev->dev, "physical base address 0x%llx\n", phys);
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap) {
+ dev_err(&pdev->dev, "Failed to iomap table (%d)\n", rval);
+ return -ENODEV;
+ }
+
+ isp->base = iomap[IPU_PCI_BAR];
+ dev_info(&pdev->dev, "mapped as: 0x%p\n", isp->base);
+
+ pci_set_drvdata(pdev, isp);
+ pci_set_master(pdev);
+
+ switch (id->device) {
+ case IPU6_PCI_ID:
+ ipu_ver = IPU_VER_6;
+ isp->cpd_fw_name = IPU6_FIRMWARE_NAME;
+ break;
+ case IPU6SE_PCI_ID:
+ ipu_ver = IPU_VER_6SE;
+ isp->cpd_fw_name = IPU6SE_FIRMWARE_NAME;
+ break;
+ case IPU6EP_ADL_P_PCI_ID:
+ case IPU6EP_ADL_N_PCI_ID:
+ ipu_ver = IPU_VER_6EP;
+ isp->cpd_fw_name = IPU6EP_FIRMWARE_NAME;
+ break;
+ default:
+ WARN(1, "Unsupported IPU device");
+ return -ENODEV;
+ }
+
+ ipu_internal_pdata_init();
+
+ isys_base = isp->base + isys_ipdata.hw_variant.offset;
+ psys_base = isp->base + psys_ipdata.hw_variant.offset;
+
+ dev_dbg(&pdev->dev, "isys_base: 0x%lx\n", (unsigned long)isys_base);
+ dev_dbg(&pdev->dev, "psys_base: 0x%lx\n", (unsigned long)psys_base);
+
+ rval = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask));
+ if (!rval)
+ rval = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(dma_mask));
+ if (rval) {
+ dev_err(&pdev->dev, "Failed to set DMA mask (%d)\n", rval);
+ return rval;
+ }
+
+ dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+
+ rval = ipu_pci_config_setup(pdev);
+ if (rval)
+ return rval;
+
+ rval = devm_request_threaded_irq(&pdev->dev, pdev->irq,
+ ipu_buttress_isr,
+ ipu_buttress_isr_threaded,
+ IRQF_SHARED, IPU_NAME, isp);
+ if (rval) {
+ dev_err(&pdev->dev, "Requesting irq failed(%d)\n", rval);
+ return rval;
+ }
+
+ rval = ipu_buttress_init(isp);
+ if (rval)
+ return rval;
+
+ dev_info(&pdev->dev, "cpd file name: %s\n", isp->cpd_fw_name);
+
+ rval = request_cpd_fw(&isp->cpd_fw, isp->cpd_fw_name, &pdev->dev);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "Requesting signed firmware failed\n");
+ return rval;
+ }
+
+ rval = ipu_cpd_validate_cpd_file(isp, isp->cpd_fw->data,
+ isp->cpd_fw->size);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "Failed to validate cpd\n");
+ goto out_ipu_bus_del_devices;
+ }
+
+ rval = ipu_trace_add(isp);
+ if (rval)
+ dev_err(&pdev->dev, "Trace support not available\n");
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
+
+ /*
+ * NOTE Device hierarchy below is important to ensure proper
+ * runtime suspend and resume order.
+ * Also registration order is important to ensure proper
+ * suspend and resume order during system
+ * suspend. Registration order is as follows:
+ * isys->psys
+ */
+ isys_ctrl = devm_kzalloc(&pdev->dev, sizeof(*isys_ctrl), GFP_KERNEL);
+ if (!isys_ctrl) {
+ rval = -ENOMEM;
+ goto out_ipu_bus_del_devices;
+ }
+
+ /* Init buttress control with default values based on the HW */
+ memcpy(isys_ctrl, &isys_buttress_ctrl, sizeof(*isys_ctrl));
+
+ isp->isys = ipu_isys_init(pdev, &pdev->dev,
+ isys_ctrl, isys_base,
+ &isys_ipdata,
+ 0);
+ if (IS_ERR(isp->isys)) {
+ rval = PTR_ERR(isp->isys);
+ goto out_ipu_bus_del_devices;
+ }
+
+ psys_ctrl = devm_kzalloc(&pdev->dev, sizeof(*psys_ctrl), GFP_KERNEL);
+ if (!psys_ctrl) {
+ rval = -ENOMEM;
+ goto out_ipu_bus_del_devices;
+ }
+
+ /* Init buttress control with default values based on the HW */
+ memcpy(psys_ctrl, &psys_buttress_ctrl, sizeof(*psys_ctrl));
+
+ isp->psys = ipu_psys_init(pdev, &isp->isys->dev,
+ psys_ctrl, psys_base,
+ &psys_ipdata, 0);
+ if (IS_ERR(isp->psys)) {
+ rval = PTR_ERR(isp->psys);
+ goto out_ipu_bus_del_devices;
+ }
+
+ rval = pm_runtime_get_sync(&isp->psys->dev);
+ if (rval < 0) {
+ dev_err(&isp->psys->dev, "Failed to get runtime PM\n");
+ goto out_ipu_bus_del_devices;
+ }
+
+ rval = ipu_mmu_hw_init(isp->psys->mmu);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "Failed to set mmu hw\n");
+ goto out_ipu_bus_del_devices;
+ }
+
+ rval = ipu_buttress_map_fw_image(isp->psys, isp->cpd_fw,
+ &isp->fw_sgt);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "failed to map fw image\n");
+ goto out_ipu_bus_del_devices;
+ }
+
+ isp->pkg_dir = ipu_cpd_create_pkg_dir(isp->psys,
+ isp->cpd_fw->data,
+ sg_dma_address(isp->fw_sgt.sgl),
+ &isp->pkg_dir_dma_addr,
+ &isp->pkg_dir_size);
+ if (!isp->pkg_dir) {
+ rval = -ENOMEM;
+ dev_err(&isp->pdev->dev, "failed to create pkg dir\n");
+ goto out_ipu_bus_del_devices;
+ }
+
+ rval = ipu_buttress_authenticate(isp);
+ if (rval) {
+ dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n",
+ rval);
+ goto out_ipu_bus_del_devices;
+ }
+
+ ipu_mmu_hw_cleanup(isp->psys->mmu);
+ pm_runtime_put(&isp->psys->dev);
+
+#ifdef CONFIG_DEBUG_FS
+ rval = ipu_init_debugfs(isp);
+ if (rval) {
+ dev_err(&pdev->dev, "Failed to initialize debugfs");
+ goto out_ipu_bus_del_devices;
+ }
+#endif
+
+ /* Configure the arbitration mechanisms for VC requests */
+ ipu_configure_vc_mechanism(isp);
+
+ dev_info(&pdev->dev, "IPU driver version %d.%d\n", IPU_MAJOR_VERSION,
+ IPU_MINOR_VERSION);
+
+ return 0;
+
+out_ipu_bus_del_devices:
+ if (isp->pkg_dir) {
+ ipu_cpd_free_pkg_dir(isp->psys, isp->pkg_dir,
+ isp->pkg_dir_dma_addr,
+ isp->pkg_dir_size);
+ ipu_buttress_unmap_fw_image(isp->psys, &isp->fw_sgt);
+ isp->pkg_dir = NULL;
+ }
+ if (isp->psys && isp->psys->mmu)
+ ipu_mmu_cleanup(isp->psys->mmu);
+ if (isp->isys && isp->isys->mmu)
+ ipu_mmu_cleanup(isp->isys->mmu);
+ if (isp->psys)
+ pm_runtime_put(&isp->psys->dev);
+ ipu_bus_del_devices(pdev);
+ ipu_buttress_exit(isp);
+ release_firmware(isp->cpd_fw);
+
+ return rval;
+}
+
+static void ipu_pci_remove(struct pci_dev *pdev)
+{
+ struct ipu_device *isp = pci_get_drvdata(pdev);
+
+#ifdef CONFIG_DEBUG_FS
+ ipu_remove_debugfs(isp);
+#endif
+ ipu_trace_release(isp);
+
+ ipu_cpd_free_pkg_dir(isp->psys, isp->pkg_dir, isp->pkg_dir_dma_addr,
+ isp->pkg_dir_size);
+
+ ipu_buttress_unmap_fw_image(isp->psys, &isp->fw_sgt);
+
+ isp->pkg_dir = NULL;
+ isp->pkg_dir_dma_addr = 0;
+ isp->pkg_dir_size = 0;
+
+ ipu_bus_del_devices(pdev);
+
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ ipu_buttress_exit(isp);
+
+ release_firmware(isp->cpd_fw);
+
+ ipu_mmu_cleanup(isp->psys->mmu);
+ ipu_mmu_cleanup(isp->isys->mmu);
+}
+
+static void ipu_pci_reset_prepare(struct pci_dev *pdev)
+{
+ struct ipu_device *isp = pci_get_drvdata(pdev);
+
+ dev_warn(&pdev->dev, "FLR prepare\n");
+ pm_runtime_forbid(&isp->pdev->dev);
+ isp->flr_done = true;
+}
+
+static void ipu_pci_reset_done(struct pci_dev *pdev)
+{
+ struct ipu_device *isp = pci_get_drvdata(pdev);
+
+ ipu_buttress_restore(isp);
+ if (isp->secure_mode)
+ ipu_buttress_reset_authentication(isp);
+
+ ipu_bus_flr_recovery();
+ isp->ipc_reinit = true;
+ pm_runtime_allow(&isp->pdev->dev);
+
+ dev_warn(&pdev->dev, "FLR completed\n");
+}
+
+#ifdef CONFIG_PM
+
+/*
+ * PCI base driver code requires driver to provide these to enable
+ * PCI device level PM state transitions (D0<->D3)
+ */
+static int ipu_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ipu_device *isp = pci_get_drvdata(pdev);
+
+ isp->flr_done = false;
+
+ return 0;
+}
+
+static int ipu_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ipu_device *isp = pci_get_drvdata(pdev);
+ struct ipu_buttress *b = &isp->buttress;
+ int rval;
+
+ /* Configure the arbitration mechanisms for VC requests */
+ ipu_configure_vc_mechanism(isp);
+
+ ipu_buttress_set_secure_mode(isp);
+ isp->secure_mode = ipu_buttress_get_secure_mode(isp);
+ dev_info(dev, "IPU in %s mode\n",
+ isp->secure_mode ? "secure" : "non-secure");
+
+ ipu_buttress_restore(isp);
+
+ rval = ipu_buttress_ipc_reset(isp, &b->cse);
+ if (rval)
+ dev_err(&isp->pdev->dev, "IPC reset protocol failed!\n");
+
+ rval = pm_runtime_get_sync(&isp->psys->dev);
+ if (rval < 0) {
+ dev_err(&isp->psys->dev, "Failed to get runtime PM\n");
+ pm_runtime_put_noidle(&isp->psys->dev);
+ return 0;
+ }
+
+ rval = ipu_buttress_authenticate(isp);
+ if (rval)
+ dev_err(&isp->pdev->dev, "FW authentication failed(%d)\n",
+ rval);
+
+ pm_runtime_put(&isp->psys->dev);
+
+ return 0;
+}
+
+static int ipu_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ipu_device *isp = pci_get_drvdata(pdev);
+ int rval;
+
+ ipu_configure_vc_mechanism(isp);
+ ipu_buttress_restore(isp);
+
+ if (isp->ipc_reinit) {
+ struct ipu_buttress *b = &isp->buttress;
+
+ isp->ipc_reinit = false;
+ rval = ipu_buttress_ipc_reset(isp, &b->cse);
+ if (rval)
+ dev_err(&isp->pdev->dev,
+ "IPC reset protocol failed!\n");
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops ipu_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(&ipu_suspend, &ipu_resume)
+ SET_RUNTIME_PM_OPS(&ipu_suspend, /* Same as in suspend flow */
+ &ipu_runtime_resume,
+ NULL)
+};
+
+#define IPU_PM (&ipu_pm_ops)
+#else
+#define IPU_PM NULL
+#endif
+
+static const struct pci_device_id ipu_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6_PCI_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6SE_PCI_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_ADL_P_PCI_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, IPU6EP_ADL_N_PCI_ID)},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, ipu_pci_tbl);
+
+static const struct pci_error_handlers pci_err_handlers = {
+ .reset_prepare = ipu_pci_reset_prepare,
+ .reset_done = ipu_pci_reset_done,
+};
+
+static struct pci_driver ipu_pci_driver = {
+ .name = IPU_NAME,
+ .id_table = ipu_pci_tbl,
+ .probe = ipu_pci_probe,
+ .remove = ipu_pci_remove,
+ .driver = {
+ .pm = IPU_PM,
+ },
+ .err_handler = &pci_err_handlers,
+};
+
+static int __init ipu_init(void)
+{
+ int rval = ipu_bus_register();
+
+ if (rval) {
+ pr_warn("can't register ipu bus (%d)\n", rval);
+ return rval;
+ }
+
+ rval = pci_register_driver(&ipu_pci_driver);
+ if (rval) {
+ pr_warn("can't register pci driver (%d)\n", rval);
+ goto out_pci_register_driver;
+ }
+
+ return 0;
+
+out_pci_register_driver:
+ ipu_bus_unregister();
+
+ return rval;
+}
+
+static void __exit ipu_exit(void)
+{
+ pci_unregister_driver(&ipu_pci_driver);
+ ipu_bus_unregister();
+}
+
+module_init(ipu_init);
+module_exit(ipu_exit);
+
+MODULE_AUTHOR("Sakari Ailus <sakari.ailus@linux.intel.com>");
+MODULE_AUTHOR("Jouni Högander <jouni.hogander@intel.com>");
+MODULE_AUTHOR("Antti Laakso <antti.laakso@intel.com>");
+MODULE_AUTHOR("Samu Onkalo <samu.onkalo@intel.com>");
+MODULE_AUTHOR("Jianxu Zheng <jian.xu.zheng@intel.com>");
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Renwei Wu <renwei.wu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_AUTHOR("Yunliang Ding <yunliang.ding@intel.com>");
+MODULE_AUTHOR("Zaikuo Wang <zaikuo.wang@intel.com>");
+MODULE_AUTHOR("Leifu Zhao <leifu.zhao@intel.com>");
+MODULE_AUTHOR("Xia Wu <xia.wu@intel.com>");
+MODULE_AUTHOR("Kun Jiang <kun.jiang@intel.com>");
+MODULE_AUTHOR("Intel");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel ipu pci driver");
new file mode 100644
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_H
+#define IPU_H
+
+#include <linux/ioport.h>
+#include <linux/list.h>
+#include <uapi/linux/media.h>
+#include <linux/version.h>
+
+#include "ipu-pdata.h"
+#include "ipu-bus.h"
+#include "ipu-buttress.h"
+#include "ipu-trace.h"
+
+#define IPU6_PCI_ID 0x9a19
+#define IPU6SE_PCI_ID 0x4e19
+#define IPU6EP_ADL_P_PCI_ID 0x465d
+#define IPU6EP_ADL_N_PCI_ID 0x462e
+
+enum ipu_version {
+ IPU_VER_INVALID = 0,
+ IPU_VER_6,
+ IPU_VER_6SE,
+ IPU_VER_6EP,
+};
+
+/*
+ * IPU version definitions to reflect the IPU driver changes.
+ * Both ISYS and PSYS share the same version.
+ */
+#define IPU_MAJOR_VERSION 1
+#define IPU_MINOR_VERSION 0
+#define IPU_DRIVER_VERSION (IPU_MAJOR_VERSION << 16 | IPU_MINOR_VERSION)
+
+/* Processing system frequency: 25 MHz x ratio, legal values [8, 32] */
+#define PS_FREQ_CTL_DEFAULT_RATIO 0x12
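+/* Example: the default ratio 0x12 (18) gives 18 x 25 MHz = 450 MHz. */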
+
+/* Input system frequency: 1600 MHz / divisor, legal values [2, 8] */
+#define IS_FREQ_SOURCE 1600000000
+#define IS_FREQ_CTL_DIVISOR 0x4
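+/* Example: with divisor 0x4 the IS clock is 1600 MHz / 4 = 400 MHz. */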
+
+/*
+ * ISYS DMA can overshoot. For higher resolutions the over-allocation is
+ * one line, but it must be at least 1024 bytes. The value may differ
+ * between versions/generations, so it is provided via platform data.
+ */
+#define IPU_ISYS_OVERALLOC_MIN 1024
+
+/*
+ * The GDA has 128 physical pages; the page size is 2K for IPU6 and 1K
+ * for the others.
+ */
+#define IPU_DEVICE_GDA_NR_PAGES 128
+
+/*
+ * Virtualization factor to calculate the available virtual pages.
+ */
+#define IPU_DEVICE_GDA_VIRT_FACTOR 32
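+/* Presumably 128 physical pages x factor 32 = 4096 virtual pages. */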
+
+struct pci_dev;
+struct list_head;
+struct firmware;
+
+#define NR_OF_MMU_RESOURCES 2
+
+struct ipu_device {
+ struct pci_dev *pdev;
+ struct list_head devices;
+ struct ipu_bus_device *isys;
+ struct ipu_bus_device *psys;
+ struct ipu_buttress buttress;
+
+ const struct firmware *cpd_fw;
+ const char *cpd_fw_name;
+ u64 *pkg_dir;
+ dma_addr_t pkg_dir_dma_addr;
+ unsigned int pkg_dir_size;
+ struct sg_table fw_sgt;
+
+ void __iomem *base;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *ipu_dir;
+#endif
+ struct ipu_trace *trace;
+ bool flr_done;
+ bool ipc_reinit;
+ bool secure_mode;
+
+ int (*cpd_fw_reload)(struct ipu_device *isp);
+};
+
+#define IPU_DMA_MASK 39
+#define IPU_LIB_CALL_TIMEOUT_MS 2000
+#define IPU_PSYS_CMD_TIMEOUT_MS 2000
+#define IPU_PSYS_OPEN_TIMEOUT_US 50
+#define IPU_PSYS_OPEN_RETRY (10000 / IPU_PSYS_OPEN_TIMEOUT_US)
+
+int ipu_fw_authenticate(void *data, u64 val);
+void ipu_configure_spc(struct ipu_device *isp,
+ const struct ipu_hw_variants *hw_variant,
+ int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
+ dma_addr_t pkg_dir_dma_addr);
+int request_cpd_fw(const struct firmware **firmware_p, const char *name,
+ struct device *device);
+extern enum ipu_version ipu_ver;
+void ipu_internal_pdata_init(void);
+
+#endif /* IPU_H */
new file mode 100644
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2017 - 2020 Intel Corporation.
+
+ifneq ($(EXTERNAL_BUILD), 1)
+srcpath := $(srctree)
+endif
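+# For in-tree builds srcpath defaults to $(srctree); out-of-tree builds
+# are expected to set EXTERNAL_BUILD=1 and export srcpath themselves.
+# A hedged example invocation (paths are assumptions):
+#   make -C /lib/modules/$(uname -r)/build M=$PWD EXTERNAL_BUILD=1 modules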
+
+ccflags-y += -DIPU_TPG_FRAME_SYNC -DIPU_PSYS_GPC \
+ -DIPU_ISYS_GPC
+
+intel-ipu6-objs += ../ipu.o \
+ ../ipu-bus.o \
+ ../ipu-dma.o \
+ ../ipu-mmu.o \
+ ../ipu-buttress.o \
+ ../ipu-trace.o \
+ ../ipu-cpd.o \
+ ipu6.o \
+ ../ipu-fw-com.o
+
+obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6.o
+
+intel-ipu6-isys-objs += ../ipu-isys.o \
+ ../ipu-isys-csi2.o \
+ ipu6-isys.o \
+ ipu6-isys-phy.o \
+ ipu6-isys-csi2.o \
+ ipu6-isys-gpc.o \
+ ../ipu-isys-csi2-be-soc.o \
+ ../ipu-fw-isys.o \
+ ../ipu-isys-video.o \
+ ../ipu-isys-queue.o \
+ ../ipu-isys-subdev.o
+
+obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-isys.o
+
+intel-ipu6-psys-objs += ../ipu-psys.o \
+ ipu6-psys.o \
+ ipu-resources.o \
+ ipu6-psys-gpc.o \
+ ipu6-l-scheduler.o \
+ ipu6-ppg.o
+
+intel-ipu6-psys-objs += ipu-fw-resources.o \
+ ipu6-fw-resources.o \
+ ipu6se-fw-resources.o \
+ ipu6ep-fw-resources.o \
+ ../ipu-fw-psys.o
+
+ifeq ($(CONFIG_COMPAT),y)
+intel-ipu6-psys-objs += ../ipu-psys-compat32.o
+endif
+
+obj-$(CONFIG_VIDEO_INTEL_IPU6) += intel-ipu6-psys.o
+
+ccflags-y += -I$(srcpath)/$(src)/../../../../../include/
+ccflags-y += -I$(srcpath)/$(src)/../
+ccflags-y += -I$(srcpath)/$(src)/
new file mode 100644
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2015 - 2019 Intel Corporation
+
+#include <linux/err.h>
+#include <linux/string.h>
+
+#include "ipu-psys.h"
+#include "ipu-fw-psys.h"
+#include "ipu6-platform-resources.h"
+#include "ipu6se-platform-resources.h"
+
+/********** Generic resource handling **********/
+
+/*
+ * The extension library provides byte offsets to its internal
+ * structures; use those offsets to update fields. Without the
+ * extension library, access the structures directly.
+ */
+const struct ipu6_psys_hw_res_variant *var = &hw_var;
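+
+/*
+ * hw_var is expected to be populated by the per-generation resource
+ * code before these helpers run (the WARN()s below fire when it is
+ * not); each helper simply dispatches through it.
+ */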
+
+int ipu_fw_psys_set_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index,
+ u8 value)
+{
+ struct ipu_fw_psys_process_group *parent =
+ (struct ipu_fw_psys_process_group *)((char *)ptr +
+ ptr->parent_offset);
+
+ ptr->cells[index] = value;
+ parent->resource_bitmap |= 1 << value;
+
+ return 0;
+}
+
+u8 ipu_fw_psys_get_process_cell_id(struct ipu_fw_psys_process *ptr, u8 index)
+{
+ return ptr->cells[index];
+}
+
+int ipu_fw_psys_clear_process_cell(struct ipu_fw_psys_process *ptr)
+{
+ struct ipu_fw_psys_process_group *parent;
+ u8 cell_id = ipu_fw_psys_get_process_cell_id(ptr, 0);
+ int retval = -1;
+ u8 value;
+
+ parent = (struct ipu_fw_psys_process_group *)((char *)ptr +
+ ptr->parent_offset);
+
+ value = var->cell_num;
+ if ((1 << cell_id) != 0 &&
+ ((1 << cell_id) & parent->resource_bitmap)) {
+ ipu_fw_psys_set_process_cell_id(ptr, 0, value);
+ parent->resource_bitmap &= ~(1 << cell_id);
+ retval = 0;
+ }
+
+ return retval;
+}
+
+int ipu_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset,
+ u16 value)
+{
+ if (var->set_proc_dev_chn)
+ return var->set_proc_dev_chn(ptr, offset, value);
+
+ WARN(1, "ipu6 psys res var is not initialised correctly.");
+ return 0;
+}
+
+int ipu_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr,
+ u16 id, u32 bitmap,
+ u32 active_bitmap)
+{
+ if (var->set_proc_dfm_bitmap)
+ return var->set_proc_dfm_bitmap(ptr, id, bitmap,
+ active_bitmap);
+
+ WARN(1, "ipu6 psys res var is not initialised correctly.");
+ return 0;
+}
+
+int ipu_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr,
+ u16 type_id, u16 mem_id, u16 offset)
+{
+ if (var->set_proc_ext_mem)
+ return var->set_proc_ext_mem(ptr, type_id, mem_id, offset);
+
+ WARN(1, "ipu6 psys res var is not initialised correctly.");
+ return 0;
+}
+
+int ipu_fw_psys_get_program_manifest_by_process(
+ struct ipu_fw_generic_program_manifest *gen_pm,
+ const struct ipu_fw_psys_program_group_manifest *pg_manifest,
+ struct ipu_fw_psys_process *process)
+{
+ if (var->get_pgm_by_proc)
+ return var->get_pgm_by_proc(gen_pm, pg_manifest, process);
+
+ WARN(1, "ipu6 psys res var is not initialised correctly.");
+ return 0;
+}
+
new file mode 100644
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Intel Corporation */
+
+#ifndef IPU_PLATFORM_BUTTRESS_REGS_H
+#define IPU_PLATFORM_BUTTRESS_REGS_H
+
+/* IS_WORKPOINT_REQ */
+#define IPU_BUTTRESS_REG_IS_FREQ_CTL 0x34
+/* PS_WORKPOINT_REQ */
+#define IPU_BUTTRESS_REG_PS_FREQ_CTL 0x38
+
+#define IPU_BUTTRESS_IS_FREQ_RATIO_MASK 0xff
+#define IPU_BUTTRESS_PS_FREQ_RATIO_MASK 0xff
+
+#define IPU_IS_FREQ_MAX 533
+#define IPU_IS_FREQ_MIN 200
+#define IPU_PS_FREQ_MAX 450
+#define IPU_IS_FREQ_RATIO_BASE 25
+#define IPU_PS_FREQ_RATIO_BASE 25
+#define IPU_BUTTRESS_IS_FREQ_CTL_DIVISOR_MASK 0xff
+#define IPU_BUTTRESS_PS_FREQ_CTL_DIVISOR_MASK 0xff
+
+/* should be tuned for real silicon */
+#define IPU_IS_FREQ_CTL_DEFAULT_RATIO 0x08
+#define IPU6SE_IS_FREQ_CTL_DEFAULT_RATIO 0x0a
+#define IPU_PS_FREQ_CTL_DEFAULT_RATIO 0x10
+
+#define IPU_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x10
+#define IPU_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO 0x0708
+
+#define IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT 3
+#define IPU_BUTTRESS_PWR_STATE_IS_PWR_MASK \
+ (0x3 << IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT)
+
+#define IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT 6
+#define IPU_BUTTRESS_PWR_STATE_PS_PWR_MASK \
+ (0x3 << IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT)
+
+#define IPU_BUTTRESS_PWR_STATE_DN_DONE 0x0
+#define IPU_BUTTRESS_PWR_STATE_UP_PROCESS 0x1
+#define IPU_BUTTRESS_PWR_STATE_DN_PROCESS 0x2
+#define IPU_BUTTRESS_PWR_STATE_UP_DONE 0x3
+
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_0 0x270
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_1 0x274
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_2 0x278
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_3 0x27c
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_4 0x280
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_5 0x284
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_6 0x288
+#define IPU_BUTTRESS_REG_FPGA_SUPPORT_7 0x28c
+
+#define BUTTRESS_REG_WDT 0x8
+#define BUTTRESS_REG_BTRS_CTRL 0xc
+#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC0 BIT(0)
+#define BUTTRESS_REG_BTRS_CTRL_STALL_MODE_VC1 BIT(1)
+
+#define BUTTRESS_REG_FW_RESET_CTL 0x30
+#define BUTTRESS_FW_RESET_CTL_START BIT(0)
+#define BUTTRESS_FW_RESET_CTL_DONE BIT(1)
+
+#define BUTTRESS_REG_IS_FREQ_CTL 0x34
+
+#define BUTTRESS_IS_FREQ_CTL_DIVISOR_MASK 0xf
+
+#define BUTTRESS_REG_PS_FREQ_CTL 0x38
+
+#define BUTTRESS_PS_FREQ_CTL_RATIO_MASK 0xff
+
+#define BUTTRESS_FREQ_CTL_START BIT(31)
+#define BUTTRESS_FREQ_CTL_START_SHIFT 31
+#define BUTTRESS_FREQ_CTL_QOS_FLOOR_SHIFT 8
+#define BUTTRESS_FREQ_CTL_ICCMAX_LEVEL (GENMASK(19, 16))
+#define BUTTRESS_FREQ_CTL_QOS_FLOOR_MASK (0xff << 8)
+
+#define BUTTRESS_REG_PWR_STATE 0x5c
+
+#define BUTTRESS_PWR_STATE_IS_PWR_SHIFT 3
+#define BUTTRESS_PWR_STATE_IS_PWR_MASK (0x3 << 3)
+
+#define BUTTRESS_PWR_STATE_PS_PWR_SHIFT 6
+#define BUTTRESS_PWR_STATE_PS_PWR_MASK (0x3 << 6)
+
+#define BUTTRESS_PWR_STATE_RESET 0x0
+#define BUTTRESS_PWR_STATE_PWR_ON_DONE 0x1
+#define BUTTRESS_PWR_STATE_PWR_RDY 0x3
+#define BUTTRESS_PWR_STATE_PWR_IDLE 0x4
+
+#define BUTTRESS_PWR_STATE_HH_STATUS_SHIFT 11
+#define BUTTRESS_PWR_STATE_HH_STATUS_MASK (0x3 << 11)
+
+enum {
+ BUTTRESS_PWR_STATE_HH_STATE_IDLE,
+ BUTTRESS_PWR_STATE_HH_STATE_IN_PRGS,
+ BUTTRESS_PWR_STATE_HH_STATE_DONE,
+ BUTTRESS_PWR_STATE_HH_STATE_ERR,
+};
+
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_SHIFT 19
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_MASK (0xf << 19)
+
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IDLE 0x0
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PLL_CMP 0x1
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK 0x2
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PG_ACK 0x3
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_ASSRT_CYCLES 0x4
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES1 0x5
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_STOP_CLK_CYCLES2 0x6
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DEASSRT_CYCLES 0x7
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_FUSE_WR_CMP 0x8
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_BRK_POINT 0x9
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_IS_RDY 0xa
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_HALT_HALTED 0xb
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_RST_DURATION_CNT3 0xc
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_CLKACK_PD 0xd
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_PD_BRK_POINT 0xe
+#define BUTTRESS_PWR_STATE_IS_PWR_FSM_WAIT_4_PD_PG_ACK0 0xf
+
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_SHIFT 24
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_MASK (0x1f << 24)
+
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_IDLE 0x0
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_IP_RDY 0x1
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_PRE_CNT_EXH 0x2
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_VGI_PWRGOOD 0x3
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_RO_POST_CNT_EXH 0x4
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WR_PLL_RATIO 0x5
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_PLL_CMP 0x6
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PU_CLKACK 0x7
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_ASSRT_CYCLES 0x8
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES1 0x9
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_STOP_CLK_CYCLES2 0xa
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RST_DEASSRT_CYCLES 0xb
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PU_BRK_PNT 0xc
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_FUSE_ACCPT 0xd
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_PS_PWR_UP 0xf
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_4_HALTED 0x10
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_RESET_CNT3 0x11
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_CLKACK 0x12
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_PD_OFF_IND 0x13
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PH4 0x14
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_PLL_CMP 0x15
+#define BUTTRESS_PWR_STATE_PS_PWR_FSM_WAIT_DVFS_CLKACK 0x16
+
+#define BUTTRESS_REG_SECURITY_CTL 0x300
+
+#define BUTTRESS_SECURITY_CTL_FW_SECURE_MODE BIT(16)
+#define BUTTRESS_SECURITY_CTL_FW_SETUP_SHIFT 0
+#define BUTTRESS_SECURITY_CTL_FW_SETUP_MASK 0x1f
+
+#define BUTTRESS_SECURITY_CTL_FW_SETUP_DONE 0x1
+#define BUTTRESS_SECURITY_CTL_AUTH_DONE 0x2
+#define BUTTRESS_SECURITY_CTL_AUTH_FAILED 0x8
+
+#define BUTTRESS_REG_SENSOR_FREQ_CTL 0x16c
+
+#define BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_DEFAULT(i) \
+ (0x1b << ((i) * 10))
+#define BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_SHIFT(i) ((i) * 10)
+#define BUTTRESS_SENSOR_FREQ_CTL_OSC_OUT_FREQ_MASK(i) \
+ (0x1ff << ((i) * 10))
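+
+/*
+ * Example: for sensor clock index i = 1 the 9-bit frequency field
+ * occupies bits [18:10], with a default value of 0x1b.
+ */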
+
+#define BUTTRESS_SENSOR_CLK_FREQ_6P75MHZ 0x176
+#define BUTTRESS_SENSOR_CLK_FREQ_8MHZ 0x164
+#define BUTTRESS_SENSOR_CLK_FREQ_9P6MHZ 0x2
+#define BUTTRESS_SENSOR_CLK_FREQ_12MHZ 0x1b2
+#define BUTTRESS_SENSOR_CLK_FREQ_13P6MHZ 0x1ac
+#define BUTTRESS_SENSOR_CLK_FREQ_14P4MHZ 0x1cc
+#define BUTTRESS_SENSOR_CLK_FREQ_15P8MHZ 0x1a6
+#define BUTTRESS_SENSOR_CLK_FREQ_16P2MHZ 0xca
+#define BUTTRESS_SENSOR_CLK_FREQ_17P3MHZ 0x12e
+#define BUTTRESS_SENSOR_CLK_FREQ_18P6MHZ 0x1c0
+#define BUTTRESS_SENSOR_CLK_FREQ_19P2MHZ 0x0
+#define BUTTRESS_SENSOR_CLK_FREQ_24MHZ 0xb2
+#define BUTTRESS_SENSOR_CLK_FREQ_26MHZ 0xae
+#define BUTTRESS_SENSOR_CLK_FREQ_27MHZ 0x196
+
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_FB_RATIO_MASK 0xff
+#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_A_SHIFT 8
+#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_A_MASK (0x2 << 8)
+#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_C_SHIFT 10
+#define BUTTRESS_SENSOR_FREQ_CTL_SEL_MIPICLK_C_MASK (0x2 << 10)
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_FORCE_OFF_SHIFT 12
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_REF_RATIO_SHIFT 14
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_REF_RATIO_MASK (0x2 << 14)
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_PVD_RATIO_SHIFT 16
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_PVD_RATIO_MASK (0x2 << 16)
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_OUTPUT_RATIO_SHIFT 18
+#define BUTTRESS_SENSOR_FREQ_CTL_LJPLL_OUTPUT_RATIO_MASK (0x2 << 18)
+#define BUTTRESS_SENSOR_FREQ_CTL_START_SHIFT 31
+
+#define BUTTRESS_REG_SENSOR_CLK_CTL 0x170
+
+/* 0 <= i <= 2 */
+#define BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_EN_SHIFT(i) ((i) * 2)
+#define BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_SEL_SHIFT(i) ((i) * 2 + 1)
+
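+/*
+ * Example (derived from the shift macros above): enabling OSC clock
+ * output 1 in BUTTRESS_REG_SENSOR_CLK_CTL means setting
+ * BIT(BUTTRESS_SENSOR_CLK_CTL_OSC_CLK_OUT_EN_SHIFT(1)) = BIT(2), with the
+ * matching output select at bit 3.
+ */
+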
+#define BUTTRESS_REG_FW_SOURCE_BASE_LO 0x78
+#define BUTTRESS_REG_FW_SOURCE_BASE_HI 0x7C
+#define BUTTRESS_REG_FW_SOURCE_SIZE 0x80
+
+#define BUTTRESS_REG_ISR_STATUS 0x90
+#define BUTTRESS_REG_ISR_ENABLED_STATUS 0x94
+#define BUTTRESS_REG_ISR_ENABLE 0x98
+#define BUTTRESS_REG_ISR_CLEAR 0x9C
+
+#define BUTTRESS_ISR_IS_IRQ BIT(0)
+#define BUTTRESS_ISR_PS_IRQ BIT(1)
+#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE BIT(2)
+#define BUTTRESS_ISR_IPC_EXEC_DONE_BY_ISH BIT(3)
+#define BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING BIT(4)
+#define BUTTRESS_ISR_IPC_FROM_ISH_IS_WAITING BIT(5)
+#define BUTTRESS_ISR_CSE_CSR_SET BIT(6)
+#define BUTTRESS_ISR_ISH_CSR_SET BIT(7)
+#define BUTTRESS_ISR_SPURIOUS_CMP BIT(8)
+#define BUTTRESS_ISR_WATCHDOG_EXPIRED BIT(9)
+#define BUTTRESS_ISR_PUNIT_2_IUNIT_IRQ BIT(10)
+#define BUTTRESS_ISR_SAI_VIOLATION BIT(11)
+#define BUTTRESS_ISR_HW_ASSERTION BIT(12)
+
+#define BUTTRESS_REG_IU2CSEDB0 0x100
+
+#define BUTTRESS_IU2CSEDB0_BUSY BIT(31)
+#define BUTTRESS_IU2CSEDB0_SHORT_FORMAT_SHIFT 27
+#define BUTTRESS_IU2CSEDB0_CLIENT_ID_SHIFT 10
+#define BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL 2
+
+#define BUTTRESS_REG_IU2CSEDATA0 0x104
+
+#define BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD 1
+#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_RUN 2
+#define BUTTRESS_IU2CSEDATA0_IPC_AUTH_REPLACE 3
+#define BUTTRESS_IU2CSEDATA0_IPC_UPDATE_SECURE_TOUCH 16
+
+#define BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE 1
+#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_RUN_DONE 2
+#define BUTTRESS_CSE2IUDATA0_IPC_AUTH_REPLACE_DONE 4
+#define BUTTRESS_CSE2IUDATA0_IPC_UPDATE_SECURE_TOUCH_DONE 16
+
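+/*
+ * Illustrative sketch only (an assumption, not the driver's verified
+ * sequence): an IU-to-CSE command such as boot load would plausibly be
+ * issued by writing the opcode to IU2CSEDATA0 and then ringing the
+ * doorbell with the busy flag and IPC client id set:
+ *
+ *   writel(BUTTRESS_IU2CSEDATA0_IPC_BOOT_LOAD,
+ *          base + BUTTRESS_REG_IU2CSEDATA0);
+ *   writel(BUTTRESS_IU2CSEDB0_BUSY |
+ *          (BUTTRESS_IU2CSEDB0_IPC_CLIENT_ID_VAL <<
+ *           BUTTRESS_IU2CSEDB0_CLIENT_ID_SHIFT),
+ *          base + BUTTRESS_REG_IU2CSEDB0);
+ *
+ * with completion then reported as BUTTRESS_CSE2IUDATA0_IPC_BOOT_LOAD_DONE.
+ */
+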
+#define BUTTRESS_REG_IU2CSECSR 0x108
+
+#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE1 BIT(0)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_COMP_ACTIONS_RST_PHASE2 BIT(1)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_QUERIED_IP_COMP_ACTIONS_RST_PHASE BIT(2)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_ASSERTED_REG_VALID_REQ BIT(3)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_ACKED_REG_VALID BIT(4)
+#define BUTTRESS_IU2CSECSR_IPC_PEER_DEASSERTED_REG_VALID_REQ BIT(5)
+
+#define BUTTRESS_REG_CSE2IUDB0 0x304
+#define BUTTRESS_REG_CSE2IUCSR 0x30C
+#define BUTTRESS_REG_CSE2IUDATA0 0x308
+
+/* 0x20 == NACK, 0xf == unknown command */
+#define BUTTRESS_CSE2IUDATA0_IPC_NACK 0xf20
+#define BUTTRESS_CSE2IUDATA0_IPC_NACK_MASK 0xffff
+
+#define BUTTRESS_REG_ISH2IUCSR 0x50
+#define BUTTRESS_REG_ISH2IUDB0 0x54
+#define BUTTRESS_REG_ISH2IUDATA0 0x58
+
+#define BUTTRESS_REG_IU2ISHDB0 0x10C
+#define BUTTRESS_REG_IU2ISHDATA0 0x110
+#define BUTTRESS_REG_IU2ISHDATA1 0x114
+#define BUTTRESS_REG_IU2ISHCSR 0x118
+
+#define BUTTRESS_REG_ISH_START_DETECT 0x198
+#define BUTTRESS_REG_ISH_START_DETECT_MASK 0x19C
+
+#define BUTTRESS_REG_FABRIC_CMD 0x88
+
+#define BUTTRESS_FABRIC_CMD_START_TSC_SYNC BIT(0)
+#define BUTTRESS_FABRIC_CMD_IS_DRAIN BIT(4)
+
+#define BUTTRESS_REG_TSW_CTL 0x120
+#define BUTTRESS_TSW_CTL_SOFT_RESET BIT(8)
+
+#define BUTTRESS_REG_TSC_LO 0x164
+#define BUTTRESS_REG_TSC_HI 0x168
+
+#define BUTTRESS_REG_CSI2_PORT_CONFIG_AB 0x200
+#define BUTTRESS_CSI2_PORT_CONFIG_AB_MUX_MASK 0x1f
+#define BUTTRESS_CSI2_PORT_CONFIG_AB_COMBO_SHIFT_B0 16
+
+#define BUTTRESS_REG_PS_FREQ_CAPABILITIES 0xf7498
+
+#define BUTTRESS_PS_FREQ_CAPABILITIES_LAST_RESOLVED_RATIO_SHIFT 24
+#define BUTTRESS_PS_FREQ_CAPABILITIES_LAST_RESOLVED_RATIO_MASK (0xff << 24)
+#define BUTTRESS_PS_FREQ_CAPABILITIES_MAX_RATIO_SHIFT 16
+#define BUTTRESS_PS_FREQ_CAPABILITIES_MAX_RATIO_MASK (0xff << 16)
+#define BUTTRESS_PS_FREQ_CAPABILITIES_EFFICIENT_RATIO_SHIFT 8
+#define BUTTRESS_PS_FREQ_CAPABILITIES_EFFICIENT_RATIO_MASK (0xff << 8)
+#define BUTTRESS_PS_FREQ_CAPABILITIES_MIN_RATIO_SHIFT 0
+#define BUTTRESS_PS_FREQ_CAPABILITIES_MIN_RATIO_MASK (0xff)
+
+#define BUTTRESS_IRQS (BUTTRESS_ISR_IPC_FROM_CSE_IS_WAITING | \
+ BUTTRESS_ISR_IPC_EXEC_DONE_BY_CSE | \
+ BUTTRESS_ISR_IS_IRQ | \
+ BUTTRESS_ISR_PS_IRQ)
+
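+/*
+ * Sketch of a typical top-half pattern for the ISR registers above (an
+ * assumption, not lifted from the driver): read the enabled status, keep
+ * only the interrupts of interest and acknowledge them:
+ *
+ *   u32 status = readl(base + BUTTRESS_REG_ISR_ENABLED_STATUS);
+ *
+ *   status &= BUTTRESS_IRQS;
+ *   writel(status, base + BUTTRESS_REG_ISR_CLEAR);
+ */
+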
+#define IPU6SE_ISYS_PHY_0_BASE 0x10000
+
+/* only use BB0, BB2, BB4, and BB6 on PHY0 */
+#define IPU6SE_ISYS_PHY_BB_NUM 4
+
+/* offset from PHY base */
+#define PHY_CSI_CFG 0xc0
+#define PHY_CSI_RCOMP_CONTROL 0xc8
+#define PHY_CSI_BSCAN_EXCLUDE 0xd8
+
+#define PHY_CPHY_DLL_OVRD(x) (0x100 + 0x100 * (x))
+#define PHY_DPHY_DLL_OVRD(x) (0x14c + 0x100 * (x))
+#define PHY_CPHY_RX_CONTROL1(x) (0x110 + 0x100 * (x))
+#define PHY_CPHY_RX_CONTROL2(x) (0x114 + 0x100 * (x))
+#define PHY_DPHY_CFG(x) (0x148 + 0x100 * (x))
+#define PHY_BB_AFE_CONFIG(x) (0x174 + 0x100 * (x))
+
+#endif /* IPU_PLATFORM_BUTTRESS_REGS_H */
new file mode 100644
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Intel Corporation */
+
+#ifndef IPU_PLATFORM_ISYS_CSI2_REG_H
+#define IPU_PLATFORM_ISYS_CSI2_REG_H
+
+#define CSI_REG_BASE 0x220000
+#define CSI_REG_BASE_PORT(id) ((id) * 0x1000)
+
+#define IPU_CSI_PORT_A_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(0))
+#define IPU_CSI_PORT_B_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(1))
+#define IPU_CSI_PORT_C_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(2))
+#define IPU_CSI_PORT_D_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(3))
+#define IPU_CSI_PORT_E_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(4))
+#define IPU_CSI_PORT_F_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(5))
+#define IPU_CSI_PORT_G_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(6))
+#define IPU_CSI_PORT_H_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(7))
+
+/* CSI Port General Purpose Registers */
+#define CSI_REG_PORT_GPREG_SRST 0x0
+#define CSI_REG_PORT_GPREG_CSI2_SLV_REG_SRST 0x4
+#define CSI_REG_PORT_GPREG_CSI2_PORT_CONTROL 0x8
+
+/*
+ * Port IRQs mapping events:
+ * IRQ0 - CSI_FE event
+ * IRQ1 - CSI_SYNC
+ * IRQ2 - S2M_SIDS0TO7
+ * IRQ3 - S2M_SIDS8TO15
+ */
+#define CSI_PORT_REG_BASE_IRQ_CSI 0x80
+#define CSI_PORT_REG_BASE_IRQ_CSI_SYNC 0xA0
+#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS0TOS7 0xC0
+#define CSI_PORT_REG_BASE_IRQ_S2M_SIDS8TOS15 0xE0
+
+#define CSI_PORT_REG_BASE_IRQ_EDGE_OFFSET 0x0
+#define CSI_PORT_REG_BASE_IRQ_MASK_OFFSET 0x4
+#define CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET 0x8
+#define CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET 0xc
+#define CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET 0x10
+#define CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET 0x14
+
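+/*
+ * Worked example (pure arithmetic on the macros above): the IRQ_CTRL1
+ * (CSI_SYNC) status register of port 2 sits at
+ *   CSI_REG_BASE + CSI_REG_BASE_PORT(2) + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ *   CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET = 0x220000 + 0x2000 + 0xa0 + 0x8
+ *   = 0x2220a8
+ */
+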
+#define IPU6SE_CSI_RX_ERROR_IRQ_MASK 0x7ffff
+#define IPU6_CSI_RX_ERROR_IRQ_MASK 0xfffff
+
+#define CSI_RX_NUM_ERRORS_IN_IRQ 20
+#define CSI_RX_NUM_IRQ 32
+
+#define IPU_CSI_RX_IRQ_FS_VC 1
+#define IPU_CSI_RX_IRQ_FE_VC 2
+
+/* PPI2CSI */
+#define CSI_REG_PPI2CSI_ENABLE 0x200
+#define CSI_REG_PPI2CSI_CONFIG_PPI_INTF 0x204
+#define PPI_INTF_CONFIG_NOF_ENABLED_DATALANES_SHIFT 3
+#define PPI_INTF_CONFIG_RX_AUTO_CLKGATING_SHIFT 5
+#define CSI_REG_PPI2CSI_CONFIG_CSI_FEATURE 0x208
+
+enum CSI_PPI2CSI_CTRL {
+ CSI_PPI2CSI_DISABLE = 0,
+ CSI_PPI2CSI_ENABLE = 1,
+};
+
+/* CSI_FE */
+#define CSI_REG_CSI_FE_ENABLE 0x280
+#define CSI_REG_CSI_FE_MODE 0x284
+#define CSI_REG_CSI_FE_MUX_CTRL 0x288
+#define CSI_REG_CSI_FE_SYNC_CNTR_SEL 0x290
+
+enum CSI_FE_ENABLE_TYPE {
+ CSI_FE_DISABLE = 0,
+ CSI_FE_ENABLE = 1,
+};
+
+enum CSI_FE_MODE_TYPE {
+ CSI_FE_DPHY_MODE = 0,
+ CSI_FE_CPHY_MODE = 1,
+};
+
+enum CSI_FE_INPUT_SELECTOR {
+ CSI_SENSOR_INPUT = 0,
+ CSI_MIPIGEN_INPUT = 1,
+};
+
+enum CSI_FE_SYNC_CNTR_SEL_TYPE {
+ CSI_CNTR_SENSOR_LINE_ID = (1 << 0),
+ CSI_CNTR_INT_LINE_PKT_ID = ~CSI_CNTR_SENSOR_LINE_ID,
+ CSI_CNTR_SENSOR_FRAME_ID = (1 << 1),
+ CSI_CNTR_INT_FRAME_PKT_ID = ~CSI_CNTR_SENSOR_FRAME_ID,
+};
+
+/* MIPI_PKT_GEN */
+#define CSI_REG_PIXGEN_COM_BASE_OFFSET 0x300
+
+#define IPU_CSI_PORT_A_PIXGEN_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(0) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+#define IPU_CSI_PORT_B_PIXGEN_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(1) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+#define IPU_CSI_PORT_C_PIXGEN_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(2) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+#define IPU_CSI_PORT_D_PIXGEN_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(3) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+#define IPU_CSI_PORT_E_PIXGEN_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(4) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+#define IPU_CSI_PORT_F_PIXGEN_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(5) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+#define IPU_CSI_PORT_G_PIXGEN_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(6) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+#define IPU_CSI_PORT_H_PIXGEN_ADDR_OFFSET \
+ (CSI_REG_BASE + CSI_REG_BASE_PORT(7) + CSI_REG_PIXGEN_COM_BASE_OFFSET)
+
+#define CSI_REG_PIXGEN_COM_ENABLE_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x300)
+#define CSI_REG_PIXGEN_COM_DTYPE_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x304)
+#define CSI_REG_PIXGEN_COM_VTYPE_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x308)
+#define CSI_REG_PIXGEN_COM_VCHAN_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x30C)
+#define CSI_REG_PIXGEN_COM_WCOUNT_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x310)
+/* PRBS */
+#define CSI_REG_PIXGEN_PRBS_RSTVAL_REG0_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x314)
+#define CSI_REG_PIXGEN_PRBS_RSTVAL_REG1_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x318)
+/* SYNC_GENERATOR_CONFIG */
+#define CSI_REG_PIXGEN_SYNG_FREE_RUN_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x31C)
+#define CSI_REG_PIXGEN_SYNG_PAUSE_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x320)
+#define CSI_REG_PIXGEN_SYNG_NOF_FRAME_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x324)
+#define CSI_REG_PIXGEN_SYNG_NOF_PIXEL_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x328)
+#define CSI_REG_PIXGEN_SYNG_NOF_LINE_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x32C)
+#define CSI_REG_PIXGEN_SYNG_HBLANK_CYC_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x330)
+#define CSI_REG_PIXGEN_SYNG_VBLANK_CYC_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x334)
+#define CSI_REG_PIXGEN_SYNG_STAT_HCNT_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x338)
+#define CSI_REG_PIXGEN_SYNG_STAT_VCNT_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x33C)
+#define CSI_REG_PIXGEN_SYNG_STAT_FCNT_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x340)
+#define CSI_REG_PIXGEN_SYNG_STAT_DONE_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x344)
+/* TPG */
+#define CSI_REG_PIXGEN_TPG_MODE_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x348)
+/* TPG: mask_cfg */
+#define CSI_REG_PIXGEN_TPG_HCNT_MASK_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x34C)
+#define CSI_REG_PIXGEN_TPG_VCNT_MASK_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x350)
+#define CSI_REG_PIXGEN_TPG_XYCNT_MASK_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x354)
+/* TPG: delta_cfg */
+#define CSI_REG_PIXGEN_TPG_HCNT_DELTA_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x358)
+#define CSI_REG_PIXGEN_TPG_VCNT_DELTA_REG_IDX(id) \
+ (CSI_REG_BASE_PORT(id) + 0x35C)
+/* TPG: color_cfg */
+#define CSI_REG_PIXGEN_TPG_R1_REG_IDX(id) (CSI_REG_BASE_PORT(id) + 0x360)
+#define CSI_REG_PIXGEN_TPG_G1_REG_IDX(id) (CSI_REG_BASE_PORT(id) + 0x364)
+#define CSI_REG_PIXGEN_TPG_B1_REG_IDX(id) (CSI_REG_BASE_PORT(id) + 0x368)
+#define CSI_REG_PIXGEN_TPG_R2_REG_IDX(id) (CSI_REG_BASE_PORT(id) + 0x36C)
+#define CSI_REG_PIXGEN_TPG_G2_REG_IDX(id) (CSI_REG_BASE_PORT(id) + 0x370)
+#define CSI_REG_PIXGEN_TPG_B2_REG_IDX(id) (CSI_REG_BASE_PORT(id) + 0x374)
+
+#define CSI_REG_PIXGEN_PRBS_RSTVAL_REG0 CSI_REG_PIXGEN_PRBS_RSTVAL_REG0_IDX(0)
+#define CSI_REG_PIXGEN_PRBS_RSTVAL_REG1 CSI_REG_PIXGEN_PRBS_RSTVAL_REG1_IDX(0)
+#define CSI_REG_PIXGEN_COM_ENABLE_REG CSI_REG_PIXGEN_COM_ENABLE_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_MODE_REG CSI_REG_PIXGEN_TPG_MODE_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_R1_REG CSI_REG_PIXGEN_TPG_R1_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_G1_REG CSI_REG_PIXGEN_TPG_G1_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_B1_REG CSI_REG_PIXGEN_TPG_B1_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_R2_REG CSI_REG_PIXGEN_TPG_R2_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_G2_REG CSI_REG_PIXGEN_TPG_G2_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_B2_REG CSI_REG_PIXGEN_TPG_B2_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_HCNT_MASK_REG CSI_REG_PIXGEN_TPG_HCNT_MASK_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_VCNT_MASK_REG CSI_REG_PIXGEN_TPG_VCNT_MASK_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_XYCNT_MASK_REG \
+ CSI_REG_PIXGEN_TPG_XYCNT_MASK_REG_IDX(0)
+
+#define CSI_REG_PIXGEN_SYNG_NOF_FRAME_REG \
+ CSI_REG_PIXGEN_SYNG_NOF_FRAME_REG_IDX(0)
+#define CSI_REG_PIXGEN_SYNG_NOF_LINE_REG \
+ CSI_REG_PIXGEN_SYNG_NOF_LINE_REG_IDX(0)
+#define CSI_REG_PIXGEN_SYNG_HBLANK_CYC_REG \
+ CSI_REG_PIXGEN_SYNG_HBLANK_CYC_REG_IDX(0)
+#define CSI_REG_PIXGEN_SYNG_VBLANK_CYC_REG \
+ CSI_REG_PIXGEN_SYNG_VBLANK_CYC_REG_IDX(0)
+#define CSI_REG_PIXGEN_SYNG_NOF_PIXEL_REG \
+ CSI_REG_PIXGEN_SYNG_NOF_PIXEL_REG_IDX(0)
+#define CSI_REG_PIXGEN_COM_WCOUNT_REG CSI_REG_PIXGEN_COM_WCOUNT_REG_IDX(0)
+#define CSI_REG_PIXGEN_COM_DTYPE_REG CSI_REG_PIXGEN_COM_DTYPE_REG_IDX(0)
+#define CSI_REG_PIXGEN_COM_VTYPE_REG CSI_REG_PIXGEN_COM_VTYPE_REG_IDX(0)
+#define CSI_REG_PIXGEN_COM_VCHAN_REG CSI_REG_PIXGEN_COM_VCHAN_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_HCNT_DELTA_REG \
+ CSI_REG_PIXGEN_TPG_HCNT_DELTA_REG_IDX(0)
+#define CSI_REG_PIXGEN_TPG_VCNT_DELTA_REG \
+ CSI_REG_PIXGEN_TPG_VCNT_DELTA_REG_IDX(0)
+
+/* CSI HUB General Purpose Registers */
+#define CSI_REG_HUB_GPREG_SRST (CSI_REG_BASE + 0x18000)
+#define CSI_REG_HUB_GPREG_SLV_REG_SRST (CSI_REG_BASE + 0x18004)
+#define CSI_REG_HUB_GPREG_PHY_CONTROL(id) \
+ (CSI_REG_BASE + 0x18008 + (id) * 0x8)
+#define CSI_REG_HUB_GPREG_PHY_CONTROL_RESET BIT(4)
+#define CSI_REG_HUB_GPREG_PHY_CONTROL_PWR_EN BIT(0)
+#define CSI_REG_HUB_GPREG_PHY_STATUS(id) \
+ (CSI_REG_BASE + 0x1800c + (id) * 0x8)
+#define CSI_REG_HUB_GPREG_PHY_STATUS_POWER_ACK BIT(0)
+#define CSI_REG_HUB_GPREG_PHY_STATUS_PHY_READY BIT(4)
+
+#define CSI_REG_HUB_DRV_ACCESS_PORT(id) (CSI_REG_BASE + 0x18018 + (id) * 4)
+#define CSI_REG_HUB_FW_ACCESS_PORT(id) (CSI_REG_BASE + 0x17000 + (id) * 4)
+
+enum CSI_PORT_CLK_GATING_SWITCH {
+ CSI_PORT_CLK_GATING_OFF = 0,
+ CSI_PORT_CLK_GATING_ON = 1,
+};
+
+#define CSI_REG_BASE_HUB_IRQ 0x18200
+
+#define IPU_NUM_OF_DLANE_REG_PORT0 2
+#define IPU_NUM_OF_DLANE_REG_PORT1 4
+#define IPU_NUM_OF_DLANE_REG_PORT2 4
+#define IPU_NUM_OF_DLANE_REG_PORT3 2
+#define IPU_NUM_OF_DLANE_REG_PORT4 2
+#define IPU_NUM_OF_DLANE_REG_PORT5 4
+#define IPU_NUM_OF_DLANE_REG_PORT6 4
+#define IPU_NUM_OF_DLANE_REG_PORT7 2
+
+/*
+ * ipu6se supports 2 SIPs, 2 ports per SIP, i.e. 4 ports, 0..3:
+ * sip0 - ports 0 and 1
+ * sip1 - ports 2 and 3
+ * Ports 0 and 2 support 4 data lanes, ports 1 and 3 support 2 data lanes.
+ * All offsets are relative to the isys base address.
+ */
+
+#define CSI2_HUB_GPREG_SIP_SRST(sip) (0x238038 + (sip) * 4)
+#define CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip) (0x238050 + (sip) * 4)
+
+#define CSI2_HUB_GPREG_DPHY_TIMER_INCR (0x238040)
+#define CSI2_HUB_GPREG_HPLL_FREQ (0x238044)
+#define CSI2_HUB_GPREG_IS_CLK_RATIO (0x238048)
+#define CSI2_HUB_GPREG_HPLL_FREQ_ISCLK_RATE_OVERRIDE (0x23804c)
+#define CSI2_HUB_GPREG_PORT_CLKGATING_DISABLE (0x238058)
+#define CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL (0x23805c)
+#define CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL (0x238088)
+#define CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL (0x2380a4)
+#define CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL (0x2380d0)
+
+#define CSI2_SIP_TOP_CSI_RX_BASE(sip) (0x23805c + (sip) * 0x48)
+#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port) (0x23805c + ((port) / 2) * 0x48)
+#define CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) (0x238088 + ((port) / 2) * 0x48)
+
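+/*
+ * Worked example (derived from the macros above): port 2 belongs to sip1
+ * and maps to the RX_A block, so
+ *   CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(2) = 0x23805c + (2 / 2) * 0x48 = 0x2380a4
+ * which equals CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL; likewise
+ *   CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(3) = 0x238088 + 0x48 = 0x2380d0
+ * equals CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL.
+ */
+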
+/* offset from port base */
+#define CSI2_SIP_TOP_CSI_RX_PORT_CONTROL (0x0)
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE (0x4)
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE (0x8)
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(lane) (0xc + (lane) * 8)
+#define CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(lane) (0x10 + (lane) * 8)
+
+#endif /* IPU_PLATFORM_ISYS_CSI2_REG_H */
new file mode 100644
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Intel Corporation */
+
+#ifndef IPU_PLATFORM_ISYS_H
+#define IPU_PLATFORM_ISYS_H
+
+#define IPU_ISYS_ENTITY_PREFIX "Intel IPU6"
+
+/*
+ * FW supports a maximum of 16 streams
+ */
+#define IPU_ISYS_MAX_STREAMS 16
+
+#define ISYS_UNISPART_IRQS (IPU_ISYS_UNISPART_IRQ_SW | \
+ IPU_ISYS_UNISPART_IRQ_CSI0 | \
+ IPU_ISYS_UNISPART_IRQ_CSI1)
+
+/* IPU6 ISYS compression alignment */
+#define IPU_ISYS_COMPRESSION_LINE_ALIGN 512
+#define IPU_ISYS_COMPRESSION_HEIGHT_ALIGN 1
+#define IPU_ISYS_COMPRESSION_TILE_SIZE_BYTES 512
+#define IPU_ISYS_COMPRESSION_PAGE_ALIGN 0x1000
+#define IPU_ISYS_COMPRESSION_TILE_STATUS_BITS 4
+#define IPU_ISYS_COMPRESSION_MAX 3
+
+#endif /* IPU_PLATFORM_ISYS_H */
new file mode 100644
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Intel Corporation */
+
+#ifndef IPU_PLATFORM_PSYS_H
+#define IPU_PLATFORM_PSYS_H
+
+#include "ipu-psys.h"
+#include <uapi/linux/ipu-psys.h>
+
+#define IPU_PSYS_BUF_SET_POOL_SIZE 8
+#define IPU_PSYS_BUF_SET_MAX_SIZE 1024
+
+struct ipu_fw_psys_buffer_set;
+
+enum ipu_psys_cmd_state {
+ KCMD_STATE_PPG_NEW,
+ KCMD_STATE_PPG_START,
+ KCMD_STATE_PPG_ENQUEUE,
+ KCMD_STATE_PPG_STOP,
+ KCMD_STATE_PPG_COMPLETE
+};
+
+struct ipu_psys_scheduler {
+ struct list_head ppgs;
+ struct mutex bs_mutex; /* Protects buf_set field */
+ struct list_head buf_sets;
+};
+
+enum ipu_psys_ppg_state {
+ PPG_STATE_START = (1 << 0),
+ PPG_STATE_STARTING = (1 << 1),
+ PPG_STATE_STARTED = (1 << 2),
+ PPG_STATE_RUNNING = (1 << 3),
+ PPG_STATE_SUSPEND = (1 << 4),
+ PPG_STATE_SUSPENDING = (1 << 5),
+ PPG_STATE_SUSPENDED = (1 << 6),
+ PPG_STATE_RESUME = (1 << 7),
+ PPG_STATE_RESUMING = (1 << 8),
+ PPG_STATE_RESUMED = (1 << 9),
+ PPG_STATE_STOP = (1 << 10),
+ PPG_STATE_STOPPING = (1 << 11),
+ PPG_STATE_STOPPED = (1 << 12),
+};
+
+struct ipu_psys_ppg {
+ struct ipu_psys_pg *kpg;
+ struct ipu_psys_fh *fh;
+ struct list_head list;
+ struct list_head sched_list;
+ u64 token;
+ void *manifest;
+ struct mutex mutex; /* Protects kcmd and ppg state field */
+ struct list_head kcmds_new_list;
+ struct list_head kcmds_processing_list;
+ struct list_head kcmds_finished_list;
+ enum ipu_psys_ppg_state state;
+ u32 pri_base;
+ int pri_dynamic;
+};
+
+struct ipu_psys_buffer_set {
+ struct list_head list;
+ struct ipu_fw_psys_buffer_set *buf_set;
+ size_t size;
+ size_t buf_set_size;
+ dma_addr_t dma_addr;
+ void *kaddr;
+ struct ipu_psys_kcmd *kcmd;
+};
+
+int ipu_psys_kcmd_start(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd);
+void ipu_psys_kcmd_complete(struct ipu_psys_ppg *kppg,
+ struct ipu_psys_kcmd *kcmd,
+ int error);
+int ipu_psys_fh_init(struct ipu_psys_fh *fh);
+int ipu_psys_fh_deinit(struct ipu_psys_fh *fh);
+
+#endif /* IPU_PLATFORM_PSYS_H */
new file mode 100644
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 - 2020 Intel Corporation */
+
+#ifndef IPU_PLATFORM_REGS_H
+#define IPU_PLATFORM_REGS_H
+
+/*
+ * IPU6 uses a uniform address space within the IPU: all subsystem registers
+ * are located in one single space starting from 0, but in different sections
+ * with different addresses. The subsystem offsets are defined as 0 because
+ * the register definitions already carry the full address offset.
+ */
+#define IPU_UNIFIED_OFFSET 0
+
+#define IPU_ISYS_IOMMU0_OFFSET 0x2E0000
+#define IPU_ISYS_IOMMU1_OFFSET 0x2E0500
+#define IPU_ISYS_IOMMUI_OFFSET 0x2E0A00
+
+#define IPU_PSYS_IOMMU0_OFFSET 0x1B0000
+#define IPU_PSYS_IOMMU1_OFFSET 0x1B0700
+#define IPU_PSYS_IOMMU1R_OFFSET 0x1B0E00
+#define IPU_PSYS_IOMMUI_OFFSET 0x1B1500
+
+/* the offset from IOMMU base register */
+#define IPU_MMU_L1_STREAM_ID_REG_OFFSET 0x0c
+#define IPU_MMU_L2_STREAM_ID_REG_OFFSET 0x4c
+#define IPU_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET 0x8c
+
+#define IPU_MMU_INFO_OFFSET 0x8
+
+#define IPU_ISYS_SPC_OFFSET 0x210000
+
+#define IPU6SE_PSYS_SPC_OFFSET 0x110000
+#define IPU6_PSYS_SPC_OFFSET 0x118000
+
+#define IPU_ISYS_DMEM_OFFSET 0x200000
+#define IPU_PSYS_DMEM_OFFSET 0x100000
+
+#define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE 0x238200
+#define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK 0x238204
+#define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS 0x238208
+#define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR 0x23820c
+#define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE 0x238210
+#define IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE 0x238214
+
+#define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_EDGE 0x238220
+#define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_MASK 0x238224
+#define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_STATUS 0x238228
+#define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_CLEAR 0x23822c
+#define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_ENABLE 0x238230
+#define IPU_REG_ISYS_CSI_TOP_CTRL1_IRQ_LEVEL_NOT_PULSE 0x238234
+
+#define IPU_REG_ISYS_UNISPART_IRQ_EDGE 0x27c000
+#define IPU_REG_ISYS_UNISPART_IRQ_MASK 0x27c004
+#define IPU_REG_ISYS_UNISPART_IRQ_STATUS 0x27c008
+#define IPU_REG_ISYS_UNISPART_IRQ_CLEAR 0x27c00c
+#define IPU_REG_ISYS_UNISPART_IRQ_ENABLE 0x27c010
+#define IPU_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE 0x27c014
+#define IPU_REG_ISYS_UNISPART_SW_IRQ_REG 0x27c414
+#define IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG 0x27c418
+#define IPU_ISYS_UNISPART_IRQ_CSI0 BIT(2)
+#define IPU_ISYS_UNISPART_IRQ_CSI1 BIT(3)
+#define IPU_ISYS_UNISPART_IRQ_SW BIT(22)
+
+#define IPU_REG_ISYS_ISL_TOP_IRQ_EDGE 0x2b0200
+#define IPU_REG_ISYS_ISL_TOP_IRQ_MASK 0x2b0204
+#define IPU_REG_ISYS_ISL_TOP_IRQ_STATUS 0x2b0208
+#define IPU_REG_ISYS_ISL_TOP_IRQ_CLEAR 0x2b020c
+#define IPU_REG_ISYS_ISL_TOP_IRQ_ENABLE 0x2b0210
+#define IPU_REG_ISYS_ISL_TOP_IRQ_LEVEL_NOT_PULSE 0x2b0214
+
+#define IPU_REG_ISYS_CMPR_TOP_IRQ_EDGE 0x2d2100
+#define IPU_REG_ISYS_CMPR_TOP_IRQ_MASK 0x2d2104
+#define IPU_REG_ISYS_CMPR_TOP_IRQ_STATUS 0x2d2108
+#define IPU_REG_ISYS_CMPR_TOP_IRQ_CLEAR 0x2d210c
+#define IPU_REG_ISYS_CMPR_TOP_IRQ_ENABLE 0x2d2110
+#define IPU_REG_ISYS_CMPR_TOP_IRQ_LEVEL_NOT_PULSE 0x2d2114
+
+/* CDC Burst collector thresholds for isys - 3 FIFOs i = 0..2 */
+#define IPU_REG_ISYS_CDC_THRESHOLD(i) (0x27c400 + ((i) * 4))
+
+#define IPU_ISYS_CSI_PHY_NUM 2
+#define IPU_CSI_IRQ_NUM_PER_PIPE 4
+#define IPU6SE_ISYS_CSI_PORT_NUM 4
+#define IPU6_ISYS_CSI_PORT_NUM 8
+
+#define IPU_ISYS_CSI_PORT_IRQ(irq_num) (1 << (irq_num))
+
+/* PSYS Info bits */
+#define IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(a) (0x2C + ((a) * 12))
+#define IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(a) (0x5C + ((a) * 12))
+
+/* PKG DIR OFFSET in IMR in secure mode */
+#define IPU_PKG_DIR_IMR_OFFSET 0x40
+
+#define IPU_ISYS_REG_SPC_STATUS_CTRL 0x0
+
+#define IPU_ISYS_SPC_STATUS_START BIT(1)
+#define IPU_ISYS_SPC_STATUS_RUN BIT(3)
+#define IPU_ISYS_SPC_STATUS_READY BIT(5)
+#define IPU_ISYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12)
+#define IPU_ISYS_SPC_STATUS_ICACHE_PREFETCH BIT(13)
+
+#define IPU_PSYS_REG_SPC_STATUS_CTRL 0x0
+#define IPU_PSYS_REG_SPC_START_PC 0x4
+#define IPU_PSYS_REG_SPC_ICACHE_BASE 0x10
+#define IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER 0x14
+
+#define IPU_PSYS_SPC_STATUS_START BIT(1)
+#define IPU_PSYS_SPC_STATUS_RUN BIT(3)
+#define IPU_PSYS_SPC_STATUS_READY BIT(5)
+#define IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE BIT(12)
+#define IPU_PSYS_SPC_STATUS_ICACHE_PREFETCH BIT(13)
+
+#define IPU_PSYS_REG_SPP0_STATUS_CTRL 0x20000
+
+#define IPU_INFO_ENABLE_SNOOP BIT(0)
+#define IPU_INFO_DEC_FORCE_FLUSH BIT(1)
+#define IPU_INFO_DEC_PASS_THRU BIT(2)
+#define IPU_INFO_ZLW BIT(3)
+#define IPU_INFO_STREAM_ID_SET(id) (((id) & 0x1F) << 4)
+#define IPU_INFO_REQUEST_DESTINATION_IOSF BIT(9)
+#define IPU_INFO_IMR_BASE BIT(10)
+#define IPU_INFO_IMR_DESTINED BIT(11)
+
+#define IPU_INFO_REQUEST_DESTINATION_PRIMARY IPU_INFO_REQUEST_DESTINATION_IOSF
+
+/* Trace unit related register definitions */
+#define TRACE_REG_MAX_ISYS_OFFSET 0xfffff
+#define TRACE_REG_MAX_PSYS_OFFSET 0xfffff
+#define IPU_ISYS_OFFSET IPU_ISYS_DMEM_OFFSET
+#define IPU_PSYS_OFFSET IPU_PSYS_DMEM_OFFSET
+/* ISYS trace unit registers */
+/* Trace unit base offset */
+#define IPU_TRACE_REG_IS_TRACE_UNIT_BASE 0x27d000
+/* Trace monitors */
+#define IPU_TRACE_REG_IS_SP_EVQ_BASE 0x211000
+/* GPC blocks */
+#define IPU_TRACE_REG_IS_SP_GPC_BASE 0x210800
+#define IPU_TRACE_REG_IS_ISL_GPC_BASE 0x2b0a00
+#define IPU_TRACE_REG_IS_MMU_GPC_BASE 0x2e0f00
+/* each CSI2 port has a dedicated trace monitor, index 0..7 */
+#define IPU_TRACE_REG_CSI2_TM_BASE(port) (0x220400 + 0x1000 * (port))
+
+/* Trace timers */
+#define IPU_TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N 0x27c410
+#define TRACE_REG_GPREG_TRACE_TIMER_RST_OFF BIT(0)
+
+/* SIG2CIO */
+#define IPU_TRACE_REG_CSI2_PORT_SIG2SIO_GR_BASE(port) \
+ (0x220e00 + (port) * 0x1000)
+
+/* PSYS trace unit registers */
+/* Trace unit base offset */
+#define IPU_TRACE_REG_PS_TRACE_UNIT_BASE 0x1b4000
+/* Trace monitors */
+#define IPU_TRACE_REG_PS_SPC_EVQ_BASE 0x119000
+#define IPU_TRACE_REG_PS_SPP0_EVQ_BASE 0x139000
+
+/* GPC blocks */
+#define IPU_TRACE_REG_PS_SPC_GPC_BASE 0x118800
+#define IPU_TRACE_REG_PS_SPP0_GPC_BASE 0x138800
+#define IPU_TRACE_REG_PS_MMU_GPC_BASE 0x1b1b00
+
+/* Trace timers */
+#define IPU_TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N 0x1aa714
+
+/*
+ * s2m_pixel_soc_pixel_remapping is dedicated to enabling the pixel s2m
+ * remap ability. Remap here means that s2m rearranges the order of the
+ * pixels in each 4-pixel group.
+ * For example, mirror remapping means that if the input's first 4 pixels
+ * are 1 2 3 4, then the output's first 4 pixels should be 4 3 2 1.
+ * 0xE4 is from the s2m MAS document. It means no remapping.
+ */
+#define S2M_PIXEL_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xE4
+/*
+ * csi_be_soc_pixel_remapping is for enabling the csi/mipi be pixel
+ * remapping feature. This remapping is exactly like the stream2mmio
+ * remapping.
+ */
+#define CSI_BE_SOC_PIXEL_REMAPPING_FLAG_NO_REMAPPING 0xE4
+
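+/*
+ * Worked decode (an interpretation consistent with the comments above):
+ * reading 0xE4 as four 2-bit source selectors, LSB first, gives
+ * 0xE4 = 0b11100100 -> 0b00, 0b01, 0b10, 0b11, i.e. output pixel i takes
+ * input pixel i for i = 0..3, hence "no remapping". A mirrored 4-pixel
+ * group would presumably be 0b00011011 = 0x1b.
+ */
+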
+#define IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR 0x1AE000
+#define IPU_REG_DMA_TOP_AB_GROUP2_BASE_ADDR 0x1AF000
+#define IPU_REG_DMA_TOP_AB_RING_MIN_OFFSET(n) (0x4 + (n) * 0xC)
+#define IPU_REG_DMA_TOP_AB_RING_MAX_OFFSET(n) (0x8 + (n) * 0xC)
+#define IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(n) (0xC + (n) * 0xC)
+
+enum ipu_device_ab_group1_target_id {
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R0_SPC_DMEM,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R1_SPC_DMEM,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R2_SPC_DMEM,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R6_SPC_EQ,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R7_SPC_RESERVED,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R8_SPC_RESERVED,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R9_SPP0,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R10_SPP1,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R11_CENTRAL_R1,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R12_IRQ,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R13_CENTRAL_R2,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R14_DMA,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R15_DMA,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R16_GP,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R17_ZLW_INSERTER,
+ IPU_DEVICE_AB_GROUP1_TARGET_ID_R18_AB,
+};
+
+enum nci_ab_access_mode {
+ NCI_AB_ACCESS_MODE_RW, /* read & write */
+ NCI_AB_ACCESS_MODE_RO, /* read only */
+ NCI_AB_ACCESS_MODE_WO, /* write only */
+ NCI_AB_ACCESS_MODE_NA /* No access at all */
+};
+
+/*
+ * 3:0 CSI_PORT.irq_out[3:0] CSI_PORT_CTRL0 IRQ outputs (4bits)
+ * [0] CSI_PORT.IRQ_CTRL0_csi
+ * [1] CSI_PORT.IRQ_CTRL1_csi_sync
+ * [2] CSI_PORT.IRQ_CTRL2_s2m_sids0to7
+ * [3] CSI_PORT.IRQ_CTRL3_s2m_sids8to15
+ */
+#define IPU_ISYS_UNISPART_IRQ_CSI2(port) \
+ (0x3 << ((port) * IPU_CSI_IRQ_NUM_PER_PIPE))
+
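+/*
+ * Worked example: IPU_ISYS_UNISPART_IRQ_CSI2(1) = 0x3 << 4 = 0x30, i.e.
+ * the IRQ_CTRL0_csi and IRQ_CTRL1_csi_sync outputs of port 1.
+ */
+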
+/* IRQ-related registers in PSYS */
+#define IPU_REG_PSYS_GPDEV_IRQ_EDGE 0x1aa200
+#define IPU_REG_PSYS_GPDEV_IRQ_MASK 0x1aa204
+#define IPU_REG_PSYS_GPDEV_IRQ_STATUS 0x1aa208
+#define IPU_REG_PSYS_GPDEV_IRQ_CLEAR 0x1aa20c
+#define IPU_REG_PSYS_GPDEV_IRQ_ENABLE 0x1aa210
+#define IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE 0x1aa214
+/* There are 8 FW interrupts, n = 0..7 */
+#define IPU_PSYS_GPDEV_FWIRQ0 5
+#define IPU_PSYS_GPDEV_FWIRQ1 6
+#define IPU_PSYS_GPDEV_FWIRQ2 7
+#define IPU_PSYS_GPDEV_FWIRQ3 8
+#define IPU_PSYS_GPDEV_FWIRQ4 9
+#define IPU_PSYS_GPDEV_FWIRQ5 10
+#define IPU_PSYS_GPDEV_FWIRQ6 11
+#define IPU_PSYS_GPDEV_FWIRQ7 12
+#define IPU_PSYS_GPDEV_IRQ_FWIRQ(n) (1 << (n))
+#define IPU_REG_PSYS_GPDEV_FWIRQ(n) (4 * (n) + 0x1aa100)
+
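+/*
+ * Worked example (derived from the macros above): FW interrupt 3 is
+ * triggered through IPU_REG_PSYS_GPDEV_FWIRQ(3) = 0x1aa10c and shows up
+ * in the GPDEV IRQ registers as
+ * IPU_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ3) = BIT(8).
+ */
+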
+/*
+ * ISYS GPC (General Performance Counters) Registers
+ */
+#define IPU_ISYS_GPC_BASE 0x2E0000
+#define IPU_ISYS_GPREG_TRACE_TIMER_RST 0x27C410
+enum ipu_isf_cdc_mmu_gpc_registers {
+ IPU_ISF_CDC_MMU_GPC_SOFT_RESET = 0x0F00,
+ IPU_ISF_CDC_MMU_GPC_OVERALL_ENABLE = 0x0F04,
+ IPU_ISF_CDC_MMU_GPC_ENABLE0 = 0x0F20,
+ IPU_ISF_CDC_MMU_GPC_VALUE0 = 0x0F60,
+ IPU_ISF_CDC_MMU_GPC_CNT_SEL0 = 0x0FA0,
+};
+
+/*
+ * GPC (General Performance Counters) Registers
+ */
+#define IPU_GPC_BASE 0x1B0000
+#define IPU_GPREG_TRACE_TIMER_RST 0x1AA714
+enum ipu_cdc_mmu_gpc_registers {
+ IPU_CDC_MMU_GPC_SOFT_RESET = 0x1B00,
+ IPU_CDC_MMU_GPC_OVERALL_ENABLE = 0x1B04,
+ IPU_CDC_MMU_GPC_TRACE_HEADER = 0x1B08,
+ IPU_CDC_MMU_GPC_TRACE_ADDR = 0x1B0C,
+ IPU_CDC_MMU_GPC_TRACE_ENABLE_NPK = 0x1B10,
+ IPU_CDC_MMU_GPC_TRACE_ENABLE_DDR = 0x1B14,
+ IPU_CDC_MMU_GPC_LP_CLEAR = 0x1B18,
+ IPU_CDC_MMU_GPC_LOST_PACKET = 0x1B1C,
+ IPU_CDC_MMU_GPC_ENABLE0 = 0x1B20,
+ IPU_CDC_MMU_GPC_VALUE0 = 0x1B60,
+ IPU_CDC_MMU_GPC_CNT_SEL0 = 0x1BA0,
+ IPU_CDC_MMU_GPC_START_SEL0 = 0x1BE0,
+ IPU_CDC_MMU_GPC_STOP_SEL0 = 0x1C20,
+ IPU_CDC_MMU_GPC_MSG_SEL0 = 0x1C60,
+ IPU_CDC_MMU_GPC_PLOAD_SEL0 = 0x1CA0,
+ IPU_CDC_MMU_GPC_IRQ_TRIGGER_VALUE0 = 0x1CE0,
+ IPU_CDC_MMU_GPC_IRQ_TIMER_SEL0 = 0x1D20,
+ IPU_CDC_MMU_GPC_IRQ_ENABLE0 = 0x1D60,
+ IPU_CDC_MMU_GPC_END = 0x1D9C
+};
+
+#define IPU_GPC_SENSE_OFFSET 7
+#define IPU_GPC_ROUTE_OFFSET 5
+#define IPU_GPC_SOURCE_OFFSET 0
+
+/*
+ * Signals monitored by GPC
+ */
+#define IPU_GPC_TRACE_TLB_MISS_MMU_LB_IDX 0
+#define IPU_GPC_TRACE_FULL_WRITE_LB_IDX 1
+#define IPU_GPC_TRACE_NOFULL_WRITE_LB_IDX 2
+#define IPU_GPC_TRACE_FULL_READ_LB_IDX 3
+#define IPU_GPC_TRACE_NOFULL_READ_LB_IDX 4
+#define IPU_GPC_TRACE_STALL_LB_IDX 5
+#define IPU_GPC_TRACE_ZLW_LB_IDX 6
+#define IPU_GPC_TRACE_TLB_MISS_MMU_HBTX_IDX 7
+#define IPU_GPC_TRACE_FULL_WRITE_HBTX_IDX 8
+#define IPU_GPC_TRACE_NOFULL_WRITE_HBTX_IDX 9
+#define IPU_GPC_TRACE_FULL_READ_HBTX_IDX 10
+#define IPU_GPC_TRACE_STALL_HBTX_IDX 11
+#define IPU_GPC_TRACE_ZLW_HBTX_IDX 12
+#define IPU_GPC_TRACE_TLB_MISS_MMU_HBFRX_IDX 13
+#define IPU_GPC_TRACE_FULL_READ_HBFRX_IDX 14
+#define IPU_GPC_TRACE_NOFULL_READ_HBFRX_IDX 15
+#define IPU_GPC_TRACE_STALL_HBFRX_IDX 16
+#define IPU_GPC_TRACE_ZLW_HBFRX_IDX 17
+#define IPU_GPC_TRACE_TLB_MISS_ICACHE_IDX 18
+#define IPU_GPC_TRACE_FULL_READ_ICACHE_IDX 19
+#define IPU_GPC_TRACE_STALL_ICACHE_IDX 20
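+
+/*
+ * Sketch (an assumption about how the offsets above combine): a counter
+ * select value is presumably composed as
+ *   (sense << IPU_GPC_SENSE_OFFSET) | (route << IPU_GPC_ROUTE_OFFSET) |
+ *   (source << IPU_GPC_SOURCE_OFFSET)
+ * with source taken from the IPU_GPC_TRACE_*_IDX signals above.
+ */
+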
+/*
+ * psys subdomains power request regs
+ */
+enum ipu_device_buttress_psys_domain_pos {
+ IPU_PSYS_SUBDOMAIN_ISA = 0,
+ IPU_PSYS_SUBDOMAIN_PSA = 1,
+ IPU_PSYS_SUBDOMAIN_BB = 2,
+ IPU_PSYS_SUBDOMAIN_IDSP1 = 3, /* only in IPU6M */
+ IPU_PSYS_SUBDOMAIN_IDSP2 = 4, /* only in IPU6M */
+};
+
+#define IPU_PSYS_SUBDOMAINS_POWER_MASK (BIT(IPU_PSYS_SUBDOMAIN_ISA) | \
+ BIT(IPU_PSYS_SUBDOMAIN_PSA) | \
+ BIT(IPU_PSYS_SUBDOMAIN_BB))
+
+#define IPU_PSYS_SUBDOMAINS_POWER_REQ 0xa0
+#define IPU_PSYS_SUBDOMAINS_POWER_STATUS 0xa4
+
+#endif /* IPU_PLATFORM_REGS_H */
new file mode 100644
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 - 2020 Intel Corporation */
+
+#ifndef IPU_PLATFORM_RESOURCES_COMMON_H
+#define IPU_PLATFORM_RESOURCES_COMMON_H
+
+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST 0
+
+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_STRUCT 0
+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_GROUP_STRUCT 2
+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT 2
+
+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_TERMINAL_STRUCT 5
+
+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_PARAM_TERMINAL_STRUCT 6
+
+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_DATA_TERMINAL_STRUCT 3
+
+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_DESC_STRUCT 3
+#define IPU_FW_PSYS_N_FRAME_PLANES 6
+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_FRAME_STRUCT 4
+
+#define IPU_FW_PSYS_N_PADDING_UINT8_IN_BUFFER_SET_STRUCT 1
+
+#define IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES 4
+#define IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES 4
+
+#define IPU_FW_PSYS_PROCESS_MAX_CELLS 1
+#define IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS 4
+#define IPU_FW_PSYS_RBM_NOF_ELEMS 5
+#define IPU_FW_PSYS_KBM_NOF_ELEMS 4
+
+struct ipu_fw_psys_process {
+ s16 parent_offset;
+ u8 size;
+ u8 cell_dependencies_offset;
+ u8 terminal_dependencies_offset;
+ u8 process_extension_offset;
+ u8 ID;
+ u8 program_idx;
+ u8 state;
+ u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS];
+ u8 cell_dependency_count;
+ u8 terminal_dependency_count;
+};
+
+struct ipu_fw_psys_program_manifest {
+ u32 kernel_bitmap[IPU_FW_PSYS_KERNEL_BITMAP_NOF_ELEMS];
+ s16 parent_offset;
+ u8 program_dependency_offset;
+ u8 terminal_dependency_offset;
+ u8 size;
+ u8 program_extension_offset;
+ u8 program_type;
+ u8 ID;
+ u8 cells[IPU_FW_PSYS_PROCESS_MAX_CELLS];
+ u8 cell_type_id;
+ u8 program_dependency_count;
+ u8 terminal_dependency_count;
+};
+
+/* platform specific resource interface */
+struct ipu_psys_resource_pool;
+struct ipu_psys_resource_alloc;
+struct ipu_fw_psys_process_group;
+int ipu_psys_allocate_resources(const struct device *dev,
+ struct ipu_fw_psys_process_group *pg,
+ void *pg_manifest,
+ struct ipu_psys_resource_alloc *alloc,
+ struct ipu_psys_resource_pool *pool);
+int ipu_psys_move_resources(const struct device *dev,
+ struct ipu_psys_resource_alloc *alloc,
+ struct ipu_psys_resource_pool *source_pool,
+ struct ipu_psys_resource_pool *target_pool);
+
+void ipu_psys_resource_copy(struct ipu_psys_resource_pool *src,
+ struct ipu_psys_resource_pool *dest);
+
+int ipu_psys_try_allocate_resources(struct device *dev,
+ struct ipu_fw_psys_process_group *pg,
+ void *pg_manifest,
+ struct ipu_psys_resource_pool *pool);
+
+void ipu_psys_reset_process_cell(const struct device *dev,
+ struct ipu_fw_psys_process_group *pg,
+ void *pg_manifest,
+ int process_count);
+void ipu_psys_free_resources(struct ipu_psys_resource_alloc *alloc,
+ struct ipu_psys_resource_pool *pool);
+
+int ipu_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr,
+ u16 id, u32 bitmap,
+ u32 active_bitmap);
+
+int ipu_psys_allocate_cmd_queue_resource(struct ipu_psys_resource_pool *pool);
+void ipu_psys_free_cmd_queue_resource(struct ipu_psys_resource_pool *pool,
+ u8 queue_id);
+
+extern const struct ipu_fw_resource_definitions *ipu6_res_defs;
+extern const struct ipu_fw_resource_definitions *ipu6se_res_defs;
+extern const struct ipu_fw_resource_definitions *ipu6ep_res_defs;
+extern struct ipu6_psys_hw_res_variant hw_var;
+#endif /* IPU_PLATFORM_RESOURCES_COMMON_H */
new file mode 100644
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef IPU_PLATFORM_H
+#define IPU_PLATFORM_H
+
+#define IPU_NAME "intel-ipu6"
+
+#define IPU6SE_FIRMWARE_NAME "intel/ipu6se_fw.bin"
+#define IPU6EP_FIRMWARE_NAME "intel/ipu6ep_fw.bin"
+#define IPU6_FIRMWARE_NAME "intel/ipu6_fw.bin"
+
+/*
+ * The following definitions are encoded in the media_device's model field
+ * so that software components which use the IPU driver can get the hardware
+ * stepping information.
+ */
+#define IPU_MEDIA_DEV_MODEL_NAME "ipu6"
+
+#define IPU6SE_ISYS_NUM_STREAMS IPU6SE_NONSECURE_STREAM_ID_MAX
+#define IPU6_ISYS_NUM_STREAMS IPU6_NONSECURE_STREAM_ID_MAX
+
+/* declarations; definitions in ipu6.c */
+extern struct ipu_isys_internal_pdata isys_ipdata;
+extern struct ipu_psys_internal_pdata psys_ipdata;
+extern const struct ipu_buttress_ctrl isys_buttress_ctrl;
+extern const struct ipu_buttress_ctrl psys_buttress_ctrl;
+
+/* definitions in ipu6-isys.c */
+extern struct ipu_trace_block isys_trace_blocks[];
+/* definitions in ipu6-psys.c */
+extern struct ipu_trace_block psys_trace_blocks[];
+
+#endif /* IPU_PLATFORM_H */
new file mode 100644
@@ -0,0 +1,851 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2015 - 2020 Intel Corporation
+
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include <uapi/linux/ipu-psys.h>
+
+#include "ipu-fw-psys.h"
+#include "ipu-psys.h"
+
+struct ipu6_psys_hw_res_variant hw_var;
+void ipu6_psys_hw_res_variant_init(void)
+{
+ if (ipu_ver == IPU_VER_6SE) {
+ hw_var.queue_num = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+ hw_var.cell_num = IPU6SE_FW_PSYS_N_CELL_ID;
+ } else if (ipu_ver == IPU_VER_6) {
+ hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+ hw_var.cell_num = IPU6_FW_PSYS_N_CELL_ID;
+ } else if (ipu_ver == IPU_VER_6EP) {
+ hw_var.queue_num = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+ hw_var.cell_num = IPU6EP_FW_PSYS_N_CELL_ID;
+ } else {
+ WARN(1, "ipu6 psys res var is not initialised correctly.");
+ }
+
+ hw_var.set_proc_dev_chn = ipu6_fw_psys_set_proc_dev_chn;
+ hw_var.set_proc_dfm_bitmap = ipu6_fw_psys_set_proc_dfm_bitmap;
+ hw_var.set_proc_ext_mem = ipu6_fw_psys_set_process_ext_mem;
+ hw_var.get_pgm_by_proc =
+ ipu6_fw_psys_get_program_manifest_by_process;
+}
+
+static const struct ipu_fw_resource_definitions *get_res(void)
+{
+ if (ipu_ver == IPU_VER_6SE)
+ return ipu6se_res_defs;
+
+ if (ipu_ver == IPU_VER_6EP)
+ return ipu6ep_res_defs;
+
+ return ipu6_res_defs;
+}
+
+static int ipu_resource_init(struct ipu_resource *res, u32 id, int elements)
+{
+ if (elements <= 0) {
+ res->bitmap = NULL;
+ return 0;
+ }
+
+ res->bitmap = bitmap_zalloc(elements, GFP_KERNEL);
+ if (!res->bitmap)
+ return -ENOMEM;
+ res->elements = elements;
+ res->id = id;
+ return 0;
+}
+
+static unsigned long
+ipu_resource_alloc_with_pos(struct ipu_resource *res, int n,
+ int pos,
+ struct ipu_resource_alloc *alloc,
+ enum ipu_resource_type type)
+{
+ unsigned long p;
+
+ if (n <= 0) {
+ alloc->elements = 0;
+ return 0;
+ }
+
+ if (!res->bitmap || pos >= res->elements)
+ return (unsigned long)(-ENOSPC);
+
+ p = bitmap_find_next_zero_area(res->bitmap, res->elements, pos, n, 0);
+ alloc->resource = NULL;
+
+ if (p != pos)
+ return (unsigned long)(-ENOSPC);
+ bitmap_set(res->bitmap, p, n);
+ alloc->resource = res;
+ alloc->elements = n;
+ alloc->pos = p;
+ alloc->type = type;
+
+ return pos;
+}
+
+static unsigned long
+ipu_resource_alloc(struct ipu_resource *res, int n,
+ struct ipu_resource_alloc *alloc,
+ enum ipu_resource_type type)
+{
+ unsigned long p;
+
+ if (n <= 0) {
+ alloc->elements = 0;
+ return 0;
+ }
+
+ if (!res->bitmap)
+ return (unsigned long)(-ENOSPC);
+
+ p = bitmap_find_next_zero_area(res->bitmap, res->elements, 0, n, 0);
+ alloc->resource = NULL;
+
+ if (p >= res->elements)
+ return (unsigned long)(-ENOSPC);
+ bitmap_set(res->bitmap, p, n);
+ alloc->resource = res;
+ alloc->elements = n;
+ alloc->pos = p;
+ alloc->type = type;
+
+ return p;
+}
+
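+/*
+ * Note on the allocators above: failure is encoded as
+ * (unsigned long)(-ENOSPC), so callers test the result with
+ * IS_ERR_VALUE() instead of comparing against zero, e.g.:
+ *
+ *   unsigned long p = ipu_resource_alloc(res, n, alloc, type);
+ *
+ *   if (IS_ERR_VALUE(p))
+ *           return (int)p;
+ */
+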
+static void ipu_resource_free(struct ipu_resource_alloc *alloc)
+{
+ if (alloc->elements <= 0)
+ return;
+
+ if (alloc->type == IPU_RESOURCE_DFM)
+ *alloc->resource->bitmap &= ~(unsigned long)(alloc->elements);
+ else
+ bitmap_clear(alloc->resource->bitmap, alloc->pos,
+ alloc->elements);
+ alloc->resource = NULL;
+}
+
+static void ipu_resource_cleanup(struct ipu_resource *res)
+{
+ bitmap_free(res->bitmap);
+ res->bitmap = NULL;
+}
+
+/********** IPU PSYS-specific resource handling **********/
+int ipu_psys_resource_pool_init(struct ipu_psys_resource_pool *pool)
+{
+ int i, j, k, ret;
+ const struct ipu_fw_resource_definitions *res_defs;
+
+ res_defs = get_res();
+
+ spin_lock_init(&pool->queues_lock);
+ pool->cells = 0;
+
+ for (i = 0; i < res_defs->num_dev_channels; i++) {
+ ret = ipu_resource_init(&pool->dev_channels[i], i,
+ res_defs->dev_channels[i]);
+ if (ret)
+ goto error;
+ }
+
+ for (j = 0; j < res_defs->num_ext_mem_ids; j++) {
+ ret = ipu_resource_init(&pool->ext_memory[j], j,
+ res_defs->ext_mem_ids[j]);
+ if (ret)
+ goto memory_error;
+ }
+
+ for (k = 0; k < res_defs->num_dfm_ids; k++) {
+ ret = ipu_resource_init(&pool->dfms[k], k, res_defs->dfms[k]);
+ if (ret)
+ goto dfm_error;
+ }
+
+ spin_lock(&pool->queues_lock);
+ if (ipu_ver == IPU_VER_6SE)
+ bitmap_zero(pool->cmd_queues,
+ IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID);
+ else
+ bitmap_zero(pool->cmd_queues,
+ IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID);
+ spin_unlock(&pool->queues_lock);
+
+ return 0;
+
+dfm_error:
+ for (k--; k >= 0; k--)
+ ipu_resource_cleanup(&pool->dfms[k]);
+
+memory_error:
+ for (j--; j >= 0; j--)
+ ipu_resource_cleanup(&pool->ext_memory[j]);
+
+error:
+ for (i--; i >= 0; i--)
+ ipu_resource_cleanup(&pool->dev_channels[i]);
+ return ret;
+}
+
+void ipu_psys_resource_copy(struct ipu_psys_resource_pool *src,
+ struct ipu_psys_resource_pool *dest)
+{
+ int i;
+ const struct ipu_fw_resource_definitions *res_defs;
+
+ res_defs = get_res();
+
+ dest->cells = src->cells;
+ for (i = 0; i < res_defs->num_dev_channels; i++)
+ *dest->dev_channels[i].bitmap = *src->dev_channels[i].bitmap;
+
+ for (i = 0; i < res_defs->num_ext_mem_ids; i++)
+ *dest->ext_memory[i].bitmap = *src->ext_memory[i].bitmap;
+
+ for (i = 0; i < res_defs->num_dfm_ids; i++)
+ *dest->dfms[i].bitmap = *src->dfms[i].bitmap;
+}
+
+void ipu_psys_resource_pool_cleanup(struct ipu_psys_resource_pool
+ *pool)
+{
+ u32 i;
+ const struct ipu_fw_resource_definitions *res_defs;
+
+ res_defs = get_res();
+ for (i = 0; i < res_defs->num_dev_channels; i++)
+ ipu_resource_cleanup(&pool->dev_channels[i]);
+
+ for (i = 0; i < res_defs->num_ext_mem_ids; i++)
+ ipu_resource_cleanup(&pool->ext_memory[i]);
+
+ for (i = 0; i < res_defs->num_dfm_ids; i++)
+ ipu_resource_cleanup(&pool->dfms[i]);
+}
+
+static int __alloc_one_resrc(const struct device *dev,
+ struct ipu_fw_psys_process *process,
+ struct ipu_resource *resource,
+ struct ipu_fw_generic_program_manifest *pm,
+ u32 resource_id,
+ struct ipu_psys_resource_alloc *alloc)
+{
+ const u16 resource_req = pm->dev_chn_size[resource_id];
+ const u16 resource_offset_req = pm->dev_chn_offset[resource_id];
+ unsigned long retl;
+
+ if (resource_req <= 0)
+ return 0;
+
+ if (alloc->resources >= IPU_MAX_RESOURCES) {
+ dev_err(dev, "out of resource handles\n");
+ return -ENOSPC;
+ }
+ if (resource_offset_req != (u16)(-1))
+ retl = ipu_resource_alloc_with_pos
+ (resource,
+ resource_req,
+ resource_offset_req,
+ &alloc->resource_alloc[alloc->resources],
+ IPU_RESOURCE_DEV_CHN);
+ else
+ retl = ipu_resource_alloc
+ (resource, resource_req,
+ &alloc->resource_alloc[alloc->resources],
+ IPU_RESOURCE_DEV_CHN);
+ if (IS_ERR_VALUE(retl)) {
+ dev_dbg(dev, "out of device channel resources\n");
+ return (int)retl;
+ }
+ alloc->resources++;
+
+ return 0;
+}
+
+static int ipu_psys_allocate_one_dfm(const struct device *dev,
+ struct ipu_fw_psys_process *process,
+ struct ipu_resource *resource,
+ struct ipu_fw_generic_program_manifest *pm,
+ u32 resource_id,
+ struct ipu_psys_resource_alloc *alloc)
+{
+ u32 dfm_bitmap_req = pm->dfm_port_bitmap[resource_id];
+ u32 active_dfm_bitmap_req = pm->dfm_active_port_bitmap[resource_id];
+ const u8 is_relocatable = pm->is_dfm_relocatable[resource_id];
+ struct ipu_resource_alloc *alloc_resource;
+ unsigned long p = 0;
+
+ if (dfm_bitmap_req == 0)
+ return 0;
+
+ if (alloc->resources >= IPU_MAX_RESOURCES) {
+ dev_err(dev, "out of resource handles\n");
+ return -ENOSPC;
+ }
+
+ if (!resource->bitmap)
+ return -ENOSPC;
+
+ if (!is_relocatable) {
+ if (*resource->bitmap & dfm_bitmap_req) {
+ dev_warn(dev,
+ "out of dfm resources, req 0x%x, get 0x%lx\n",
+ dfm_bitmap_req, *resource->bitmap);
+ return -ENOSPC;
+ }
+ *resource->bitmap |= dfm_bitmap_req;
+ } else {
+ unsigned int n = hweight32(dfm_bitmap_req);
+
+ p = bitmap_find_next_zero_area(resource->bitmap,
+ resource->elements, 0, n, 0);
+
+ if (p >= resource->elements)
+ return -ENOSPC;
+
+ bitmap_set(resource->bitmap, p, n);
+ dfm_bitmap_req = dfm_bitmap_req << p;
+ active_dfm_bitmap_req = active_dfm_bitmap_req << p;
+ }
+
+ alloc_resource = &alloc->resource_alloc[alloc->resources];
+ alloc_resource->resource = resource;
+ /* the elements field carries the allocated DFM bitmap, not a count */
+ alloc_resource->elements = dfm_bitmap_req;
+ alloc_resource->pos = p;
+ alloc_resource->type = IPU_RESOURCE_DFM;
+
+ alloc->resources++;
+
+ return 0;
+}
+
+/*
+ * ext_mem_type_id is a generic type id for memory (like DMEM, VMEM)
+ * ext_mem_bank_id is detailed type id for memory (like DMEM0, DMEM1 etc.)
+ */
+static int __alloc_mem_resrc(const struct device *dev,
+ struct ipu_fw_psys_process *process,
+ struct ipu_resource *resource,
+ struct ipu_fw_generic_program_manifest *pm,
+ u32 ext_mem_type_id, u32 ext_mem_bank_id,
+ struct ipu_psys_resource_alloc *alloc)
+{
+ const u16 memory_resource_req = pm->ext_mem_size[ext_mem_type_id];
+ const u16 memory_offset_req = pm->ext_mem_offset[ext_mem_type_id];
+
+ unsigned long retl;
+
+ if (memory_resource_req <= 0)
+ return 0;
+
+ if (alloc->resources >= IPU_MAX_RESOURCES) {
+ dev_err(dev, "out of resource handles\n");
+ return -ENOSPC;
+ }
+ if (memory_offset_req != (u16)(-1))
+ retl = ipu_resource_alloc_with_pos
+ (resource,
+ memory_resource_req, memory_offset_req,
+ &alloc->resource_alloc[alloc->resources],
+ IPU_RESOURCE_EXT_MEM);
+ else
+ retl = ipu_resource_alloc
+ (resource, memory_resource_req,
+ &alloc->resource_alloc[alloc->resources],
+ IPU_RESOURCE_EXT_MEM);
+ if (IS_ERR_VALUE(retl)) {
+ dev_dbg(dev, "out of memory resources\n");
+ return (int)retl;
+ }
+
+ alloc->resources++;
+
+ return 0;
+}
+
+int ipu_psys_allocate_cmd_queue_resource(struct ipu_psys_resource_pool *pool)
+{
+ unsigned long p;
+ int size, start;
+
+ size = IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+ start = IPU6_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID;
+
+ if (ipu_ver == IPU_VER_6SE) {
+ size = IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID;
+ start = IPU6SE_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID;
+ }
+
+ spin_lock(&pool->queues_lock);
+ /* find available cmd queue from ppg0_cmd_id */
+ p = bitmap_find_next_zero_area(pool->cmd_queues, size, start, 1, 0);
+
+ if (p >= size) {
+ spin_unlock(&pool->queues_lock);
+ return -ENOSPC;
+ }
+
+ bitmap_set(pool->cmd_queues, p, 1);
+ spin_unlock(&pool->queues_lock);
+
+ return p;
+}
+
+void ipu_psys_free_cmd_queue_resource(struct ipu_psys_resource_pool *pool,
+ u8 queue_id)
+{
+ spin_lock(&pool->queues_lock);
+ bitmap_clear(pool->cmd_queues, queue_id, 1);
+ spin_unlock(&pool->queues_lock);
+}
+
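+/*
+ * Usage sketch (assumed caller pattern mirroring the two helpers above):
+ * the allocator returns either a queue id or -ENOSPC, and a successfully
+ * allocated queue is released with the matching free helper:
+ *
+ *   int q = ipu_psys_allocate_cmd_queue_resource(pool);
+ *
+ *   if (q < 0)
+ *           return q;
+ *   ...
+ *   ipu_psys_free_cmd_queue_resource(pool, q);
+ */
+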
+int ipu_psys_try_allocate_resources(struct device *dev,
+ struct ipu_fw_psys_process_group *pg,
+ void *pg_manifest,
+ struct ipu_psys_resource_pool *pool)
+{
+ u32 id, idx;
+ u32 mem_type_id;
+ int ret, i;
+ u16 *process_offset_table;
+ u8 processes;
+ u32 cells = 0;
+ struct ipu_psys_resource_alloc *alloc;
+ const struct ipu_fw_resource_definitions *res_defs;
+
+ if (!pg)
+ return -EINVAL;
+ process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset);
+ processes = pg->process_count;
+
+ alloc = kzalloc(sizeof(*alloc), GFP_KERNEL);
+ if (!alloc)
+ return -ENOMEM;
+
+ res_defs = get_res();
+ for (i = 0; i < processes; i++) {
+ u32 cell;
+ struct ipu_fw_psys_process *process =
+ (struct ipu_fw_psys_process *)
+ ((char *)pg + process_offset_table[i]);
+ struct ipu_fw_generic_program_manifest pm;
+
+ memset(&pm, 0, sizeof(pm));
+
+ if (!process) {
+ dev_err(dev, "can not get process\n");
+ ret = -ENOENT;
+ goto free_out;
+ }
+
+ ret = ipu_fw_psys_get_program_manifest_by_process
+ (&pm, pg_manifest, process);
+ if (ret < 0) {
+ dev_err(dev, "can not get manifest\n");
+ goto free_out;
+ }
+
+ if (pm.cell_id == res_defs->num_cells &&
+ pm.cell_type_id == res_defs->num_cells_type) {
+ cell = res_defs->num_cells;
+ } else if ((pm.cell_id != res_defs->num_cells &&
+ pm.cell_type_id == res_defs->num_cells_type)) {
+ cell = pm.cell_id;
+ } else {
+ /* Find a free cell of desired type */
+ u32 type = pm.cell_type_id;
+
+ for (cell = 0; cell < res_defs->num_cells; cell++)
+ if (res_defs->cells[cell] == type &&
+ ((pool->cells | cells) & (1 << cell)) == 0)
+ break;
+ if (cell >= res_defs->num_cells) {
+ dev_dbg(dev, "no free cells of right type\n");
+ ret = -ENOSPC;
+ goto free_out;
+ }
+ }
+ if (cell < res_defs->num_cells)
+ cells |= 1 << cell;
+ if (pool->cells & cells) {
+ dev_dbg(dev, "out of cell resources\n");
+ ret = -ENOSPC;
+ goto free_out;
+ }
+
+ if (pm.dev_chn_size) {
+ for (id = 0; id < res_defs->num_dev_channels; id++) {
+ ret = __alloc_one_resrc(dev, process,
+ &pool->dev_channels[id],
+ &pm, id, alloc);
+ if (ret)
+ goto free_out;
+ }
+ }
+
+ if (pm.dfm_port_bitmap) {
+ for (id = 0; id < res_defs->num_dfm_ids; id++) {
+ ret = ipu_psys_allocate_one_dfm
+ (dev, process,
+ &pool->dfms[id], &pm, id, alloc);
+ if (ret)
+ goto free_out;
+ }
+ }
+
+ if (pm.ext_mem_size) {
+ for (mem_type_id = 0;
+ mem_type_id < res_defs->num_ext_mem_types;
+ mem_type_id++) {
+ u32 bank = res_defs->num_ext_mem_ids;
+
+ if (cell != res_defs->num_cells) {
+ idx = res_defs->cell_mem_row * cell +
+ mem_type_id;
+ bank = res_defs->cell_mem[idx];
+ }
+
+ if (bank == res_defs->num_ext_mem_ids)
+ continue;
+
+ ret = __alloc_mem_resrc(dev, process,
+ &pool->ext_memory[bank],
+ &pm, mem_type_id, bank,
+ alloc);
+ if (ret)
+ goto free_out;
+ }
+ }
+ }
+ alloc->cells |= cells;
+ pool->cells |= cells;
+
+ kfree(alloc);
+ return 0;
+
+free_out:
+ dev_dbg(dev, "failed to try_allocate resource\n");
+ kfree(alloc);
+ return ret;
+}
+
+/*
+ * Allocate resources for pg from `pool'. Mark the allocated
+ * resources into `alloc'. Returns 0 on success, -ENOSPC
+ * if there are not enough resources, in which case no resources
+ * are allocated at all, or some other error on other conditions.
+ */
+int ipu_psys_allocate_resources(const struct device *dev,
+ struct ipu_fw_psys_process_group *pg,
+ void *pg_manifest,
+ struct ipu_psys_resource_alloc
+ *alloc, struct ipu_psys_resource_pool
+ *pool)
+{
+ u32 id;
+ u32 mem_type_id;
+ int ret, i;
+ u16 *process_offset_table;
+ u8 processes;
+ u32 cells = 0;
+ int p, idx;
+ u32 bmp, a_bmp;
+ const struct ipu_fw_resource_definitions *res_defs;
+
+ if (!pg)
+ return -EINVAL;
+
+ res_defs = get_res();
+ process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset);
+ processes = pg->process_count;
+
+ for (i = 0; i < processes; i++) {
+ u32 cell;
+ struct ipu_fw_psys_process *process =
+ (struct ipu_fw_psys_process *)
+ ((char *)pg + process_offset_table[i]);
+ struct ipu_fw_generic_program_manifest pm;
+
+ memset(&pm, 0, sizeof(pm));
+ if (!process) {
+ dev_err(dev, "can not get process\n");
+ ret = -ENOENT;
+ goto free_out;
+ }
+
+ ret = ipu_fw_psys_get_program_manifest_by_process
+ (&pm, pg_manifest, process);
+ if (ret < 0) {
+ dev_err(dev, "can not get manifest\n");
+ goto free_out;
+ }
+
+ if (pm.cell_id == res_defs->num_cells &&
+ pm.cell_type_id == res_defs->num_cells_type) {
+ cell = res_defs->num_cells;
+ } else if ((pm.cell_id != res_defs->num_cells &&
+ pm.cell_type_id == res_defs->num_cells_type)) {
+ cell = pm.cell_id;
+ } else {
+ /* Find a free cell of desired type */
+ u32 type = pm.cell_type_id;
+
+ for (cell = 0; cell < res_defs->num_cells; cell++)
+ if (res_defs->cells[cell] == type &&
+ ((pool->cells | cells) & (1 << cell)) == 0)
+ break;
+ if (cell >= res_defs->num_cells) {
+ dev_dbg(dev, "no free cells of right type\n");
+ ret = -ENOSPC;
+ goto free_out;
+ }
+ ret = ipu_fw_psys_set_process_cell_id(process, 0, cell);
+ if (ret)
+ goto free_out;
+ }
+ if (cell < res_defs->num_cells)
+ cells |= 1 << cell;
+ if (pool->cells & cells) {
+ dev_dbg(dev, "out of cell resources\n");
+ ret = -ENOSPC;
+ goto free_out;
+ }
+
+ if (pm.dev_chn_size) {
+ for (id = 0; id < res_defs->num_dev_channels; id++) {
+ ret = __alloc_one_resrc(dev, process,
+ &pool->dev_channels[id],
+ &pm, id, alloc);
+ if (ret)
+ goto free_out;
+
+ idx = alloc->resources - 1;
+ p = alloc->resource_alloc[idx].pos;
+ ret = ipu_fw_psys_set_proc_dev_chn(process, id,
+ p);
+ if (ret)
+ goto free_out;
+ }
+ }
+
+ if (pm.dfm_port_bitmap) {
+ for (id = 0; id < res_defs->num_dfm_ids; id++) {
+ ret = ipu_psys_allocate_one_dfm(dev, process,
+ &pool->dfms[id],
+ &pm, id, alloc);
+ if (ret)
+ goto free_out;
+
+ idx = alloc->resources - 1;
+ p = alloc->resource_alloc[idx].pos;
+ bmp = pm.dfm_port_bitmap[id];
+ bmp = bmp << p;
+ a_bmp = pm.dfm_active_port_bitmap[id];
+ a_bmp = a_bmp << p;
+ ret = ipu_fw_psys_set_proc_dfm_bitmap(process,
+ id, bmp,
+ a_bmp);
+ if (ret)
+ goto free_out;
+ }
+ }
+
+ if (pm.ext_mem_size) {
+ for (mem_type_id = 0;
+ mem_type_id < res_defs->num_ext_mem_types;
+ mem_type_id++) {
+ u32 bank = res_defs->num_ext_mem_ids;
+
+ if (cell != res_defs->num_cells) {
+ idx = res_defs->cell_mem_row * cell +
+ mem_type_id;
+ bank = res_defs->cell_mem[idx];
+ }
+ if (bank == res_defs->num_ext_mem_ids)
+ continue;
+
+ ret = __alloc_mem_resrc(dev, process,
+ &pool->ext_memory[bank],
+ &pm, mem_type_id,
+ bank, alloc);
+ if (ret)
+ goto free_out;
+
+ /* No return value check here because the fw API
+ * does its own checks and would return non-zero
+ * for any mem_type_id other than 0; that could be
+ * caused by the mem_bank_id allocation flow above
+ * being improper.
+ */
+ idx = alloc->resources - 1;
+ p = alloc->resource_alloc[idx].pos;
+ ipu_fw_psys_set_process_ext_mem(process,
+ mem_type_id,
+ bank, p);
+ }
+ }
+ }
+ alloc->cells |= cells;
+ pool->cells |= cells;
+ return 0;
+
+free_out:
+ dev_err(dev, "failed to allocate resources, ret %d\n", ret);
+ ipu_psys_reset_process_cell(dev, pg, pg_manifest, i + 1);
+ ipu_psys_free_resources(alloc, pool);
+ return ret;
+}
+
+int ipu_psys_move_resources(const struct device *dev,
+ struct ipu_psys_resource_alloc *alloc,
+ struct ipu_psys_resource_pool
+ *source_pool, struct ipu_psys_resource_pool
+ *target_pool)
+{
+ int i;
+
+ if (target_pool->cells & alloc->cells) {
+ dev_dbg(dev, "out of cell resources\n");
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < alloc->resources; i++) {
+ unsigned long bitmap = 0;
+ unsigned int id = alloc->resource_alloc[i].resource->id;
+ unsigned long fbit, end;
+
+ switch (alloc->resource_alloc[i].type) {
+ case IPU_RESOURCE_DEV_CHN:
+ bitmap_set(&bitmap, alloc->resource_alloc[i].pos,
+ alloc->resource_alloc[i].elements);
+ if (*target_pool->dev_channels[id].bitmap & bitmap)
+ return -ENOSPC;
+ break;
+ case IPU_RESOURCE_EXT_MEM:
+ end = alloc->resource_alloc[i].elements +
+ alloc->resource_alloc[i].pos;
+
+ fbit = find_next_bit(target_pool->ext_memory[id].bitmap,
+ end, alloc->resource_alloc[i].pos);
+ /* if find_next_bit returns "end", no set bit was found */
+ if (end != fbit)
+ return -ENOSPC;
+ break;
+ case IPU_RESOURCE_DFM:
+ bitmap = alloc->resource_alloc[i].elements;
+ if (*target_pool->dfms[id].bitmap & bitmap)
+ return -ENOSPC;
+ break;
+ default:
+ dev_err(dev, "Illegal resource type\n");
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < alloc->resources; i++) {
+ u32 id = alloc->resource_alloc[i].resource->id;
+
+ switch (alloc->resource_alloc[i].type) {
+ case IPU_RESOURCE_DEV_CHN:
+ bitmap_set(target_pool->dev_channels[id].bitmap,
+ alloc->resource_alloc[i].pos,
+ alloc->resource_alloc[i].elements);
+ ipu_resource_free(&alloc->resource_alloc[i]);
+ alloc->resource_alloc[i].resource =
+ &target_pool->dev_channels[id];
+ break;
+ case IPU_RESOURCE_EXT_MEM:
+ bitmap_set(target_pool->ext_memory[id].bitmap,
+ alloc->resource_alloc[i].pos,
+ alloc->resource_alloc[i].elements);
+ ipu_resource_free(&alloc->resource_alloc[i]);
+ alloc->resource_alloc[i].resource =
+ &target_pool->ext_memory[id];
+ break;
+ case IPU_RESOURCE_DFM:
+ *target_pool->dfms[id].bitmap |=
+ alloc->resource_alloc[i].elements;
+ *alloc->resource_alloc[i].resource->bitmap &=
+ ~(alloc->resource_alloc[i].elements);
+ alloc->resource_alloc[i].resource =
+ &target_pool->dfms[id];
+ break;
+ default:
+ /*
+ * Just keep the compiler happy; this case already
+ * failed in the loop above.
+ */
+ break;
+ }
+ }
+
+ target_pool->cells |= alloc->cells;
+ source_pool->cells &= ~alloc->cells;
+
+ return 0;
+}
+
+void ipu_psys_reset_process_cell(const struct device *dev,
+ struct ipu_fw_psys_process_group *pg,
+ void *pg_manifest,
+ int process_count)
+{
+ int i;
+ u16 *process_offset_table;
+ const struct ipu_fw_resource_definitions *res_defs;
+
+ if (!pg)
+ return;
+
+ res_defs = get_res();
+ process_offset_table = (u16 *)((u8 *)pg + pg->processes_offset);
+ for (i = 0; i < process_count; i++) {
+ struct ipu_fw_psys_process *process =
+ (struct ipu_fw_psys_process *)
+ ((char *)pg + process_offset_table[i]);
+ struct ipu_fw_generic_program_manifest pm;
+ int ret;
+
+ if (!process)
+ break;
+
+ ret = ipu_fw_psys_get_program_manifest_by_process(&pm,
+ pg_manifest,
+ process);
+ if (ret < 0) {
+ dev_err(dev, "can not get manifest\n");
+ break;
+ }
+ if ((pm.cell_id != res_defs->num_cells &&
+ pm.cell_type_id == res_defs->num_cells_type))
+ continue;
+ /* No return value check here: if finding a free cell
+ * failed, the process cell was never set, so calling
+ * clear_cell returns non-zero.
+ */
+ ipu_fw_psys_clear_process_cell(process);
+ }
+}
+
+/* Free resources marked in `alloc' from `resources' */
+void ipu_psys_free_resources(struct ipu_psys_resource_alloc
+ *alloc, struct ipu_psys_resource_pool *pool)
+{
+ unsigned int i;
+
+ pool->cells &= ~alloc->cells;
+ alloc->cells = 0;
+ for (i = 0; i < alloc->resources; i++)
+ ipu_resource_free(&alloc->resource_alloc[i]);
+ alloc->resources = 0;
+}
new file mode 100644
@@ -0,0 +1,608 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2015 - 2021 Intel Corporation
+
+#include <linux/err.h>
+#include <linux/string.h>
+
+#include "ipu-psys.h"
+#include "ipu-fw-psys.h"
+#include "ipu6-platform-resources.h"
+
+/* resources table */
+
+/*
+ * Cell types by cell IDs
+ */
+static const u8 ipu6_fw_psys_cell_types[IPU6_FW_PSYS_N_CELL_ID] = {
+ IPU6_FW_PSYS_SP_CTRL_TYPE_ID,
+ IPU6_FW_PSYS_VP_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_MD */
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* ICA_MEDIUM */
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_SVE_RGBIR */
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* PAF */
+ IPU6_FW_PSYS_GDC_TYPE_ID,
+ IPU6_FW_PSYS_TNR_TYPE_ID,
+};
+
+static const u16 ipu6_fw_num_dev_channels[IPU6_FW_PSYS_N_DEV_CHN_ID] = {
+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE,
+};
+
+static const u16 ipu6_fw_psys_mem_size[IPU6_FW_PSYS_N_MEM_ID] = {
+ IPU6_FW_PSYS_VMEM0_MAX_SIZE,
+ IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE,
+ IPU6_FW_PSYS_LB_VMEM_MAX_SIZE,
+ IPU6_FW_PSYS_BAMEM0_MAX_SIZE,
+ IPU6_FW_PSYS_DMEM0_MAX_SIZE,
+ IPU6_FW_PSYS_DMEM1_MAX_SIZE,
+ IPU6_FW_PSYS_DMEM2_MAX_SIZE,
+ IPU6_FW_PSYS_DMEM3_MAX_SIZE,
+ IPU6_FW_PSYS_PMEM0_MAX_SIZE
+};
+
+static const u16 ipu6_fw_psys_dfms[IPU6_FW_PSYS_N_DEV_DFM_ID] = {
+ IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE,
+};
+
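+/*
+ * Memory ID by cell (row) and memory type (column);
+ * IPU6_FW_PSYS_N_MEM_ID marks a memory type the cell cannot access.
+ */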
+static const u8
+ipu6_fw_psys_c_mem[IPU6_FW_PSYS_N_CELL_ID][IPU6_FW_PSYS_N_MEM_TYPE_ID] = {
+ {
+ /* IPU6_FW_PSYS_SP0_ID */
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_DMEM0_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_SP1_ID */
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_DMEM1_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_VP0_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_DMEM3_ID,
+ IPU6_FW_PSYS_VMEM0_ID,
+ IPU6_FW_PSYS_BAMEM0_ID,
+ IPU6_FW_PSYS_PMEM0_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC1_ID BNLM */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC2_ID DM */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC3_ID ACM */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC4_ID GTC YUV1 */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC5_ID OFS pin main */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC6_ID OFS pin display */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC7_ID OFS pin pp */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC8_ID GAMMASTAR */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC9_ID GLTM */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC10_ID XNR */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_ICA_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_LSC_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_DPC_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_SIS_A_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_SIS_B_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_B2B_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_B2R_ID and ISA_R2I_SIE */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_R2I_DS_A_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_R2I_DS_B_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_AWB_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_AE_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_AF_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_DOL_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_X2B_MD_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_ICA_MEDIUM_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_PAF_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_BB_ACC_GDC0_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_BB_ACC_TNR_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ }
+};
+
+static const struct ipu_fw_resource_definitions ipu6_defs = {
+ .cells = ipu6_fw_psys_cell_types,
+ .num_cells = IPU6_FW_PSYS_N_CELL_ID,
+ .num_cells_type = IPU6_FW_PSYS_N_CELL_TYPE_ID,
+
+ .dev_channels = ipu6_fw_num_dev_channels,
+ .num_dev_channels = IPU6_FW_PSYS_N_DEV_CHN_ID,
+
+ .num_ext_mem_types = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID,
+ .num_ext_mem_ids = IPU6_FW_PSYS_N_MEM_ID,
+ .ext_mem_ids = ipu6_fw_psys_mem_size,
+
+ .num_dfm_ids = IPU6_FW_PSYS_N_DEV_DFM_ID,
+
+ .dfms = ipu6_fw_psys_dfms,
+
+ .cell_mem_row = IPU6_FW_PSYS_N_MEM_TYPE_ID,
+ .cell_mem = &ipu6_fw_psys_c_mem[0][0],
+};
+
+const struct ipu_fw_resource_definitions *ipu6_res_defs = &ipu6_defs;
+
+/********** Generic resource handling **********/
+
+int ipu6_fw_psys_set_proc_dev_chn(struct ipu_fw_psys_process *ptr, u16 offset,
+ u16 value)
+{
+ struct ipu6_fw_psys_process_ext *pm_ext;
+ u8 ps_ext_offset;
+
+ ps_ext_offset = ptr->process_extension_offset;
+ if (!ps_ext_offset)
+ return -EINVAL;
+
+ pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset);
+
+ pm_ext->dev_chn_offset[offset] = value;
+
+ return 0;
+}
+
+int ipu6_fw_psys_set_proc_dfm_bitmap(struct ipu_fw_psys_process *ptr,
+ u16 id, u32 bitmap,
+ u32 active_bitmap)
+{
+ struct ipu6_fw_psys_process_ext *pm_ext;
+ u8 ps_ext_offset;
+
+ ps_ext_offset = ptr->process_extension_offset;
+ if (!ps_ext_offset)
+ return -EINVAL;
+
+ pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset);
+
+ pm_ext->dfm_port_bitmap[id] = bitmap;
+ pm_ext->dfm_active_port_bitmap[id] = active_bitmap;
+
+ return 0;
+}
+
+int ipu6_fw_psys_set_process_ext_mem(struct ipu_fw_psys_process *ptr,
+ u16 type_id, u16 mem_id, u16 offset)
+{
+ struct ipu6_fw_psys_process_ext *pm_ext;
+ u8 ps_ext_offset;
+
+ ps_ext_offset = ptr->process_extension_offset;
+ if (!ps_ext_offset)
+ return -EINVAL;
+
+ pm_ext = (struct ipu6_fw_psys_process_ext *)((u8 *)ptr + ps_ext_offset);
+
+ pm_ext->ext_mem_offset[type_id] = offset;
+ pm_ext->ext_mem_id[type_id] = mem_id;
+
+ return 0;
+}
+
+static struct ipu_fw_psys_program_manifest *
+get_program_manifest(const struct ipu_fw_psys_program_group_manifest *manifest,
+ const unsigned int program_index)
+{
+ struct ipu_fw_psys_program_manifest *prg_manifest_base;
+ u8 *program_manifest = NULL;
+ u8 program_count;
+ unsigned int i;
+
+ program_count = manifest->program_count;
+
+ prg_manifest_base = (struct ipu_fw_psys_program_manifest *)
+ ((char *)manifest + manifest->program_manifest_offset);
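+ /* manifests are variable-sized; step through the list entry by entry */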
+ if (program_index < program_count) {
+ program_manifest = (u8 *)prg_manifest_base;
+ for (i = 0; i < program_index; i++)
+ program_manifest +=
+ ((struct ipu_fw_psys_program_manifest *)
+ program_manifest)->size;
+ }
+
+ return (struct ipu_fw_psys_program_manifest *)program_manifest;
+}
+
+int ipu6_fw_psys_get_program_manifest_by_process(
+ struct ipu_fw_generic_program_manifest *gen_pm,
+ const struct ipu_fw_psys_program_group_manifest *pg_manifest,
+ struct ipu_fw_psys_process *process)
+{
+ u32 program_id = process->program_idx;
+ struct ipu_fw_psys_program_manifest *pm;
+ struct ipu6_fw_psys_program_manifest_ext *pm_ext;
+
+ pm = get_program_manifest(pg_manifest, program_id);
+
+ if (!pm)
+ return -ENOENT;
+
+ if (pm->program_extension_offset) {
+ pm_ext = (struct ipu6_fw_psys_program_manifest_ext *)
+ ((u8 *)pm + pm->program_extension_offset);
+
+ gen_pm->dev_chn_size = pm_ext->dev_chn_size;
+ gen_pm->dev_chn_offset = pm_ext->dev_chn_offset;
+ gen_pm->ext_mem_size = pm_ext->ext_mem_size;
+ gen_pm->ext_mem_offset = (u16 *)pm_ext->ext_mem_offset;
+ gen_pm->is_dfm_relocatable = pm_ext->is_dfm_relocatable;
+ gen_pm->dfm_port_bitmap = pm_ext->dfm_port_bitmap;
+ gen_pm->dfm_active_port_bitmap =
+ pm_ext->dfm_active_port_bitmap;
+ }
+
+ memcpy(gen_pm->cells, pm->cells, sizeof(pm->cells));
+ gen_pm->cell_id = pm->cells[0];
+ gen_pm->cell_type_id = pm->cell_type_id;
+
+ return 0;
+}
+
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
+void ipu6_fw_psys_pg_dump(struct ipu_psys *psys,
+ struct ipu_psys_kcmd *kcmd, const char *note)
+{
+ struct ipu_fw_psys_process_group *pg = kcmd->kpg->pg;
+ u32 pgid = pg->ID;
+ u8 processes = pg->process_count;
+ u16 *process_offset_table = (u16 *)((char *)pg + pg->processes_offset);
+ unsigned int p, chn, mem, mem_id;
+ unsigned int mem_type, max_mem_id, dev_chn;
+
+ if (ipu_ver == IPU_VER_6SE) {
+ mem_type = IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID;
+ max_mem_id = IPU6SE_FW_PSYS_N_MEM_ID;
+ dev_chn = IPU6SE_FW_PSYS_N_DEV_CHN_ID;
+ } else if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) {
+ mem_type = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID;
+ max_mem_id = IPU6_FW_PSYS_N_MEM_ID;
+ dev_chn = IPU6_FW_PSYS_N_DEV_CHN_ID;
+ } else {
+ WARN(1, "%s ipu_ver:[%u] is unsupported!\n", __func__, ipu_ver);
+ return;
+ }
+
+ dev_dbg(&psys->adev->dev, "%s %s pgid %i has %i processes:\n",
+ __func__, note, pgid, processes);
+
+ for (p = 0; p < processes; p++) {
+ struct ipu_fw_psys_process *process =
+ (struct ipu_fw_psys_process *)
+ ((char *)pg + process_offset_table[p]);
+ struct ipu6_fw_psys_process_ext *pm_ext =
+ (struct ipu6_fw_psys_process_ext *)((u8 *)process
+ + process->process_extension_offset);
+ dev_dbg(&psys->adev->dev, "\t process %i size=%u",
+ p, process->size);
+ if (!process->process_extension_offset)
+ continue;
+
+ for (mem = 0; mem < mem_type; mem++) {
+ mem_id = pm_ext->ext_mem_id[mem];
+ if (mem_id != max_mem_id)
+ dev_dbg(&psys->adev->dev,
+ "\t mem type %u id %d offset=0x%x",
+ mem, mem_id,
+ pm_ext->ext_mem_offset[mem]);
+ }
+ for (chn = 0; chn < dev_chn; chn++) {
+ if (pm_ext->dev_chn_offset[chn] != (u16)(-1))
+ dev_dbg(&psys->adev->dev,
+ "\t dev_chn[%u]=0x%x\n",
+ chn, pm_ext->dev_chn_offset[chn]);
+ }
+ }
+}
+#else
+void ipu6_fw_psys_pg_dump(struct ipu_psys *psys,
+ struct ipu_psys_kcmd *kcmd, const char *note)
+{
+ if (ipu_ver == IPU_VER_6SE || ipu_ver == IPU_VER_6 ||
+ ipu_ver == IPU_VER_6EP)
+ return;
+
+ WARN(1, "%s ipu_ver:[%u] is unsupported!\n", __func__, ipu_ver);
+}
+#endif
new file mode 100644
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <media/ipu-isys.h>
+#include "ipu.h"
+#include "ipu-buttress.h"
+#include "ipu-isys.h"
+#include "ipu-platform-buttress-regs.h"
+#include "ipu-platform-regs.h"
+#include "ipu-platform-isys-csi2-reg.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-isys-phy.h"
+#include "ipu-isys-csi2.h"
+
+struct ipu6_csi2_error {
+ const char *error_string;
+ bool is_info_only;
+};
+
+struct ipu6_csi_irq_info_map {
+ u32 irq_error_mask;
+ u32 irq_num;
+ unsigned int irq_base;
+ unsigned int irq_base_ctrl2;
+ struct ipu6_csi2_error *errors;
+};
+
+/*
+ * Strings describing the CSI-2 receiver errors; the matching error bit
+ * macros are defined in the header file.
+ */
+static struct ipu6_csi2_error dphy_rx_errors[] = {
+ {"Single packet header error corrected", true},
+ {"Multiple packet header errors detected", true},
+ {"Payload checksum (CRC) error", true},
+ {"Transfer FIFO overflow", false},
+ {"Reserved short packet data type detected", true},
+ {"Reserved long packet data type detected", true},
+ {"Incomplete long packet detected", false},
+ {"Frame sync error", false},
+ {"Line sync error", false},
+ {"DPHY recoverable synchronization error", true},
+ {"DPHY fatal error", false},
+ {"DPHY elastic FIFO overflow", false},
+ {"Inter-frame short packet discarded", true},
+ {"Inter-frame long packet discarded", true},
+ {"MIPI pktgen overflow", false},
+ {"MIPI pktgen data loss", false},
+ {"FIFO overflow", false},
+ {"Lane deskew", false},
+ {"SOT sync error", false},
+ {"HSIDLE detected", false}
+};
+
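+/* one power refcount per combo PHY; CSI-2 ports share a PHY in groups of four */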
+static refcount_t phy_power_ref_count[IPU_ISYS_CSI_PHY_NUM];
+
+static int ipu6_csi2_phy_power_set(struct ipu_isys *isys,
+ struct ipu_isys_csi2_config *cfg, bool on)
+{
+ int ret = 0;
+ unsigned int port, phy_id;
+ refcount_t *ref;
+ void __iomem *isys_base = isys->pdata->base;
+ unsigned int nr;
+
+ port = cfg->port;
+ phy_id = port / 4;
+ ref = &phy_power_ref_count[phy_id];
+ dev_dbg(&isys->adev->dev, "for phy %d port %d, lanes: %d\n",
+ phy_id, port, cfg->nlanes);
+
+ nr = (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) ?
+ IPU6_ISYS_CSI_PORT_NUM : IPU6SE_ISYS_CSI_PORT_NUM;
+
+ if (!isys_base || port >= nr) {
+ dev_warn(&isys->adev->dev, "invalid port ID %d\n", port);
+ return -EINVAL;
+ }
+
+ if (on) {
+ if (refcount_read(ref)) {
+ /* already up */
+ dev_warn(&isys->adev->dev, "for phy %d is already UP",
+ phy_id);
+ refcount_inc(ref);
+ return 0;
+ }
+
+ ret = ipu6_isys_phy_powerup_ack(isys, phy_id);
+ if (ret)
+ return ret;
+
+ ipu6_isys_phy_reset(isys, phy_id, 0);
+ ipu6_isys_phy_common_init(isys);
+
+ ret = ipu6_isys_phy_config(isys);
+ if (ret)
+ return ret;
+
+ ipu6_isys_phy_reset(isys, phy_id, 1);
+ ret = ipu6_isys_phy_ready(isys, phy_id);
+ if (ret)
+ return ret;
+
+ refcount_set(ref, 1);
+ return 0;
+ }
+
+ /* power off process */
+ if (refcount_dec_and_test(ref))
+ ret = ipu6_isys_phy_powerdown_ack(isys, phy_id);
+ if (ret)
+ dev_err(&isys->adev->dev, "phy poweroff failed!");
+
+ return ret;
+}
+
+static void ipu6_isys_register_errors(struct ipu_isys_csi2 *csi2)
+{
+ u32 mask = 0;
+ u32 irq = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+
+ mask = (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) ?
+ IPU6_CSI_RX_ERROR_IRQ_MASK : IPU6SE_CSI_RX_ERROR_IRQ_MASK;
+
+ writel(irq & mask,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+ csi2->receiver_errors |= irq & mask;
+}
+
+void ipu_isys_csi2_error(struct ipu_isys_csi2 *csi2)
+{
+ struct ipu6_csi2_error *errors;
+ u32 status;
+ unsigned int i;
+
+ /* Register errors once more in case error interrupts were disabled */
+ ipu6_isys_register_errors(csi2);
+ status = csi2->receiver_errors;
+ csi2->receiver_errors = 0;
+ errors = dphy_rx_errors;
+
+ for (i = 0; i < CSI_RX_NUM_ERRORS_IN_IRQ; i++) {
+ if (status & BIT(i))
+ dev_err_ratelimited(&csi2->isys->adev->dev,
+ "csi2-%i error: %s\n",
+ csi2->index,
+ errors[i].error_string);
+ }
+}
+
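+/*
+ * Per-entry comments describe the lane split; only the third column (the
+ * port config value) is written to hardware in ipu_isys_csi2_set_port_cfg().
+ */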
+const unsigned int csi2_port_cfg[][3] = {
+ {0, 0, 0x1f}, /* no link */
+ {4, 0, 0x10}, /* x4 + x4 config */
+ {2, 0, 0x12}, /* x2 + x2 config */
+ {1, 0, 0x13}, /* x1 + x1 config */
+ {2, 1, 0x15}, /* x2x1 + x2x1 config */
+ {1, 1, 0x16}, /* x1x1 + x1x1 config */
+ {2, 2, 0x18}, /* x2x2 + x2x2 config */
+ {1, 2, 0x19}, /* x1x2 + x1x2 config */
+};
+
+const unsigned int phy_port_cfg[][4] = {
+ /* port, nlanes, bbindex, portcfg */
+ /* sip0 */
+ {0, 1, 0, 0x15},
+ {0, 2, 0, 0x15},
+ {0, 4, 0, 0x15},
+ {0, 4, 2, 0x22},
+ /* sip1 */
+ {2, 1, 4, 0x15},
+ {2, 2, 4, 0x15},
+ {2, 4, 4, 0x15},
+ {2, 4, 6, 0x22},
+};
+
+static int ipu_isys_csi2_phy_config_by_port(struct ipu_isys *isys,
+ unsigned int port,
+ unsigned int nlanes)
+{
+ void __iomem *base = isys->adev->isp->base;
+ u32 val, reg, i;
+ unsigned int bbnum;
+
+ dev_dbg(&isys->adev->dev, "%s port %u with %u lanes", __func__,
+ port, nlanes);
+
+ /* hard-coded for x2x2 + x2x2 at < 1.5 Gbps */
+ for (i = 0; i < IPU6SE_ISYS_PHY_BB_NUM; i++) {
+ /* cphy_dll_ovrd.crcdc_fsm_dlane0 = 13 */
+ reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i);
+ val = readl(base + reg);
+ val |= 13 << 1;
+ /* val &= ~0x1; */
+ writel(val, base + reg);
+
+ /* cphy_rx_control1.en_crc1 = 1 */
+ reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_RX_CONTROL1(i);
+ val = readl(base + reg);
+ val |= 0x1 << 31;
+ writel(val, base + reg);
+
+ /* dphy_cfg.reserved = 1
+ * dphy_cfg.lden_from_dll_ovrd_0 = 1
+ */
+ reg = IPU6SE_ISYS_PHY_0_BASE + PHY_DPHY_CFG(i);
+ val = readl(base + reg);
+ val |= 0x1 << 25;
+ val |= 0x1 << 26;
+ writel(val, base + reg);
+
+ /* cphy_dll_ovrd.lden_crcdc_fsm_dlane0 = 1 */
+ reg = IPU6SE_ISYS_PHY_0_BASE + PHY_CPHY_DLL_OVRD(i);
+ val = readl(base + reg);
+ val |= 1;
+ writel(val, base + reg);
+ }
+
+ /* bb afe config, use minimal channel loss */
+ for (i = 0; i < ARRAY_SIZE(phy_port_cfg); i++) {
+ if (phy_port_cfg[i][0] == port &&
+ phy_port_cfg[i][1] == nlanes) {
+ bbnum = phy_port_cfg[i][2] / 2;
+ reg = IPU6SE_ISYS_PHY_0_BASE + PHY_BB_AFE_CONFIG(bbnum);
+ val = readl(base + reg);
+ val |= phy_port_cfg[i][3];
+ writel(val, base + reg);
+ }
+ }
+
+ return 0;
+}
+
+static void ipu_isys_csi2_rx_control(struct ipu_isys *isys)
+{
+ void __iomem *base = isys->adev->isp->base;
+ u32 val, reg;
+
+ /* lp11 release */
+ reg = CSI2_HUB_GPREG_SIP0_CSI_RX_A_CONTROL;
+ val = readl(base + reg);
+ val |= 0x1;
+ writel(val, base + reg);
+
+ reg = CSI2_HUB_GPREG_SIP0_CSI_RX_B_CONTROL;
+ val = readl(base + reg);
+ val |= 0x1;
+ writel(val, base + reg);
+
+ reg = CSI2_HUB_GPREG_SIP1_CSI_RX_A_CONTROL;
+ val = readl(base + reg);
+ val |= 0x1;
+ writel(val, base + reg);
+
+ reg = CSI2_HUB_GPREG_SIP1_CSI_RX_B_CONTROL;
+ val = readl(base + reg);
+ val |= 0x1;
+ writel(val, base + reg);
+}
+
+static int ipu_isys_csi2_set_port_cfg(struct v4l2_subdev *sd, unsigned int port,
+ unsigned int nlanes)
+{
+ struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);
+ struct ipu_isys *isys = csi2->isys;
+ unsigned int sip = port / 2;
+ unsigned int index;
+
+ switch (nlanes) {
+ case 1:
+ index = 5;
+ break;
+ case 2:
+ index = 6;
+ break;
+ case 4:
+ index = 1;
+ break;
+ default:
+ dev_err(&isys->adev->dev, "lanes nr %u is unsupported\n",
+ nlanes);
+ return -EINVAL;
+ }
+
+ dev_dbg(&isys->adev->dev, "port config for port %u with %u lanes\n",
+ port, nlanes);
+ writel(csi2_port_cfg[index][2],
+ isys->pdata->base + CSI2_HUB_GPREG_SIP_FB_PORT_CFG(sip));
+
+ return 0;
+}
+
+static void ipu_isys_csi2_set_timing(struct v4l2_subdev *sd,
+ struct ipu_isys_csi2_timing timing,
+ unsigned int port,
+ unsigned int nlanes)
+{
+ u32 port_base;
+ void __iomem *reg;
+ struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);
+ struct ipu_isys *isys = csi2->isys;
+ unsigned int i;
+
+ port_base = (port % 2) ? CSI2_SIP_TOP_CSI_RX_PORT_BASE_1(port) :
+ CSI2_SIP_TOP_CSI_RX_PORT_BASE_0(port);
+
+ dev_dbg(&isys->adev->dev,
+ "set timing for port %u base 0x%x with %u lanes\n",
+ port, port_base, nlanes);
+
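+ /* clock lane termen/settle counters first, then one pair per data lane */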
+ reg = isys->pdata->base + port_base;
+ reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_CLANE;
+
+ writel(timing.ctermen, reg);
+
+ reg = isys->pdata->base + port_base;
+ reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_CLANE;
+ writel(timing.csettle, reg);
+
+ for (i = 0; i < nlanes; i++) {
+ reg = isys->pdata->base + port_base;
+ reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_TERMEN_DLANE(i);
+ writel(timing.dtermen, reg);
+
+ reg = isys->pdata->base + port_base;
+ reg += CSI2_SIP_TOP_CSI_RX_DLY_CNT_SETTLE_DLANE(i);
+ writel(timing.dsettle, reg);
+ }
+}
+
+int ipu_isys_csi2_set_stream(struct v4l2_subdev *sd,
+ struct ipu_isys_csi2_timing timing,
+ unsigned int nlanes, int enable)
+{
+ struct ipu_isys_csi2 *csi2 = to_ipu_isys_csi2(sd);
+ struct ipu_isys *isys = csi2->isys;
+ struct ipu_isys_pipeline *ip = container_of(sd->entity.pipe,
+ struct ipu_isys_pipeline,
+ pipe);
+ struct ipu_isys_csi2_config *cfg =
+ v4l2_get_subdev_hostdata(media_entity_to_v4l2_subdev
+ (ip->external->entity));
+ unsigned int port;
+ int ret;
+ u32 mask = 0;
+
+ port = cfg->port;
+ dev_dbg(&isys->adev->dev, "for port %u\n", port);
+
+ mask = (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) ?
+ IPU6_CSI_RX_ERROR_IRQ_MASK : IPU6SE_CSI_RX_ERROR_IRQ_MASK;
+
+ if (!enable) {
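+ /* stop the frontend and PPI2CSI before masking IRQs and gating clocks */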
+ writel(0, csi2->base + CSI_REG_CSI_FE_ENABLE);
+ writel(0, csi2->base + CSI_REG_PPI2CSI_ENABLE);
+
+ /* Disable interrupts */
+ writel(0,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+ writel(mask,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+ writel(0,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+ writel(0xffffffff,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+
+ /* Disable clock */
+ writel(0, isys->pdata->base +
+ CSI_REG_HUB_FW_ACCESS_PORT(port));
+ writel(0, isys->pdata->base +
+ CSI_REG_HUB_DRV_ACCESS_PORT(port));
+
+ if (ipu_ver == IPU_VER_6SE)
+ return 0;
+
+ /* power down */
+ return ipu6_csi2_phy_power_set(isys, cfg, false);
+ }
+
+ if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) {
+ /* Enable DPHY power */
+ ret = ipu6_csi2_phy_power_set(isys, cfg, true);
+ if (ret) {
+ dev_err(&isys->adev->dev,
+ "CSI-%d PHY power up failed %d\n",
+ cfg->port, ret);
+ return ret;
+ }
+ }
+
+ /* reset port reset */
+ writel(0x1, csi2->base + CSI_REG_PORT_GPREG_SRST);
+ usleep_range(100, 200);
+ writel(0x0, csi2->base + CSI_REG_PORT_GPREG_SRST);
+
+ /* Enable port clock */
+ writel(1, isys->pdata->base + CSI_REG_HUB_DRV_ACCESS_PORT(port));
+ writel(1, isys->pdata->base + CSI_REG_HUB_FW_ACCESS_PORT(port));
+
+ if (ipu_ver == IPU_VER_6SE) {
+ ipu_isys_csi2_phy_config_by_port(isys, port, nlanes);
+
+ /* 9'b000101000 for 400 MHz isys frequency */
+ writel(0x28,
+ isys->pdata->base + CSI2_HUB_GPREG_DPHY_TIMER_INCR);
+ /* set port cfg and rx timing */
+ ipu_isys_csi2_set_timing(sd, timing, port, nlanes);
+
+ ret = ipu_isys_csi2_set_port_cfg(sd, port, nlanes);
+ if (ret)
+ return ret;
+
+ ipu_isys_csi2_rx_control(isys);
+ }
+
+ /* enable all error related irq */
+ writel(mask,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+ writel(mask,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_MASK_OFFSET);
+ writel(mask,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+ writel(mask,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET);
+ writel(mask,
+ csi2->base + CSI_PORT_REG_BASE_IRQ_CSI +
+ CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+
+ /* To save CPU wakeups, disable CSI SOF/EOF irq */
+ writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+ writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_MASK_OFFSET);
+ writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+ writel(0, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_LEVEL_NOT_PULSE_OFFSET);
+ writel(0xffffffff, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_ENABLE_OFFSET);
+
+ /* Configure and enable the FE and PPI2CSI */
+ writel(0, csi2->base + CSI_REG_CSI_FE_MODE);
+ writel(CSI_SENSOR_INPUT, csi2->base + CSI_REG_CSI_FE_MUX_CTRL);
+ writel(CSI_CNTR_SENSOR_LINE_ID | CSI_CNTR_SENSOR_FRAME_ID,
+ csi2->base + CSI_REG_CSI_FE_SYNC_CNTR_SEL);
+ writel(((nlanes - 1) <<
+ PPI_INTF_CONFIG_NOF_ENABLED_DATALANES_SHIFT) |
+ (0 << PPI_INTF_CONFIG_RX_AUTO_CLKGATING_SHIFT),
+ csi2->base + CSI_REG_PPI2CSI_CONFIG_PPI_INTF);
+ writel(0x06, csi2->base + CSI_REG_PPI2CSI_CONFIG_CSI_FEATURE);
+ writel(1, csi2->base + CSI_REG_PPI2CSI_ENABLE);
+ writel(1, csi2->base + CSI_REG_CSI_FE_ENABLE);
+
+ return 0;
+}
+
+void ipu_isys_csi2_isr(struct ipu_isys_csi2 *csi2)
+{
+ u32 status;
+
+ ipu6_isys_register_errors(csi2);
+
+ status = readl(csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_STATUS_OFFSET);
+
+ writel(status, csi2->base + CSI_PORT_REG_BASE_IRQ_CSI_SYNC +
+ CSI_PORT_REG_BASE_IRQ_CLEAR_OFFSET);
+
+ if (status & IPU_CSI_RX_IRQ_FS_VC)
+ ipu_isys_csi2_sof_event(csi2);
+ if (status & IPU_CSI_RX_IRQ_FE_VC)
+ ipu_isys_csi2_eof_event(csi2);
+}
+
+unsigned int ipu_isys_csi2_get_current_field(struct ipu_isys_pipeline *ip,
+ unsigned int *timestamp)
+{
+ struct ipu_isys_video *av = container_of(ip, struct ipu_isys_video, ip);
+ struct ipu_isys *isys = av->isys;
+ unsigned int field = V4L2_FIELD_TOP;
+
+ struct ipu_isys_buffer *short_packet_ib =
+ list_last_entry(&ip->short_packet_active,
+ struct ipu_isys_buffer, head);
+ struct ipu_isys_private_buffer *pb =
+ ipu_isys_buffer_to_private_buffer(short_packet_ib);
+ struct ipu_isys_mipi_packet_header *ph =
+ (struct ipu_isys_mipi_packet_header *)
+ pb->buffer;
+
+ /* Check that the first short packet received is a SOF. */
+ if ((ph->dtype & IPU_ISYS_SHORT_PACKET_DTYPE_MASK) != 0)
+ dev_warn(&isys->adev->dev, "First short packet is not SOF.\n");
+ field = (ph->word_count % 2) ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
+ dev_dbg(&isys->adev->dev,
+ "Interlaced field ready. frame_num = %d field = %d\n",
+ ph->word_count, field);
+
+ return field;
+}
new file mode 100644
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Intel Corporation */
+
+#ifndef IPU6_ISYS_CSI2_H
+#define IPU6_ISYS_CSI2_H
+
+struct ipu_isys_csi2_timing;
+struct ipu_isys_csi2;
+struct ipu_isys_pipeline;
+struct v4l2_subdev;
+
+#define IPU_ISYS_SHORT_PACKET_DTYPE_MASK 0x3f
+
+#endif /* IPU6_ISYS_CSI2_H */
new file mode 100644
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+
+#include "ipu-isys.h"
+#include "ipu-platform-regs.h"
+
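+/* ISYS general purpose counters (GPC) exposed through debugfs */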
+#define IPU_ISYS_GPC_NUM 16
+
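+/* without runtime PM, treat the device as always powered */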
+#ifndef CONFIG_PM
+#define pm_runtime_get_sync(d) 0
+#define pm_runtime_put(d) 0
+#endif
+
+struct ipu_isys_gpc {
+ bool enable;
+ unsigned int route;
+ unsigned int source;
+ unsigned int sense;
+ unsigned int gpcindex;
+ void *priv;
+};
+
+struct ipu_isys_gpcs {
+ bool gpc_enable;
+ struct ipu_isys_gpc gpc[IPU_ISYS_GPC_NUM];
+ void *prit;
+};
+
+static int ipu6_isys_gpc_global_enable_get(void *data, u64 *val)
+{
+ struct ipu_isys_gpcs *isys_gpcs = data;
+ struct ipu_isys *isys = isys_gpcs->priv;
+
+ mutex_lock(&isys->mutex);
+
+ *val = isys_gpcs->gpc_enable;
+
+ mutex_unlock(&isys->mutex);
+ return 0;
+}
+
+static int ipu6_isys_gpc_global_enable_set(void *data, u64 val)
+{
+ struct ipu_isys_gpcs *isys_gpcs = data;
+ struct ipu_isys *isys = isys_gpcs->priv;
+ void __iomem *base;
+ int i, ret;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ if (!isys || !isys->pdata || !isys->pdata->base)
+ return -EINVAL;
+
+ mutex_lock(&isys->mutex);
+
+ base = isys->pdata->base + IPU_ISYS_GPC_BASE;
+
+ ret = pm_runtime_get_sync(&isys->adev->dev);
+ if (ret < 0) {
+ pm_runtime_put(&isys->adev->dev);
+ mutex_unlock(&isys->mutex);
+ return ret;
+ }
+
+ if (!val) {
+ writel(0x0, base + IPU_ISYS_GPREG_TRACE_TIMER_RST);
+ writel(0x0, base + IPU_ISF_CDC_MMU_GPC_OVERALL_ENABLE);
+ writel(0xffff, base + IPU_ISF_CDC_MMU_GPC_SOFT_RESET);
+ isys_gpcs->gpc_enable = false;
+ for (i = 0; i < IPU_ISYS_GPC_NUM; i++) {
+ isys_gpcs->gpc[i].enable = 0;
+ isys_gpcs->gpc[i].sense = 0;
+ isys_gpcs->gpc[i].route = 0;
+ isys_gpcs->gpc[i].source = 0;
+ }
+ pm_runtime_mark_last_busy(&isys->adev->dev);
+ pm_runtime_put_autosuspend(&isys->adev->dev);
+ } else {
+ /*
+ * Set the GPC registers and start all GPCs here.
+ * Reset the free-running local timer first.
+ */
+ writel(0x0, base + IPU_ISYS_GPREG_TRACE_TIMER_RST);
+ writel(0x1, base + IPU_ISYS_GPREG_TRACE_TIMER_RST);
+
+ for (i = 0; i < IPU_ISYS_GPC_NUM; i++) {
+ /* Enable */
+ writel(isys_gpcs->gpc[i].enable,
+ base + IPU_ISF_CDC_MMU_GPC_ENABLE0 + 4 * i);
+ /* Setting (route/source/sense) */
+ writel((isys_gpcs->gpc[i].sense
+ << IPU_GPC_SENSE_OFFSET)
+ + (isys_gpcs->gpc[i].route
+ << IPU_GPC_ROUTE_OFFSET)
+ + (isys_gpcs->gpc[i].source
+ << IPU_GPC_SOURCE_OFFSET),
+ base + IPU_ISF_CDC_MMU_GPC_CNT_SEL0 + 4 * i);
+ }
+
+ /* Soft reset and Overall Enable. */
+ writel(0x0, base + IPU_ISF_CDC_MMU_GPC_OVERALL_ENABLE);
+ writel(0xffff, base + IPU_ISF_CDC_MMU_GPC_SOFT_RESET);
+ writel(0x1, base + IPU_ISF_CDC_MMU_GPC_OVERALL_ENABLE);
+
+ isys_gpcs->gpc_enable = true;
+ }
+
+ mutex_unlock(&isys->mutex);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(isys_gpc_global_enable_fops,
+ ipu6_isys_gpc_global_enable_get,
+ ipu6_isys_gpc_global_enable_set, "%llu\n");
+
+static int ipu6_isys_gpc_count_get(void *data, u64 *val)
+{
+ struct ipu_isys_gpc *isys_gpc = data;
+ struct ipu_isys *isys = isys_gpc->priv;
+ void __iomem *base;
+
+ if (!isys || !isys->pdata || !isys->pdata->base)
+ return -EINVAL;
+
+ spin_lock(&isys->power_lock);
+ if (isys->power) {
+ base = isys->pdata->base + IPU_ISYS_GPC_BASE;
+ *val = readl(base + IPU_ISF_CDC_MMU_GPC_VALUE0
+ + 4 * isys_gpc->gpcindex);
+ } else {
+ *val = 0;
+ }
+ spin_unlock(&isys->power_lock);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(isys_gpc_count_fops, ipu6_isys_gpc_count_get,
+ NULL, "%llu\n");
+
+int ipu_isys_gpc_init_debugfs(struct ipu_isys *isys)
+{
+ struct dentry *gpcdir;
+ struct dentry *dir;
+ struct dentry *file;
+ int i;
+ char gpcname[10];
+ struct ipu_isys_gpcs *isys_gpcs;
+
+ isys_gpcs = devm_kzalloc(&isys->adev->dev, sizeof(*isys_gpcs),
+ GFP_KERNEL);
+ if (!isys_gpcs)
+ return -ENOMEM;
+
+ gpcdir = debugfs_create_dir("gpcs", isys->debugfsdir);
+ if (IS_ERR(gpcdir))
+ return -ENOMEM;
+
+ isys_gpcs->priv = isys;
+ file = debugfs_create_file("enable", 0600, gpcdir, isys_gpcs,
+ &isys_gpc_global_enable_fops);
+ if (IS_ERR(file))
+ goto err;
+
+ for (i = 0; i < IPU_ISYS_GPC_NUM; i++) {
+ sprintf(gpcname, "gpc%d", i);
+ dir = debugfs_create_dir(gpcname, gpcdir);
+ if (IS_ERR(dir))
+ goto err;
+
+ debugfs_create_bool("enable", 0600, dir,
+ &isys_gpcs->gpc[i].enable);
+
+ debugfs_create_u32("source", 0600, dir,
+ &isys_gpcs->gpc[i].source);
+
+ debugfs_create_u32("route", 0600, dir,
+ &isys_gpcs->gpc[i].route);
+
+ debugfs_create_u32("sense", 0600, dir,
+ &isys_gpcs->gpc[i].sense);
+
+ isys_gpcs->gpc[i].gpcindex = i;
+ isys_gpcs->gpc[i].priv = isys;
+ file = debugfs_create_file("count", 0400, dir,
+ &isys_gpcs->gpc[i],
+ &isys_gpc_count_fops);
+ if (IS_ERR(file))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ debugfs_remove_recursive(gpcdir);
+ return -ENOMEM;
+}
+#endif
new file mode 100644
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 - 2020 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <media/ipu-isys.h>
+#include <media/v4l2-device.h>
+#include "ipu.h"
+#include "ipu-buttress.h"
+#include "ipu-isys.h"
+#include "ipu-isys-csi2.h"
+#include "ipu-platform-regs.h"
+#include "ipu-platform-isys-csi2-reg.h"
+#include "ipu6-isys-csi2.h"
+#include "ipu6-isys-phy.h"
+
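+/* maximum number of polling iterations for the PHY status handshakes */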
+#define LOOP (2000)
+
+#define PHY_REG_INIT_CTL 0x00000694
+#define PHY_REG_INIT_CTL_PORT_OFFSET 0x00000600
+
+struct phy_reg {
+ u32 reg;
+ u32 val;
+};
+
+static const struct phy_reg common_init_regs[] = {
+ /* for TGL-U, use 0x80000000 */
+ {0x00000040, 0x80000000},
+ {0x00000044, 0x00a80880},
+ {0x00000044, 0x00b80880},
+ {0x00000010, 0x0000078c},
+ {0x00000344, 0x2f4401e2},
+ {0x00000544, 0x924401e2},
+ {0x00000744, 0x594401e2},
+ {0x00000944, 0x624401e2},
+ {0x00000b44, 0xfc4401e2},
+ {0x00000d44, 0xc54401e2},
+ {0x00000f44, 0x034401e2},
+ {0x00001144, 0x8f4401e2},
+ {0x00001344, 0x754401e2},
+ {0x00001544, 0xe94401e2},
+ {0x00001744, 0xcb4401e2},
+ {0x00001944, 0xfa4401e2}
+};
+
+static const struct phy_reg x1_port0_config_regs[] = {
+ {0x00000694, 0xc80060fa},
+ {0x00000680, 0x3d4f78ea},
+ {0x00000690, 0x10a0140b},
+ {0x000006a8, 0xdf04010a},
+ {0x00000700, 0x57050060},
+ {0x00000710, 0x0030001c},
+ {0x00000738, 0x5f004444},
+ {0x0000073c, 0x78464204},
+ {0x00000748, 0x7821f940},
+ {0x0000074c, 0xb2000433},
+ {0x00000494, 0xfe6030fa},
+ {0x00000480, 0x29ef5ed0},
+ {0x00000490, 0x10a0540b},
+ {0x000004a8, 0x7a01010a},
+ {0x00000500, 0xef053460},
+ {0x00000510, 0xe030101c},
+ {0x00000538, 0xdf808444},
+ {0x0000053c, 0xc8422204},
+ {0x00000540, 0x0180088c},
+ {0x00000574, 0x00000000},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x1_port1_config_regs[] = {
+ {0x00000c94, 0xc80060fa},
+ {0x00000c80, 0xcf47abea},
+ {0x00000c90, 0x10a0840b},
+ {0x00000ca8, 0xdf04010a},
+ {0x00000d00, 0x57050060},
+ {0x00000d10, 0x0030001c},
+ {0x00000d38, 0x5f004444},
+ {0x00000d3c, 0x78464204},
+ {0x00000d48, 0x7821f940},
+ {0x00000d4c, 0xb2000433},
+ {0x00000a94, 0xc91030fa},
+ {0x00000a80, 0x5a166ed0},
+ {0x00000a90, 0x10a0540b},
+ {0x00000aa8, 0x5d060100},
+ {0x00000b00, 0xef053460},
+ {0x00000b10, 0xa030101c},
+ {0x00000b38, 0xdf808444},
+ {0x00000b3c, 0xc8422204},
+ {0x00000b40, 0x0180088c},
+ {0x00000b74, 0x00000000},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x1_port2_config_regs[] = {
+ {0x00001294, 0x28f000fa},
+ {0x00001280, 0x08130cea},
+ {0x00001290, 0x10a0140b},
+ {0x000012a8, 0xd704010a},
+ {0x00001300, 0x8d050060},
+ {0x00001310, 0x0030001c},
+ {0x00001338, 0xdf008444},
+ {0x0000133c, 0x78422204},
+ {0x00001348, 0x7821f940},
+ {0x0000134c, 0x5a000433},
+ {0x00001094, 0x2d20b0fa},
+ {0x00001080, 0xade75dd0},
+ {0x00001090, 0x10a0540b},
+ {0x000010a8, 0xb101010a},
+ {0x00001100, 0x33053460},
+ {0x00001110, 0x0030101c},
+ {0x00001138, 0xdf808444},
+ {0x0000113c, 0xc8422204},
+ {0x00001140, 0x8180088c},
+ {0x00001174, 0x00000000},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x1_port3_config_regs[] = {
+ {0x00001894, 0xc80060fa},
+ {0x00001880, 0x0f90fd6a},
+ {0x00001890, 0x10a0840b},
+ {0x000018a8, 0xdf04010a},
+ {0x00001900, 0x57050060},
+ {0x00001910, 0x0030001c},
+ {0x00001938, 0x5f004444},
+ {0x0000193c, 0x78464204},
+ {0x00001948, 0x7821f940},
+ {0x0000194c, 0xb2000433},
+ {0x00001694, 0x3050d0fa},
+ {0x00001680, 0x0ef6d050},
+ {0x00001690, 0x10a0540b},
+ {0x000016a8, 0xe301010a},
+ {0x00001700, 0x69053460},
+ {0x00001710, 0xa030101c},
+ {0x00001738, 0xdf808444},
+ {0x0000173c, 0xc8422204},
+ {0x00001740, 0x0180088c},
+ {0x00001774, 0x00000000},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x2_port0_config_regs[] = {
+ {0x00000694, 0xc80060fa},
+ {0x00000680, 0x3d4f78ea},
+ {0x00000690, 0x10a0140b},
+ {0x000006a8, 0xdf04010a},
+ {0x00000700, 0x57050060},
+ {0x00000710, 0x0030001c},
+ {0x00000738, 0x5f004444},
+ {0x0000073c, 0x78464204},
+ {0x00000748, 0x7821f940},
+ {0x0000074c, 0xb2000433},
+ {0x00000494, 0xc80060fa},
+ {0x00000480, 0x29ef5ed8},
+ {0x00000490, 0x10a0540b},
+ {0x000004a8, 0x7a01010a},
+ {0x00000500, 0xef053460},
+ {0x00000510, 0xe030101c},
+ {0x00000538, 0xdf808444},
+ {0x0000053c, 0xc8422204},
+ {0x00000540, 0x0180088c},
+ {0x00000574, 0x00000000},
+ {0x00000294, 0xc80060fa},
+ {0x00000280, 0xcb45b950},
+ {0x00000290, 0x10a0540b},
+ {0x000002a8, 0x8c01010a},
+ {0x00000300, 0xef053460},
+ {0x00000310, 0x8030101c},
+ {0x00000338, 0x41808444},
+ {0x0000033c, 0x32422204},
+ {0x00000340, 0x0180088c},
+ {0x00000374, 0x00000000},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x2_port1_config_regs[] = {
+ {0x00000c94, 0xc80060fa},
+ {0x00000c80, 0xcf47abea},
+ {0x00000c90, 0x10a0840b},
+ {0x00000ca8, 0xdf04010a},
+ {0x00000d00, 0x57050060},
+ {0x00000d10, 0x0030001c},
+ {0x00000d38, 0x5f004444},
+ {0x00000d3c, 0x78464204},
+ {0x00000d48, 0x7821f940},
+ {0x00000d4c, 0xb2000433},
+ {0x00000a94, 0xc80060fa},
+ {0x00000a80, 0x5a166ed8},
+ {0x00000a90, 0x10a0540b},
+ {0x00000aa8, 0x7a01010a},
+ {0x00000b00, 0xef053460},
+ {0x00000b10, 0xa030101c},
+ {0x00000b38, 0xdf808444},
+ {0x00000b3c, 0xc8422204},
+ {0x00000b40, 0x0180088c},
+ {0x00000b74, 0x00000000},
+ {0x00000894, 0xc80060fa},
+ {0x00000880, 0x4d4f21d0},
+ {0x00000890, 0x10a0540b},
+ {0x000008a8, 0x5601010a},
+ {0x00000900, 0xef053460},
+ {0x00000910, 0x8030101c},
+ {0x00000938, 0xdf808444},
+ {0x0000093c, 0xc8422204},
+ {0x00000940, 0x0180088c},
+ {0x00000974, 0x00000000},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x2_port2_config_regs[] = {
+ {0x00001294, 0xc80060fa},
+ {0x00001280, 0x08130cea},
+ {0x00001290, 0x10a0140b},
+ {0x000012a8, 0xd704010a},
+ {0x00001300, 0x8d050060},
+ {0x00001310, 0x0030001c},
+ {0x00001338, 0xdf008444},
+ {0x0000133c, 0x78422204},
+ {0x00001348, 0x7821f940},
+ {0x0000134c, 0x5a000433},
+ {0x00001094, 0xc80060fa},
+ {0x00001080, 0xade75dd8},
+ {0x00001090, 0x10a0540b},
+ {0x000010a8, 0xb101010a},
+ {0x00001100, 0x33053460},
+ {0x00001110, 0x0030101c},
+ {0x00001138, 0xdf808444},
+ {0x0000113c, 0xc8422204},
+ {0x00001140, 0x8180088c},
+ {0x00001174, 0x00000000},
+ {0x00000e94, 0xc80060fa},
+ {0x00000e80, 0x0fbf16d0},
+ {0x00000e90, 0x10a0540b},
+ {0x00000ea8, 0x7a01010a},
+ {0x00000f00, 0xf5053460},
+ {0x00000f10, 0xc030101c},
+ {0x00000f38, 0xdf808444},
+ {0x00000f3c, 0xc8422204},
+ {0x00000f40, 0x8180088c},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x2_port3_config_regs[] = {
+ {0x00001894, 0xc80060fa},
+ {0x00001880, 0x0f90fd6a},
+ {0x00001890, 0x10a0840b},
+ {0x000018a8, 0xdf04010a},
+ {0x00001900, 0x57050060},
+ {0x00001910, 0x0030001c},
+ {0x00001938, 0x5f004444},
+ {0x0000193c, 0x78464204},
+ {0x00001948, 0x7821f940},
+ {0x0000194c, 0xb2000433},
+ {0x00001694, 0xc80060fa},
+ {0x00001680, 0x0ef6d058},
+ {0x00001690, 0x10a0540b},
+ {0x000016a8, 0x7a01010a},
+ {0x00001700, 0x69053460},
+ {0x00001710, 0xa030101c},
+ {0x00001738, 0xdf808444},
+ {0x0000173c, 0xc8422204},
+ {0x00001740, 0x0180088c},
+ {0x00001774, 0x00000000},
+ {0x00001494, 0xc80060fa},
+ {0x00001480, 0xf9d34bd0},
+ {0x00001490, 0x10a0540b},
+ {0x000014a8, 0x7a01010a},
+ {0x00001500, 0x1b053460},
+ {0x00001510, 0x0030101c},
+ {0x00001538, 0xdf808444},
+ {0x0000153c, 0xc8422204},
+ {0x00001540, 0x8180088c},
+ {0x00001574, 0x00000000},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x4_port0_config_regs[] = {
+ {0x00000694, 0xc80060fa},
+ {0x00000680, 0x3d4f78fa},
+ {0x00000690, 0x10a0140b},
+ {0x000006a8, 0xdf04010a},
+ {0x00000700, 0x57050060},
+ {0x00000710, 0x0030001c},
+ {0x00000738, 0x5f004444},
+ {0x0000073c, 0x78464204},
+ {0x00000748, 0x7821f940},
+ {0x0000074c, 0xb2000433},
+ {0x00000494, 0xfe6030fa},
+ {0x00000480, 0x29ef5ed8},
+ {0x00000490, 0x10a0540b},
+ {0x000004a8, 0x7a01010a},
+ {0x00000500, 0xef053460},
+ {0x00000510, 0xe030101c},
+ {0x00000538, 0xdf808444},
+ {0x0000053c, 0xc8422204},
+ {0x00000540, 0x0180088c},
+ {0x00000574, 0x00000004},
+ {0x00000294, 0x23e030fa},
+ {0x00000280, 0xcb45b950},
+ {0x00000290, 0x10a0540b},
+ {0x000002a8, 0x8c01010a},
+ {0x00000300, 0xef053460},
+ {0x00000310, 0x8030101c},
+ {0x00000338, 0x41808444},
+ {0x0000033c, 0x32422204},
+ {0x00000340, 0x0180088c},
+ {0x00000374, 0x00000004},
+ {0x00000894, 0x5620b0fa},
+ {0x00000880, 0x4d4f21dc},
+ {0x00000890, 0x10a0540b},
+ {0x000008a8, 0x5601010a},
+ {0x00000900, 0xef053460},
+ {0x00000910, 0x8030101c},
+ {0x00000938, 0xdf808444},
+ {0x0000093c, 0xc8422204},
+ {0x00000940, 0x0180088c},
+ {0x00000974, 0x00000004},
+ {0x00000a94, 0xc91030fa},
+ {0x00000a80, 0x5a166ecc},
+ {0x00000a90, 0x10a0540b},
+ {0x00000aa8, 0x5d01010a},
+ {0x00000b00, 0xef053460},
+ {0x00000b10, 0xa030101c},
+ {0x00000b38, 0xdf808444},
+ {0x00000b3c, 0xc8422204},
+ {0x00000b40, 0x0180088c},
+ {0x00000b74, 0x00000004},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x4_port1_config_regs[] = {
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x4_port2_config_regs[] = {
+ {0x00001294, 0x28f000fa},
+ {0x00001280, 0x08130cfa},
+ {0x00001290, 0x10c0140b},
+ {0x000012a8, 0xd704010a},
+ {0x00001300, 0x8d050060},
+ {0x00001310, 0x0030001c},
+ {0x00001338, 0xdf008444},
+ {0x0000133c, 0x78422204},
+ {0x00001348, 0x7821f940},
+ {0x0000134c, 0x5a000433},
+ {0x00001094, 0x2d20b0fa},
+ {0x00001080, 0xade75dd8},
+ {0x00001090, 0x10a0540b},
+ {0x000010a8, 0xb101010a},
+ {0x00001100, 0x33053460},
+ {0x00001110, 0x0030101c},
+ {0x00001138, 0xdf808444},
+ {0x0000113c, 0xc8422204},
+ {0x00001140, 0x8180088c},
+ {0x00001174, 0x00000004},
+ {0x00000e94, 0xd308d0fa},
+ {0x00000e80, 0x0fbf16d0},
+ {0x00000e90, 0x10a0540b},
+ {0x00000ea8, 0x2c01010a},
+ {0x00000f00, 0xf5053460},
+ {0x00000f10, 0xc030101c},
+ {0x00000f38, 0xdf808444},
+ {0x00000f3c, 0xc8422204},
+ {0x00000f40, 0x8180088c},
+ {0x00000f74, 0x00000004},
+ {0x00001494, 0x136850fa},
+ {0x00001480, 0xf9d34bdc},
+ {0x00001490, 0x10a0540b},
+ {0x000014a8, 0x5a01010a},
+ {0x00001500, 0x1b053460},
+ {0x00001510, 0x0030101c},
+ {0x00001538, 0xdf808444},
+ {0x0000153c, 0xc8422204},
+ {0x00001540, 0x8180088c},
+ {0x00001574, 0x00000004},
+ {0x00001694, 0x3050d0fa},
+ {0x00001680, 0x0ef6d04c},
+ {0x00001690, 0x10a0540b},
+ {0x000016a8, 0xe301010a},
+ {0x00001700, 0x69053460},
+ {0x00001710, 0xa030101c},
+ {0x00001738, 0xdf808444},
+ {0x0000173c, 0xc8422204},
+ {0x00001740, 0x0180088c},
+ {0x00001774, 0x00000004},
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg x4_port3_config_regs[] = {
+ {0x00000000, 0x00000000}
+};
+
+static const struct phy_reg *x1_config_regs[4] = {
+ x1_port0_config_regs,
+ x1_port1_config_regs,
+ x1_port2_config_regs,
+ x1_port3_config_regs
+};
+
+static const struct phy_reg *x2_config_regs[4] = {
+ x2_port0_config_regs,
+ x2_port1_config_regs,
+ x2_port2_config_regs,
+ x2_port3_config_regs
+};
+
+static const struct phy_reg *x4_config_regs[4] = {
+ x4_port0_config_regs,
+ x4_port1_config_regs,
+ x4_port2_config_regs,
+ x4_port3_config_regs
+};
+
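+/* indexed by nlanes / 2: 0 = x1, 1 = x2, 2 = x4 register sets */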
+static const struct phy_reg **config_regs[3] = {
+ x1_config_regs,
+ x2_config_regs,
+ x4_config_regs,
+};
+
+int ipu6_isys_phy_powerup_ack(struct ipu_isys *isys, unsigned int phy_id)
+{
+ unsigned int i;
+ u32 val;
+ void __iomem *isys_base = isys->pdata->base;
+
+ val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CONTROL(phy_id));
+ val |= CSI_REG_HUB_GPREG_PHY_CONTROL_PWR_EN;
+ writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CONTROL(phy_id));
+
+ for (i = 0; i < LOOP; i++) {
+ if (readl(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(phy_id)) &
+ CSI_REG_HUB_GPREG_PHY_STATUS_POWER_ACK)
+ return 0;
+ usleep_range(100, 200);
+ }
+
+ dev_warn(&isys->adev->dev, "PHY%d powerup ack timeout", phy_id);
+
+ return -ETIMEDOUT;
+}
+
+int ipu6_isys_phy_powerdown_ack(struct ipu_isys *isys, unsigned int phy_id)
+{
+ unsigned int i;
+ u32 val;
+ void __iomem *isys_base = isys->pdata->base;
+
+ writel(0, isys_base + CSI_REG_HUB_GPREG_PHY_CONTROL(phy_id));
+ for (i = 0; i < LOOP; i++) {
+ usleep_range(10, 20);
+ val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(phy_id));
+ if (!(val & CSI_REG_HUB_GPREG_PHY_STATUS_POWER_ACK))
+ return 0;
+ }
+
+ dev_warn(&isys->adev->dev, "PHY %d poweroff ack timeout.\n", phy_id);
+
+ return -ETIMEDOUT;
+}
+
+int ipu6_isys_phy_reset(struct ipu_isys *isys, unsigned int phy_id,
+ bool assert)
+{
+ void __iomem *isys_base = isys->pdata->base;
+ u32 val;
+
+ val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_CONTROL(phy_id));
+ if (assert)
+ val |= CSI_REG_HUB_GPREG_PHY_CONTROL_RESET;
+ else
+ val &= ~(CSI_REG_HUB_GPREG_PHY_CONTROL_RESET);
+
+ writel(val, isys_base + CSI_REG_HUB_GPREG_PHY_CONTROL(phy_id));
+
+ return 0;
+}
+
+int ipu6_isys_phy_ready(struct ipu_isys *isys, unsigned int phy_id)
+{
+ unsigned int i;
+ u32 val;
+ void __iomem *isys_base = isys->pdata->base;
+
+ for (i = 0; i < LOOP; i++) {
+ val = readl(isys_base + CSI_REG_HUB_GPREG_PHY_STATUS(phy_id));
+ dev_dbg(&isys->adev->dev, "PHY%d ready status 0x%x\n",
+ phy_id, val);
+ if (val & CSI_REG_HUB_GPREG_PHY_STATUS_PHY_READY)
+ return 0;
+ usleep_range(10, 20);
+ }
+
+ dev_warn(&isys->adev->dev, "PHY%d ready timeout\n", phy_id);
+
+ return -ETIMEDOUT;
+}
+
+int ipu6_isys_phy_common_init(struct ipu_isys *isys)
+{
+ unsigned int phy_id;
+ void __iomem *phy_base;
+ struct ipu_bus_device *adev = to_ipu_bus_device(&isys->adev->dev);
+ struct ipu_device *isp = adev->isp;
+ void __iomem *isp_base = isp->base;
+ struct v4l2_async_subdev *asd;
+ struct sensor_async_subdev *s_asd;
+ unsigned int i;
+
+ list_for_each_entry(asd, &isys->notifier.asd_list, asd_list) {
+ s_asd = container_of(asd, struct sensor_async_subdev, asd);
+ phy_id = s_asd->csi2.port / 4;
+ phy_base = isp_base + IPU6_ISYS_PHY_BASE(phy_id);
+
+ for (i = 0; i < ARRAY_SIZE(common_init_regs); i++) {
+ writel(common_init_regs[i].val,
+ phy_base + common_init_regs[i].reg);
+ }
+ }
+
+ return 0;
+}
+
+static int ipu6_isys_driver_port_to_phy_port(struct ipu_isys_csi2_config *cfg)
+{
+ int phy_port;
+ int ret;
+
+ if (!(cfg->nlanes == 4 || cfg->nlanes == 2 || cfg->nlanes == 1))
+ return -EINVAL;
+
+ /* B,F -> C0 A,E -> C1 C,G -> C2 D,H -> C3 */
+ /* normalize driver port number */
+ phy_port = cfg->port % 4;
+
+ /* swap port number only for A and B */
+ if (phy_port == 0)
+ phy_port = 1;
+ else if (phy_port == 1)
+ phy_port = 0;
+
+ ret = phy_port;
+
+ /* check validity per lane configuration */
+ if ((cfg->nlanes == 4) &&
+ !(phy_port == 0 || phy_port == 2))
+ ret = -EINVAL;
+ else if ((cfg->nlanes == 2 || cfg->nlanes == 1) &&
+ !(phy_port >= 0 && phy_port <= 3))
+ ret = -EINVAL;
+
+ return ret;
+}
+
+int ipu6_isys_phy_config(struct ipu_isys *isys)
+{
+ int phy_port;
+ unsigned int phy_id;
+ void __iomem *phy_base;
+ struct ipu_bus_device *adev = to_ipu_bus_device(&isys->adev->dev);
+ struct ipu_device *isp = adev->isp;
+ void __iomem *isp_base = isp->base;
+ const struct phy_reg **phy_config_regs;
+ struct v4l2_async_subdev *asd;
+ struct sensor_async_subdev *s_asd;
+ struct ipu_isys_csi2_config cfg;
+ int i;
+
+ list_for_each_entry(asd, &isys->notifier.asd_list, asd_list) {
+ s_asd = container_of(asd, struct sensor_async_subdev, asd);
+ cfg.port = s_asd->csi2.port;
+ cfg.nlanes = s_asd->csi2.nlanes;
+ phy_port = ipu6_isys_driver_port_to_phy_port(&cfg);
+ if (phy_port < 0) {
+ dev_err(&isys->adev->dev, "invalid port %d for lane %d",
+ cfg.port, cfg.nlanes);
+ return -ENXIO;
+ }
+
+ phy_id = cfg.port / 4;
+ phy_base = isp_base + IPU6_ISYS_PHY_BASE(phy_id);
+ dev_dbg(&isys->adev->dev, "port%d PHY%u lanes %u\n",
+ cfg.port, phy_id, cfg.nlanes);
+
+ phy_config_regs = config_regs[cfg.nlanes / 2];
+ cfg.port = phy_port;
+ for (i = 0; phy_config_regs[cfg.port][i].reg; i++) {
+ writel(phy_config_regs[cfg.port][i].val,
+ phy_base + phy_config_regs[cfg.port][i].reg);
+ }
+ }
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2013 - 2020 Intel Corporation
+ */
+
+#ifndef IPU6_ISYS_PHY_H
+#define IPU6_ISYS_PHY_H
+
+/*
+ * Bridge to the PHYs in the buttress register map; each PHY has 16 kbytes.
+ * TGL U/Y has only 2 PHYs.
+ */
+#define IPU6_ISYS_PHY_0_BASE 0x10000
+#define IPU6_ISYS_PHY_1_BASE 0x14000
+#define IPU6_ISYS_PHY_2_BASE 0x18000
+#define IPU6_ISYS_PHY_BASE(i) (0x10000 + (i) * 0x4000)
+
+/* PPI mapping per PHY:
+ *
+ * x4x4:
+ * port0 - PPI range {0, 1, 2, 3, 4}
+ * port2 - PPI range {6, 7, 8, 9, 10}
+ *
+ * x4x2:
+ * port0 - PPI range {0, 1, 2, 3, 4}
+ * port2 - PPI range {6, 7, 8}
+ *
+ * x2x4:
+ * port0 - PPI range {0, 1, 2}
+ * port2 - PPI range {6, 7, 8, 9, 10}
+ *
+ * x2x2:
+ * port0 - PPI range {0, 1, 2}
+ * port1 - PPI range {3, 4, 5}
+ * port2 - PPI range {6, 7, 8}
+ * port3 - PPI range {9, 10, 11}
+ */
+
+/* cbbs config regs */
+#define PHY_CBBS1_BASE 0x0
+/* register offset */
+#define PHY_CBBS1_DFX_VMISCCTL 0x0
+#define PHY_CBBS1_DFX_VBYTESEL0 0x4
+#define PHY_CBBS1_DFX_VBYTESEL1 0x8
+#define PHY_CBBS1_VISA2OBS_CTRL_REG 0xc
+#define PHY_CBBS1_PGL_CTRL_REG 0x10
+#define PHY_CBBS1_RCOMP_CTRL_REG_1 0x14
+#define PHY_CBBS1_RCOMP_CTRL_REG_2 0x18
+#define PHY_CBBS1_RCOMP_CTRL_REG_3 0x1c
+#define PHY_CBBS1_RCOMP_CTRL_REG_4 0x20
+#define PHY_CBBS1_RCOMP_CTRL_REG_5 0x24
+#define PHY_CBBS1_RCOMP_STATUS_REG_1 0x28
+#define PHY_CBBS1_RCOMP_STATUS_REG_2 0x2c
+#define PHY_CBBS1_CLOCK_CTRL_REG 0x30
+#define PHY_CBBS1_CBB_ISOLATION_REG 0x34
+#define PHY_CBBS1_CBB_PLL_CONTROL 0x38
+#define PHY_CBBS1_CBB_STATUS_REG 0x3c
+#define PHY_CBBS1_AFE_CONTROL_REG_1 0x40
+#define PHY_CBBS1_AFE_CONTROL_REG_2 0x44
+#define PHY_CBBS1_CBB_SPARE 0x48
+#define PHY_CBBS1_CRI_CLK_CONTROL 0x4c
+
+/* dbbs shared, i = [0..11] */
+#define PHY_DBBS_SHARED(ppi) ((ppi) * 0x200 + 0x200)
+/* register offset */
+#define PHY_DBBDFE_DFX_V1MISCCTL 0x0
+#define PHY_DBBDFE_DFX_V1BYTESEL0 0x4
+#define PHY_DBBDFE_DFX_V1BYTESEL1 0x8
+#define PHY_DBBDFE_DFX_V2MISCCTL 0xc
+#define PHY_DBBDFE_DFX_V2BYTESEL0 0x10
+#define PHY_DBBDFE_DFX_V2BYTESEL1 0x14
+#define PHY_DBBDFE_GBLCTL 0x18
+#define PHY_DBBDFE_GBL_STATUS 0x1c
+
+/* dbbs udln, i = [0..11] */
+#define IPU6_ISYS_PHY_DBBS_UDLN(ppi) ((ppi) * 0x200 + 0x280)
+/* register offset */
+#define PHY_DBBUDLN_CTL 0x0
+#define PHY_DBBUDLN_CLK_CTL 0x4
+#define PHY_DBBUDLN_SOFT_RST_CTL 0x8
+#define PHY_DBBUDLN_STRAP_VALUES 0xc
+#define PHY_DBBUDLN_TXRX_CTL 0x10
+#define PHY_DBBUDLN_MST_SLV_INIT_CTL 0x14
+#define PHY_DBBUDLN_TX_TIMING_CTL0 0x18
+#define PHY_DBBUDLN_TX_TIMING_CTL1 0x1c
+#define PHY_DBBUDLN_TX_TIMING_CTL2 0x20
+#define PHY_DBBUDLN_TX_TIMING_CTL3 0x24
+#define PHY_DBBUDLN_RX_TIMING_CTL 0x28
+#define PHY_DBBUDLN_PPI_STATUS_CTL 0x2c
+#define PHY_DBBUDLN_PPI_STATUS 0x30
+#define PHY_DBBUDLN_ERR_CTL 0x34
+#define PHY_DBBUDLN_ERR_STATUS 0x38
+#define PHY_DBBUDLN_DFX_LPBK_CTL 0x3c
+#define PHY_DBBUDLN_DFX_PPI_CTL 0x40
+#define PHY_DBBUDLN_DFX_TX_DPHY_CTL 0x44
+#define PHY_DBBUDLN_DFX_TXRX_PRBSPAT_CTL 0x48
+#define PHY_DBBUDLN_DFX_TXRX_PRBSPAT_SEED 0x4c
+#define PHY_DBBUDLN_DFX_PRBSPAT_MAX_WRD_CNT 0x50
+#define PHY_DBBUDLN_DFX_PRBSPAT_STATUS 0x54
+#define PHY_DBBUDLN_DFX_PRBSPAT_WRD_CNT0_STATUS 0x58
+#define PHY_DBBUDLN_DFX_PRBSPAT_WRD_CNT1_STATUS 0x5c
+#define PHY_DBBUDLN_DFX_PRBSPAT_FF_ERR_STATUS 0x60
+#define PHY_DBBUDLN_DFX_PRBSPAT_FF_REF_STATUS 0x64
+#define PHY_DBBUDLN_DFX_PRBSPAT_FF_WRD_CNT0_STATUS 0x68
+#define PHY_DBBUDLN_DFX_PRBSPAT_FF_WRD_CNT1_STATUS 0x6c
+#define PHY_DBBUDLN_RSVD_CTL 0x70
+#define PHY_DBBUDLN_TINIT_DONE BIT(27)
+
+/* dbbs supar, i = [0..11] */
+#define IPU6_ISYS_PHY_DBBS_SUPAR(ppi) ((ppi) * 0x200 + 0x300)
+/* register offset */
+#define PHY_DBBSUPAR_TXRX_FUPAR_CTL 0x0
+#define PHY_DBBSUPAR_TXHS_AFE_CTL 0x4
+#define PHY_DBBSUPAR_TXHS_AFE_LEGDIS_CTL 0x8
+#define PHY_DBBSUPAR_TXHS_AFE_EQ_CTL 0xc
+#define PHY_DBBSUPAR_RXHS_AFE_CTL1 0x10
+#define PHY_DBBSUPAR_RXHS_AFE_PICTL1 0x14
+#define PHY_DBBSUPAR_TXRXLP_AFE_CTL 0x18
+#define PHY_DBBSUPAR_DFX_TXRX_STATUS 0x1c
+#define PHY_DBBSUPAR_DFX_TXRX_CTL 0x20
+#define PHY_DBBSUPAR_DFX_DIGMON_CTL 0x24
+#define PHY_DBBSUPAR_DFX_LOCMON_CTL 0x28
+#define PHY_DBBSUPAR_DFX_RCOMP_CTL1 0x2c
+#define PHY_DBBSUPAR_DFX_RCOMP_CTL2 0x30
+#define PHY_DBBSUPAR_CAL_TOP1 0x34
+#define PHY_DBBSUPAR_CAL_SHARED1 0x38
+#define PHY_DBBSUPAR_CAL_SHARED2 0x3c
+#define PHY_DBBSUPAR_CAL_CDR1 0x40
+#define PHY_DBBSUPAR_CAL_OCAL1 0x44
+#define PHY_DBBSUPAR_CAL_DCC_DLL1 0x48
+#define PHY_DBBSUPAR_CAL_DLL2 0x4c
+#define PHY_DBBSUPAR_CAL_DFX1 0x50
+#define PHY_DBBSUPAR_CAL_DFX2 0x54
+#define PHY_DBBSUPAR_CAL_DFX3 0x58
+#define PHY_BBSUPAR_CAL_DFX4 0x5c
+#define PHY_DBBSUPAR_CAL_DFX5 0x60
+#define PHY_DBBSUPAR_CAL_DFX6 0x64
+#define PHY_DBBSUPAR_CAL_DFX7 0x68
+#define PHY_DBBSUPAR_DFX_AFE_SPARE_CTL1 0x6c
+#define PHY_DBBSUPAR_DFX_AFE_SPARE_CTL2 0x70
+#define PHY_DBBSUPAR_SPARE 0x74
+
+/* sai, i = [0..11] */
+#define IPU6_ISYS_PHY_SAI 0xf800
+/* register offset */
+#define PHY_SAI_CTRL_REG0 0x40
+#define PHY_SAI_CTRL_REG0_1 0x44
+#define PHY_SAI_WR_REG0 0x48
+#define PHY_SAI_WR_REG0_1 0x4c
+#define PHY_SAI_RD_REG0 0x50
+#define PHY_SAI_RD_REG0_1 0x54
+
+int ipu6_isys_phy_powerup_ack(struct ipu_isys *isys, unsigned int phy_id);
+int ipu6_isys_phy_powerdown_ack(struct ipu_isys *isys, unsigned int phy_id);
+int ipu6_isys_phy_reset(struct ipu_isys *isys, unsigned int phy_id,
+ bool assert);
+int ipu6_isys_phy_ready(struct ipu_isys *isys, unsigned int phy_id);
+int ipu6_isys_phy_common_init(struct ipu_isys *isys);
+int ipu6_isys_phy_config(struct ipu_isys *isys);
+#endif
new file mode 100644
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#include <linux/module.h>
+#include <media/v4l2-event.h>
+
+#include "ipu.h"
+#include "ipu-platform-regs.h"
+#include "ipu-trace.h"
+#include "ipu-isys.h"
+#include "ipu-platform-isys-csi2-reg.h"
+
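+/*
+ * Raw Bayer formats supported by the ISYS. The 10- and 12-bit variants
+ * are stored as 16 bits per sample in memory (FW frame format RAW16),
+ * the 8-bit variants as RAW8.
+ */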
+const struct ipu_isys_pixelformat ipu_isys_pfmts[] = {
+ {V4L2_PIX_FMT_SBGGR12, 16, 12, 0, MEDIA_BUS_FMT_SBGGR12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SGBRG12, 16, 12, 0, MEDIA_BUS_FMT_SGBRG12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SGRBG12, 16, 12, 0, MEDIA_BUS_FMT_SGRBG12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SRGGB12, 16, 12, 0, MEDIA_BUS_FMT_SRGGB12_1X12,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SBGGR10, 16, 10, 0, MEDIA_BUS_FMT_SBGGR10_1X10,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SGBRG10, 16, 10, 0, MEDIA_BUS_FMT_SGBRG10_1X10,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SGRBG10, 16, 10, 0, MEDIA_BUS_FMT_SGRBG10_1X10,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SRGGB10, 16, 10, 0, MEDIA_BUS_FMT_SRGGB10_1X10,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW16},
+ {V4L2_PIX_FMT_SBGGR8, 8, 8, 0, MEDIA_BUS_FMT_SBGGR8_1X8,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+ {V4L2_PIX_FMT_SGBRG8, 8, 8, 0, MEDIA_BUS_FMT_SGBRG8_1X8,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+ {V4L2_PIX_FMT_SGRBG8, 8, 8, 0, MEDIA_BUS_FMT_SGRBG8_1X8,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+ {V4L2_PIX_FMT_SRGGB8, 8, 8, 0, MEDIA_BUS_FMT_SRGGB8_1X8,
+ IPU_FW_ISYS_FRAME_FORMAT_RAW8},
+ {}
+};
+
+struct ipu_trace_block isys_trace_blocks[] = {
+ {
+ .offset = IPU_TRACE_REG_IS_TRACE_UNIT_BASE,
+ .type = IPU_TRACE_BLOCK_TUN,
+ },
+ {
+ .offset = IPU_TRACE_REG_IS_SP_EVQ_BASE,
+ .type = IPU_TRACE_BLOCK_TM,
+ },
+ {
+ .offset = IPU_TRACE_REG_IS_SP_GPC_BASE,
+ .type = IPU_TRACE_BLOCK_GPC,
+ },
+ {
+ .offset = IPU_TRACE_REG_IS_ISL_GPC_BASE,
+ .type = IPU_TRACE_BLOCK_GPC,
+ },
+ {
+ .offset = IPU_TRACE_REG_IS_MMU_GPC_BASE,
+ .type = IPU_TRACE_BLOCK_GPC,
+ },
+ {
+ /* Note! this covers all 8 blocks */
+ .offset = IPU_TRACE_REG_CSI2_TM_BASE(0),
+ .type = IPU_TRACE_CSI2,
+ },
+ {
+ /* Note! this covers all 11 blocks */
+ .offset = IPU_TRACE_REG_CSI2_PORT_SIG2SIO_GR_BASE(0),
+ .type = IPU_TRACE_SIG2CIOS,
+ },
+ {
+ .offset = IPU_TRACE_REG_IS_GPREG_TRACE_TIMER_RST_N,
+ .type = IPU_TRACE_TIMER_RST,
+ },
+ {
+ .type = IPU_TRACE_BLOCK_END,
+ }
+};
+
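+/*
+ * One-time ISYS hardware setup: select edge/level triggering for the
+ * per-port CSI-2 and UNISPART interrupt groups, clear any stale status,
+ * program the mask and enable registers, and write the CDC FIFO
+ * thresholds.
+ */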
+void isys_setup_hw(struct ipu_isys *isys)
+{
+ void __iomem *base = isys->pdata->base;
+ const u8 *thd = isys->pdata->ipdata->hw_variant.cdc_fifo_threshold;
+ u32 irqs = 0;
+ unsigned int i, nr;
+
+ nr = (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) ?
+ IPU6_ISYS_CSI_PORT_NUM : IPU6SE_ISYS_CSI_PORT_NUM;
+
+ /* Enable irqs for all MIPI ports */
+ for (i = 0; i < nr; i++)
+ irqs |= IPU_ISYS_UNISPART_IRQ_CSI2(i);
+
+ writel(irqs, base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_EDGE);
+ writel(irqs, base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_LEVEL_NOT_PULSE);
+ writel(0xffffffff, base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR);
+ writel(irqs, base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_MASK);
+ writel(irqs, base + IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_ENABLE);
+
+ irqs = ISYS_UNISPART_IRQS;
+ writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_EDGE);
+ writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_LEVEL_NOT_PULSE);
+ writel(0xffffffff, base + IPU_REG_ISYS_UNISPART_IRQ_CLEAR);
+ writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_MASK);
+ writel(irqs, base + IPU_REG_ISYS_UNISPART_IRQ_ENABLE);
+
+ writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG);
+ writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_MUX_REG);
+
+ /* Write CDC FIFO threshold values for isys */
+ for (i = 0; i < isys->pdata->ipdata->hw_variant.cdc_fifos; i++)
+ writel(thd[i], base + IPU_REG_ISYS_CDC_THRESHOLD(i));
+}
+
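+/*
+ * Top-level ISYS interrupt handler. The SW interrupt bit is dropped
+ * from the UNISPART mask while the loop below dispatches per-port
+ * CSI-2 interrupts and the FW queue work (isys_isr_one); the status
+ * registers are then re-read and the loop repeats until no relevant
+ * bits remain pending or an FLR has completed.
+ */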
+irqreturn_t isys_isr(struct ipu_bus_device *adev)
+{
+ struct ipu_isys *isys = ipu_bus_get_drvdata(adev);
+ void __iomem *base = isys->pdata->base;
+ u32 status_sw, status_csi;
+
+ spin_lock(&isys->power_lock);
+ if (!isys->power) {
+ spin_unlock(&isys->power_lock);
+ return IRQ_NONE;
+ }
+
+ status_csi = readl(isys->pdata->base +
+ IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS);
+ status_sw = readl(isys->pdata->base + IPU_REG_ISYS_UNISPART_IRQ_STATUS);
+
+ writel(ISYS_UNISPART_IRQS & ~IPU_ISYS_UNISPART_IRQ_SW,
+ base + IPU_REG_ISYS_UNISPART_IRQ_MASK);
+
+ do {
+ writel(status_csi, isys->pdata->base +
+ IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_CLEAR);
+ writel(status_sw, isys->pdata->base +
+ IPU_REG_ISYS_UNISPART_IRQ_CLEAR);
+
+ if (isys->isr_csi2_bits & status_csi) {
+ unsigned int i;
+
+ for (i = 0; i < isys->pdata->ipdata->csi2.nports; i++) {
+				/* skip IRQs from ports that are not enabled */
+ if (!isys->csi2[i].base)
+ continue;
+ if (status_csi & IPU_ISYS_UNISPART_IRQ_CSI2(i))
+ ipu_isys_csi2_isr(&isys->csi2[i]);
+ }
+ }
+
+ writel(0, base + IPU_REG_ISYS_UNISPART_SW_IRQ_REG);
+
+ if (!isys_isr_one(adev))
+ status_sw = IPU_ISYS_UNISPART_IRQ_SW;
+ else
+ status_sw = 0;
+
+ status_csi = readl(isys->pdata->base +
+ IPU_REG_ISYS_CSI_TOP_CTRL0_IRQ_STATUS);
+ status_sw |= readl(isys->pdata->base +
+ IPU_REG_ISYS_UNISPART_IRQ_STATUS);
+ } while (((status_csi & isys->isr_csi2_bits) ||
+ (status_sw & IPU_ISYS_UNISPART_IRQ_SW)) &&
+ !isys->adev->isp->flr_done);
+
+ writel(ISYS_UNISPART_IRQS, base + IPU_REG_ISYS_UNISPART_IRQ_MASK);
+
+ spin_unlock(&isys->power_lock);
+
+ return IRQ_HANDLED;
+}
+
new file mode 100644
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#include "ipu-psys.h"
+#include "ipu6-ppg.h"
+
+extern bool enable_power_gating;
+
+struct sched_list {
+ struct list_head list;
+ /* to protect the list */
+ struct mutex lock;
+};
+
+static struct sched_list start_list = {
+ .list = LIST_HEAD_INIT(start_list.list),
+ .lock = __MUTEX_INITIALIZER(start_list.lock),
+};
+
+static struct sched_list stop_list = {
+ .list = LIST_HEAD_INIT(stop_list.list),
+ .lock = __MUTEX_INITIALIZER(stop_list.lock),
+};
+
+static struct sched_list *get_sc_list(enum SCHED_LIST type)
+{
+ /* for debug purposes */
+ WARN_ON(type != SCHED_START_LIST && type != SCHED_STOP_LIST);
+
+ if (type == SCHED_START_LIST)
+ return &start_list;
+ return &stop_list;
+}
+
+static bool is_kppg_in_list(struct ipu_psys_ppg *kppg, struct list_head *head)
+{
+ struct ipu_psys_ppg *tmp;
+
+ list_for_each_entry(tmp, head, sched_list) {
+ if (kppg == tmp)
+ return true;
+ }
+
+ return false;
+}
+
+void ipu_psys_scheduler_remove_kppg(struct ipu_psys_ppg *kppg,
+ enum SCHED_LIST type)
+{
+ struct sched_list *sc_list = get_sc_list(type);
+ struct ipu_psys_ppg *tmp0, *tmp1;
+ struct ipu_psys *psys = kppg->fh->psys;
+
+ mutex_lock(&sc_list->lock);
+ list_for_each_entry_safe(tmp0, tmp1, &sc_list->list, sched_list) {
+ if (tmp0 == kppg) {
+ dev_dbg(&psys->adev->dev,
+ "remove from %s list, kppg(%d 0x%p) state %d\n",
+ type == SCHED_START_LIST ? "start" : "stop",
+ kppg->kpg->pg->ID, kppg, kppg->state);
+ list_del_init(&kppg->sched_list);
+ }
+ }
+ mutex_unlock(&sc_list->lock);
+}
+
+void ipu_psys_scheduler_add_kppg(struct ipu_psys_ppg *kppg,
+ enum SCHED_LIST type)
+{
+ int cur_pri = kppg->pri_base + kppg->pri_dynamic;
+ struct sched_list *sc_list = get_sc_list(type);
+ struct ipu_psys *psys = kppg->fh->psys;
+ struct ipu_psys_ppg *tmp0, *tmp1;
+
+ dev_dbg(&psys->adev->dev,
+ "add to %s list, kppg(%d 0x%p) state %d prio(%d %d) fh 0x%p\n",
+ type == SCHED_START_LIST ? "start" : "stop",
+ kppg->kpg->pg->ID, kppg, kppg->state,
+ kppg->pri_base, kppg->pri_dynamic, kppg->fh);
+
+ mutex_lock(&sc_list->lock);
+ if (list_empty(&sc_list->list)) {
+ list_add(&kppg->sched_list, &sc_list->list);
+ goto out;
+ }
+
+ if (is_kppg_in_list(kppg, &sc_list->list)) {
+ dev_dbg(&psys->adev->dev, "kppg already in list\n");
+ goto out;
+ }
+
+ list_for_each_entry_safe(tmp0, tmp1, &sc_list->list, sched_list) {
+ int tmp_pri = tmp0->pri_base + tmp0->pri_dynamic;
+
+ dev_dbg(&psys->adev->dev,
+ "found kppg(%d 0x%p), state %d pri(%d %d) fh 0x%p\n",
+ tmp0->kpg->pg->ID, tmp0, tmp0->state,
+ tmp0->pri_base, tmp0->pri_dynamic, tmp0->fh);
+
+ if (type == SCHED_START_LIST && tmp_pri > cur_pri) {
+ list_add(&kppg->sched_list, tmp0->sched_list.prev);
+ goto out;
+ } else if (type == SCHED_STOP_LIST && tmp_pri < cur_pri) {
+ list_add(&kppg->sched_list, tmp0->sched_list.prev);
+ goto out;
+ }
+ }
+
+ list_add_tail(&kppg->sched_list, &sc_list->list);
+out:
+ mutex_unlock(&sc_list->lock);
+}
+
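+/*
+ * Dry-run resource allocation: copy the currently running resource
+ * pool into a scratch pool and try to allocate this kppg's resources
+ * against the copy. The caller treats -ENOSPC as resource contention.
+ */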
+static int ipu_psys_detect_resource_contention(struct ipu_psys_ppg *kppg)
+{
+ struct ipu_psys_resource_pool *try_res_pool;
+ struct ipu_psys *psys = kppg->fh->psys;
+ int ret = 0;
+ int state;
+
+ try_res_pool = kzalloc(sizeof(*try_res_pool), GFP_KERNEL);
+	/* kzalloc() returns NULL on failure, never an ERR_PTR */
+	if (!try_res_pool)
+ return -ENOMEM;
+
+ mutex_lock(&kppg->mutex);
+ state = kppg->state;
+ mutex_unlock(&kppg->mutex);
+ if (state == PPG_STATE_STARTED || state == PPG_STATE_RUNNING ||
+ state == PPG_STATE_RESUMED)
+ goto exit;
+
+ ret = ipu_psys_resource_pool_init(try_res_pool);
+ if (ret < 0) {
+ dev_err(&psys->adev->dev, "unable to alloc pg resources\n");
+ WARN_ON(1);
+ goto exit;
+ }
+
+ ipu_psys_resource_copy(&psys->resource_pool_running, try_res_pool);
+ ret = ipu_psys_try_allocate_resources(&psys->adev->dev,
+ kppg->kpg->pg,
+ kppg->manifest,
+ try_res_pool);
+
+ ipu_psys_resource_pool_cleanup(try_res_pool);
+exit:
+ kfree(try_res_pool);
+
+ return ret;
+}
+
+static void ipu_psys_scheduler_ppg_sort(struct ipu_psys *psys, bool *stopping)
+{
+ struct ipu_psys_ppg *kppg, *tmp;
+ struct ipu_psys_scheduler *sched;
+ struct ipu_psys_fh *fh;
+
+ list_for_each_entry(fh, &psys->fhs, list) {
+ mutex_lock(&fh->mutex);
+ sched = &fh->sched;
+
+ if (list_empty(&sched->ppgs)) {
+ mutex_unlock(&fh->mutex);
+ continue;
+ }
+
+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
+ mutex_lock(&kppg->mutex);
+ if (kppg->state == PPG_STATE_START ||
+ kppg->state == PPG_STATE_RESUME) {
+ ipu_psys_scheduler_add_kppg(kppg,
+ SCHED_START_LIST);
+ } else if (kppg->state == PPG_STATE_RUNNING) {
+ ipu_psys_scheduler_add_kppg(kppg,
+ SCHED_STOP_LIST);
+ } else if (kppg->state == PPG_STATE_SUSPENDING ||
+ kppg->state == PPG_STATE_STOPPING) {
+ /* there are some suspending/stopping ppgs */
+ *stopping = true;
+ } else if (kppg->state == PPG_STATE_RESUMING ||
+ kppg->state == PPG_STATE_STARTING) {
+				/* kppgs that are resuming/starting are in flight; nothing to do */
+ }
+ mutex_unlock(&kppg->mutex);
+ }
+ mutex_unlock(&fh->mutex);
+ }
+}
+
+static void ipu_psys_scheduler_update_start_ppg_priority(void)
+{
+ struct sched_list *sc_list = get_sc_list(SCHED_START_LIST);
+ struct ipu_psys_ppg *kppg, *tmp;
+
+ mutex_lock(&sc_list->lock);
+ if (!list_empty(&sc_list->list))
+ list_for_each_entry_safe(kppg, tmp, &sc_list->list, sched_list)
+ kppg->pri_dynamic--;
+ mutex_unlock(&sc_list->lock);
+}
+
+static bool ipu_psys_scheduler_switch_ppg(struct ipu_psys *psys)
+{
+ struct sched_list *sc_list = get_sc_list(SCHED_STOP_LIST);
+ struct ipu_psys_ppg *kppg;
+ bool resched = false;
+
+ mutex_lock(&sc_list->lock);
+ if (list_empty(&sc_list->list)) {
+ /* some ppgs are RESUMING/STARTING */
+ dev_dbg(&psys->adev->dev, "no candidated stop ppg\n");
+ mutex_unlock(&sc_list->lock);
+ return false;
+ }
+ kppg = list_first_entry(&sc_list->list, struct ipu_psys_ppg,
+ sched_list);
+ mutex_unlock(&sc_list->lock);
+
+ mutex_lock(&kppg->mutex);
+ if (!(kppg->state & PPG_STATE_STOP)) {
+ dev_dbg(&psys->adev->dev, "s_change:%s: %p %d -> %d\n",
+ __func__, kppg, kppg->state, PPG_STATE_SUSPEND);
+ kppg->state = PPG_STATE_SUSPEND;
+ resched = true;
+ }
+ mutex_unlock(&kppg->mutex);
+
+ return resched;
+}
+
+/*
+ * Search all kppgs and sort them into start_list and stop_list; always
+ * start the first (highest priority) kppg in start_list. On resource
+ * contention, switch kppgs in stop_list to the suspend state one by one.
+ */
+static bool ipu_psys_scheduler_ppg_start(struct ipu_psys *psys)
+{
+ struct sched_list *sc_list = get_sc_list(SCHED_START_LIST);
+ struct ipu_psys_ppg *kppg, *kppg0;
+ bool stopping_existed = false;
+ int ret;
+
+ ipu_psys_scheduler_ppg_sort(psys, &stopping_existed);
+
+ mutex_lock(&sc_list->lock);
+ if (list_empty(&sc_list->list)) {
+ dev_dbg(&psys->adev->dev, "no ppg to start\n");
+ mutex_unlock(&sc_list->lock);
+ return false;
+ }
+
+ list_for_each_entry_safe(kppg, kppg0,
+ &sc_list->list, sched_list) {
+ mutex_unlock(&sc_list->lock);
+
+ ret = ipu_psys_detect_resource_contention(kppg);
+ if (ret < 0) {
+ dev_dbg(&psys->adev->dev,
+ "ppg %d resource detect failed(%d)\n",
+ kppg->kpg->pg->ID, ret);
+			/*
+			 * Switch out another ppg only when both hold:
+			 * 1. there is resource contention (-ENOSPC)
+			 * 2. no ppg is already suspending/stopping
+			 */
+ if (ret == -ENOSPC) {
+ if (!stopping_existed &&
+ ipu_psys_scheduler_switch_ppg(psys)) {
+ return true;
+ }
+ dev_dbg(&psys->adev->dev,
+ "ppg is suspending/stopping\n");
+ } else {
+ dev_err(&psys->adev->dev,
+ "detect resource error %d\n", ret);
+ }
+ } else {
+ kppg->pri_dynamic = 0;
+
+ mutex_lock(&kppg->mutex);
+ if (kppg->state == PPG_STATE_START)
+ ipu_psys_ppg_start(kppg);
+ else
+ ipu_psys_ppg_resume(kppg);
+ mutex_unlock(&kppg->mutex);
+
+ ipu_psys_scheduler_remove_kppg(kppg,
+ SCHED_START_LIST);
+ ipu_psys_scheduler_update_start_ppg_priority();
+ }
+ mutex_lock(&sc_list->lock);
+ }
+ mutex_unlock(&sc_list->lock);
+
+ return false;
+}
+
+static bool ipu_psys_scheduler_ppg_enqueue_bufset(struct ipu_psys *psys)
+{
+ struct ipu_psys_scheduler *sched;
+ struct ipu_psys_ppg *kppg;
+ struct ipu_psys_fh *fh;
+ bool resched = false;
+
+ list_for_each_entry(fh, &psys->fhs, list) {
+ mutex_lock(&fh->mutex);
+ sched = &fh->sched;
+ if (list_empty(&sched->ppgs)) {
+ mutex_unlock(&fh->mutex);
+ continue;
+ }
+
+ list_for_each_entry(kppg, &sched->ppgs, list) {
+ if (ipu_psys_ppg_enqueue_bufsets(kppg))
+ resched = true;
+ }
+ mutex_unlock(&fh->mutex);
+ }
+
+ return resched;
+}
+
+/*
+ * Check all kppgs within the fhs; if a kppg's state is STOP or SUSPEND,
+ * the l-scheduler calls the ppg function to stop or suspend it and
+ * updates the stop list accordingly.
+ */
+static bool ipu_psys_scheduler_ppg_halt(struct ipu_psys *psys)
+{
+ struct ipu_psys_scheduler *sched;
+ struct ipu_psys_ppg *kppg, *tmp;
+ struct ipu_psys_fh *fh;
+ bool stopping_exit = false;
+
+ list_for_each_entry(fh, &psys->fhs, list) {
+ mutex_lock(&fh->mutex);
+ sched = &fh->sched;
+ if (list_empty(&sched->ppgs)) {
+ mutex_unlock(&fh->mutex);
+ continue;
+ }
+
+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
+ mutex_lock(&kppg->mutex);
+ if (kppg->state & PPG_STATE_STOP) {
+ ipu_psys_ppg_stop(kppg);
+ ipu_psys_scheduler_remove_kppg(kppg,
+ SCHED_STOP_LIST);
+ } else if (kppg->state == PPG_STATE_SUSPEND) {
+ ipu_psys_ppg_suspend(kppg);
+ ipu_psys_scheduler_remove_kppg(kppg,
+ SCHED_STOP_LIST);
+ } else if (kppg->state == PPG_STATE_SUSPENDING ||
+ kppg->state == PPG_STATE_STOPPING) {
+ stopping_exit = true;
+ }
+ mutex_unlock(&kppg->mutex);
+ }
+ mutex_unlock(&fh->mutex);
+ }
+ return stopping_exit;
+}
+
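+/*
+ * kcmd-driven ppg state transitions handled below:
+ *
+ *   ppg state                  START kcmd   STOP kcmd    ENQUEUE kcmd
+ *   STARTED/RESUMED/RUNNING    complete(0)  -> STOP      -
+ *   SUSPENDED                  complete(0)  |= STOP      -> RESUME
+ *   STOPPED                    -> START     complete(0)  complete(-EIO)
+ */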
+static void ipu_psys_update_ppg_state_by_kcmd(struct ipu_psys *psys,
+ struct ipu_psys_ppg *kppg,
+ struct ipu_psys_kcmd *kcmd)
+{
+ int old_ppg_state = kppg->state;
+
+ /*
+ * Respond kcmd when ppg is in stable state:
+ * STARTED/RESUMED/RUNNING/SUSPENDED/STOPPED
+ */
+ if (kppg->state == PPG_STATE_STARTED ||
+ kppg->state == PPG_STATE_RESUMED ||
+ kppg->state == PPG_STATE_RUNNING) {
+ if (kcmd->state == KCMD_STATE_PPG_START)
+ ipu_psys_kcmd_complete(kppg, kcmd, 0);
+ else if (kcmd->state == KCMD_STATE_PPG_STOP)
+ kppg->state = PPG_STATE_STOP;
+ } else if (kppg->state == PPG_STATE_SUSPENDED) {
+ if (kcmd->state == KCMD_STATE_PPG_START)
+ ipu_psys_kcmd_complete(kppg, kcmd, 0);
+ else if (kcmd->state == KCMD_STATE_PPG_STOP)
+			/*
+			 * OR in the STOP flag so the SUSPENDED state is
+			 * preserved; the ppg must be resumed before it
+			 * can be stopped.
+			 */
+ kppg->state |= PPG_STATE_STOP;
+ else if (kcmd->state == KCMD_STATE_PPG_ENQUEUE)
+ kppg->state = PPG_STATE_RESUME;
+ } else if (kppg->state == PPG_STATE_STOPPED) {
+ if (kcmd->state == KCMD_STATE_PPG_START)
+ kppg->state = PPG_STATE_START;
+ else if (kcmd->state == KCMD_STATE_PPG_STOP)
+ ipu_psys_kcmd_complete(kppg, kcmd, 0);
+ else if (kcmd->state == KCMD_STATE_PPG_ENQUEUE) {
+ dev_err(&psys->adev->dev, "ppg %p stopped!\n", kppg);
+ ipu_psys_kcmd_complete(kppg, kcmd, -EIO);
+ }
+ }
+
+ if (old_ppg_state != kppg->state)
+ dev_dbg(&psys->adev->dev, "s_change:%s: %p %d -> %d\n",
+ __func__, kppg, old_ppg_state, kppg->state);
+}
+
+static void ipu_psys_scheduler_kcmd_set(struct ipu_psys *psys)
+{
+ struct ipu_psys_kcmd *kcmd;
+ struct ipu_psys_scheduler *sched;
+ struct ipu_psys_ppg *kppg, *tmp;
+ struct ipu_psys_fh *fh;
+
+ list_for_each_entry(fh, &psys->fhs, list) {
+ mutex_lock(&fh->mutex);
+ sched = &fh->sched;
+ if (list_empty(&sched->ppgs)) {
+ mutex_unlock(&fh->mutex);
+ continue;
+ }
+
+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
+ mutex_lock(&kppg->mutex);
+ if (list_empty(&kppg->kcmds_new_list)) {
+ mutex_unlock(&kppg->mutex);
+ continue;
+			}
+
+ kcmd = list_first_entry(&kppg->kcmds_new_list,
+ struct ipu_psys_kcmd, list);
+ ipu_psys_update_ppg_state_by_kcmd(psys, kppg, kcmd);
+ mutex_unlock(&kppg->mutex);
+ }
+ mutex_unlock(&fh->mutex);
+ }
+}
+
+static bool is_ready_to_enter_power_gating(struct ipu_psys *psys)
+{
+ struct ipu_psys_scheduler *sched;
+ struct ipu_psys_ppg *kppg, *tmp;
+ struct ipu_psys_fh *fh;
+
+ list_for_each_entry(fh, &psys->fhs, list) {
+ mutex_lock(&fh->mutex);
+ sched = &fh->sched;
+ if (list_empty(&sched->ppgs)) {
+ mutex_unlock(&fh->mutex);
+ continue;
+ }
+
+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
+ mutex_lock(&kppg->mutex);
+ if (!list_empty(&kppg->kcmds_new_list) ||
+ !list_empty(&kppg->kcmds_processing_list)) {
+ mutex_unlock(&kppg->mutex);
+ mutex_unlock(&fh->mutex);
+ return false;
+ }
+ if (!(kppg->state == PPG_STATE_RUNNING ||
+ kppg->state == PPG_STATE_STOPPED ||
+ kppg->state == PPG_STATE_SUSPENDED)) {
+ mutex_unlock(&kppg->mutex);
+ mutex_unlock(&fh->mutex);
+ return false;
+ }
+ mutex_unlock(&kppg->mutex);
+ }
+ mutex_unlock(&fh->mutex);
+ }
+
+ return true;
+}
+
+static bool has_pending_kcmd(struct ipu_psys *psys)
+{
+ struct ipu_psys_scheduler *sched;
+ struct ipu_psys_ppg *kppg, *tmp;
+ struct ipu_psys_fh *fh;
+
+ list_for_each_entry(fh, &psys->fhs, list) {
+ mutex_lock(&fh->mutex);
+ sched = &fh->sched;
+ if (list_empty(&sched->ppgs)) {
+ mutex_unlock(&fh->mutex);
+ continue;
+ }
+
+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
+ mutex_lock(&kppg->mutex);
+ if (!list_empty(&kppg->kcmds_new_list) ||
+ !list_empty(&kppg->kcmds_processing_list)) {
+ mutex_unlock(&kppg->mutex);
+ mutex_unlock(&fh->mutex);
+ return true;
+ }
+ mutex_unlock(&kppg->mutex);
+ }
+ mutex_unlock(&fh->mutex);
+ }
+
+ return false;
+}
+
+static bool ipu_psys_scheduler_exit_power_gating(struct ipu_psys *psys)
+{
+	/*
+	 * A gating process still in PSYS_POWER_GATING can be aborted
+	 * directly; only a fully gated PSYS needs an explicit exit.
+	 */
+ if (psys->power_gating == PSYS_POWER_GATED) {
+ dev_dbg(&psys->adev->dev, "powergating: exit ---\n");
+ ipu_psys_exit_power_gating(psys);
+ }
+ psys->power_gating = PSYS_POWER_NORMAL;
+ return false;
+}
+
+static bool ipu_psys_scheduler_enter_power_gating(struct ipu_psys *psys)
+{
+ struct ipu_psys_scheduler *sched;
+ struct ipu_psys_ppg *kppg, *tmp;
+ struct ipu_psys_fh *fh;
+
+ if (!enable_power_gating)
+ return false;
+
+ if (psys->power_gating == PSYS_POWER_NORMAL &&
+ is_ready_to_enter_power_gating(psys)) {
+ /* Enter power gating */
+ dev_dbg(&psys->adev->dev, "powergating: enter +++\n");
+ psys->power_gating = PSYS_POWER_GATING;
+ }
+
+ if (psys->power_gating != PSYS_POWER_GATING)
+ return false;
+
+ /* Suspend ppgs one by one */
+ list_for_each_entry(fh, &psys->fhs, list) {
+ mutex_lock(&fh->mutex);
+ sched = &fh->sched;
+ if (list_empty(&sched->ppgs)) {
+ mutex_unlock(&fh->mutex);
+ continue;
+ }
+
+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
+ mutex_lock(&kppg->mutex);
+ if (kppg->state == PPG_STATE_RUNNING) {
+ kppg->state = PPG_STATE_SUSPEND;
+ mutex_unlock(&kppg->mutex);
+ mutex_unlock(&fh->mutex);
+ return true;
+ }
+
+			if (kppg->state != PPG_STATE_SUSPENDED &&
+			    kppg->state != PPG_STATE_STOPPED) {
+				/* read the state before dropping the lock */
+				bool resched =
+					(kppg->state & PPG_STATE_STOP) ||
+					kppg->state == PPG_STATE_SUSPEND;
+
+				/* Can't enter power gating */
+				mutex_unlock(&kppg->mutex);
+				mutex_unlock(&fh->mutex);
+				/* need to re-run the l-scheduler to suspend the ppg? */
+				return resched;
+			}
+ mutex_unlock(&kppg->mutex);
+ }
+ mutex_unlock(&fh->mutex);
+ }
+
+ psys->power_gating = PSYS_POWER_GATED;
+ ipu_psys_enter_power_gating(psys);
+
+ return false;
+}
+
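+/*
+ * One pass of the local scheduler. In order: abort power gating if new
+ * kcmds arrived, apply kcmd-driven ppg state changes, halt
+ * (stop/suspend) ppgs, start or resume ppgs, and enqueue buffer sets;
+ * if nothing was left to do, try to enter power gating. The scheduler
+ * thread is woken again when work remains and no FW callback is being
+ * waited for.
+ */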
+void ipu_psys_run_next(struct ipu_psys *psys)
+{
+ /* Wake up scheduler due to unfinished work */
+ bool need_trigger = false;
+	/* Wait for the FW callback if there are stopping/suspending/running ppgs */
+ bool wait_fw_finish = false;
+ /*
+ * Code below will crash if fhs is empty. Normally this
+ * shouldn't happen.
+ */
+ if (list_empty(&psys->fhs)) {
+ WARN_ON(1);
+ return;
+ }
+
+ /* Abort power gating process */
+ if (psys->power_gating != PSYS_POWER_NORMAL &&
+ has_pending_kcmd(psys))
+ need_trigger = ipu_psys_scheduler_exit_power_gating(psys);
+
+ /* Handle kcmd and related ppg switch */
+ if (psys->power_gating == PSYS_POWER_NORMAL) {
+ ipu_psys_scheduler_kcmd_set(psys);
+ wait_fw_finish = ipu_psys_scheduler_ppg_halt(psys);
+ need_trigger |= ipu_psys_scheduler_ppg_start(psys);
+ need_trigger |= ipu_psys_scheduler_ppg_enqueue_bufset(psys);
+ }
+ if (!(need_trigger || wait_fw_finish)) {
+ /* Nothing to do, enter power gating */
+ need_trigger = ipu_psys_scheduler_enter_power_gating(psys);
+ if (psys->power_gating == PSYS_POWER_GATING)
+ wait_fw_finish = ipu_psys_scheduler_ppg_halt(psys);
+ }
+
+ if (need_trigger && !wait_fw_finish) {
+ dev_dbg(&psys->adev->dev, "scheduler: wake up\n");
+ atomic_set(&psys->wakeup_count, 1);
+ wake_up_interruptible(&psys->sched_cmd_wq);
+ }
+}
new file mode 100644
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 - 2020 Intel Corporation */
+
+#ifndef IPU6_PLATFORM_RESOURCES_H
+#define IPU6_PLATFORM_RESOURCES_H
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include "ipu-platform-resources.h"
+
+#define IPU6_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT 0
+
+enum {
+ IPU6_FW_PSYS_CMD_QUEUE_COMMAND_ID = 0,
+ IPU6_FW_PSYS_CMD_QUEUE_DEVICE_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG2_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG3_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG4_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG5_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG6_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG7_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG8_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG9_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG10_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG11_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG12_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG13_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG14_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG15_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG16_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG17_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG18_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG19_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG20_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG21_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG22_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG23_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG24_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG25_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG26_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG27_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG28_COMMAND_ID,
+ IPU6_FW_PSYS_CMD_QUEUE_PPG29_COMMAND_ID,
+ IPU6_FW_PSYS_N_PSYS_CMD_QUEUE_ID
+};
+
+enum {
+ IPU6_FW_PSYS_TRANSFER_VMEM0_TYPE_ID = 0,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_TYPE_ID,
+ IPU6_FW_PSYS_LB_VMEM_TYPE_ID,
+ IPU6_FW_PSYS_DMEM_TYPE_ID,
+ IPU6_FW_PSYS_VMEM_TYPE_ID,
+ IPU6_FW_PSYS_BAMEM_TYPE_ID,
+ IPU6_FW_PSYS_PMEM_TYPE_ID,
+ IPU6_FW_PSYS_N_MEM_TYPE_ID
+};
+
+enum ipu6_mem_id {
+ IPU6_FW_PSYS_VMEM0_ID = 0, /* ISP0 VMEM */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID, /* TRANSFER VMEM 0 */
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID, /* TRANSFER VMEM 1 */
+ IPU6_FW_PSYS_LB_VMEM_ID, /* LB VMEM */
+ IPU6_FW_PSYS_BAMEM0_ID, /* ISP0 BAMEM */
+ IPU6_FW_PSYS_DMEM0_ID, /* SPC0 Dmem */
+ IPU6_FW_PSYS_DMEM1_ID, /* SPP0 Dmem */
+ IPU6_FW_PSYS_DMEM2_ID, /* SPP1 Dmem */
+ IPU6_FW_PSYS_DMEM3_ID, /* ISP0 Dmem */
+ IPU6_FW_PSYS_PMEM0_ID, /* ISP0 PMEM */
+ IPU6_FW_PSYS_N_MEM_ID
+};
+
+enum {
+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0,
+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID,
+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID,
+ IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_ID,
+ IPU6_FW_PSYS_DEV_CHN_DMA_ISA_ID,
+ IPU6_FW_PSYS_N_DEV_CHN_ID
+};
+
+enum {
+ IPU6_FW_PSYS_SP_CTRL_TYPE_ID = 0,
+ IPU6_FW_PSYS_SP_SERVER_TYPE_ID,
+ IPU6_FW_PSYS_VP_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID,
+ IPU6_FW_PSYS_GDC_TYPE_ID,
+ IPU6_FW_PSYS_TNR_TYPE_ID,
+ IPU6_FW_PSYS_N_CELL_TYPE_ID
+};
+
+enum {
+ IPU6_FW_PSYS_SP0_ID = 0,
+ IPU6_FW_PSYS_VP0_ID,
+ IPU6_FW_PSYS_PSA_ACC_BNLM_ID,
+ IPU6_FW_PSYS_PSA_ACC_DM_ID,
+ IPU6_FW_PSYS_PSA_ACC_ACM_ID,
+ IPU6_FW_PSYS_PSA_ACC_GTC_YUV1_ID,
+ IPU6_FW_PSYS_BB_ACC_OFS_PIN_MAIN_ID,
+ IPU6_FW_PSYS_BB_ACC_OFS_PIN_DISPLAY_ID,
+ IPU6_FW_PSYS_BB_ACC_OFS_PIN_PP_ID,
+ IPU6_FW_PSYS_PSA_ACC_GAMMASTAR_ID,
+ IPU6_FW_PSYS_PSA_ACC_GLTM_ID,
+ IPU6_FW_PSYS_PSA_ACC_XNR_ID,
+ IPU6_FW_PSYS_PSA_VCSC_ID, /* VCSC */
+ IPU6_FW_PSYS_ISA_ICA_ID,
+ IPU6_FW_PSYS_ISA_LSC_ID,
+ IPU6_FW_PSYS_ISA_DPC_ID,
+ IPU6_FW_PSYS_ISA_SIS_A_ID,
+ IPU6_FW_PSYS_ISA_SIS_B_ID,
+ IPU6_FW_PSYS_ISA_B2B_ID,
+ IPU6_FW_PSYS_ISA_B2R_R2I_SIE_ID,
+ IPU6_FW_PSYS_ISA_R2I_DS_A_ID,
+ IPU6_FW_PSYS_ISA_R2I_DS_B_ID,
+ IPU6_FW_PSYS_ISA_AWB_ID,
+ IPU6_FW_PSYS_ISA_AE_ID,
+ IPU6_FW_PSYS_ISA_AF_ID,
+ IPU6_FW_PSYS_ISA_DOL_ID,
+ IPU6_FW_PSYS_ISA_ICA_MEDIUM_ID,
+ IPU6_FW_PSYS_ISA_X2B_MD_ID,
+ IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID,
+ IPU6_FW_PSYS_ISA_PAF_ID,
+ IPU6_FW_PSYS_BB_ACC_GDC0_ID,
+ IPU6_FW_PSYS_BB_ACC_TNR_ID,
+ IPU6_FW_PSYS_N_CELL_ID
+};
+
+enum {
+ IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID = 0,
+ IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID,
+ IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID,
+ IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID,
+ IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID,
+ IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID,
+};
+
+/* Excluding PMEM */
+#define IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID (IPU6_FW_PSYS_N_MEM_TYPE_ID - 1)
+#define IPU6_FW_PSYS_N_DEV_DFM_ID \
+ (IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID + 1)
+
+#define IPU6_FW_PSYS_VMEM0_MAX_SIZE 0x0800
+/* Transfer VMEM0 words, ref HAS Transfer */
+#define IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE 0x0800
+/* Transfer VMEM1 words, ref HAS Transfer */
+#define IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE 0x0800
+#define IPU6_FW_PSYS_LB_VMEM_MAX_SIZE 0x0400 /* LB VMEM words */
+#define IPU6_FW_PSYS_BAMEM0_MAX_SIZE 0x0800
+#define IPU6_FW_PSYS_DMEM0_MAX_SIZE 0x4000
+#define IPU6_FW_PSYS_DMEM1_MAX_SIZE 0x1000
+#define IPU6_FW_PSYS_DMEM2_MAX_SIZE 0x1000
+#define IPU6_FW_PSYS_DMEM3_MAX_SIZE 0x1000
+#define IPU6_FW_PSYS_PMEM0_MAX_SIZE 0x0500
+
+#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE 30
+#define IPU6_FW_PSYS_DEV_CHN_GDC_MAX_SIZE 0
+#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE 30
+#define IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE 43
+#define IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE 8
+#define IPU6_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE 0
+#define IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE 2
+
+#define IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE 32
+#define IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE 32
+#define IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE 32
+#define IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE 32
+#define IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE 32
+#define IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE 32
+
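+/*
+ * Extension records appended to the FW program manifest and process
+ * objects; the array sizes are derived from the resource ID counts
+ * above, so this layout must stay in sync with them.
+ */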
+struct ipu6_fw_psys_program_manifest_ext {
+ u32 dfm_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID];
+ u32 dfm_active_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID];
+ u16 ext_mem_size[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID];
+ u16 ext_mem_offset[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID];
+ u16 dev_chn_size[IPU6_FW_PSYS_N_DEV_CHN_ID];
+ u16 dev_chn_offset[IPU6_FW_PSYS_N_DEV_CHN_ID];
+ u8 is_dfm_relocatable[IPU6_FW_PSYS_N_DEV_DFM_ID];
+ u8 dec_resources_input[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES];
+ u8 dec_resources_input_terminal[IPU_FW_PSYS_MAX_INPUT_DEC_RESOURCES];
+ u8 dec_resources_output[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES];
+ u8 dec_resources_output_terminal[IPU_FW_PSYS_MAX_OUTPUT_DEC_RESOURCES];
+ u8 padding[IPU_FW_PSYS_N_PADDING_UINT8_IN_PROGRAM_MANIFEST_EXT];
+};
+
+struct ipu6_fw_psys_process_ext {
+ u32 dfm_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID];
+ u32 dfm_active_port_bitmap[IPU6_FW_PSYS_N_DEV_DFM_ID];
+ u16 ext_mem_offset[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID];
+ u16 dev_chn_offset[IPU6_FW_PSYS_N_DEV_CHN_ID];
+ u8 ext_mem_id[IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID];
+};
+
+#endif /* IPU6_PLATFORM_RESOURCES_H */
new file mode 100644
@@ -0,0 +1,560 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/cacheflush.h>
+
+#include "ipu6-ppg.h"
+
+static bool enable_suspend_resume;
+module_param(enable_suspend_resume, bool, 0664);
+MODULE_PARM_DESC(enable_suspend_resume, "enable FW ppg suspend/resume API");
+
+static struct ipu_psys_kcmd *
+ipu_psys_ppg_get_kcmd(struct ipu_psys_ppg *kppg, enum ipu_psys_cmd_state state)
+{
+ struct ipu_psys_kcmd *kcmd;
+
+ if (list_empty(&kppg->kcmds_new_list))
+ return NULL;
+
+ list_for_each_entry(kcmd, &kppg->kcmds_new_list, list) {
+ if (kcmd->state == state)
+ return kcmd;
+ }
+
+ return NULL;
+}
+
+struct ipu_psys_kcmd *ipu_psys_ppg_get_stop_kcmd(struct ipu_psys_ppg *kppg)
+{
+ struct ipu_psys_kcmd *kcmd;
+
+ WARN(!mutex_is_locked(&kppg->mutex), "ppg locking error");
+
+ if (list_empty(&kppg->kcmds_processing_list))
+ return NULL;
+
+ list_for_each_entry(kcmd, &kppg->kcmds_processing_list, list) {
+ if (kcmd->state == KCMD_STATE_PPG_STOP)
+ return kcmd;
+ }
+
+ return NULL;
+}
+
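+/*
+ * Reuse an idle buffer set from the fh's pool if one is large enough
+ * (buf_set_size == 0 marks a set as free); otherwise allocate a new
+ * DMA buffer and add it to the pool.
+ */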
+static struct ipu_psys_buffer_set *
+__get_buf_set(struct ipu_psys_fh *fh, size_t buf_set_size)
+{
+ struct ipu_psys_buffer_set *kbuf_set;
+ struct ipu_psys_scheduler *sched = &fh->sched;
+
+ mutex_lock(&sched->bs_mutex);
+ list_for_each_entry(kbuf_set, &sched->buf_sets, list) {
+ if (!kbuf_set->buf_set_size &&
+ kbuf_set->size >= buf_set_size) {
+ kbuf_set->buf_set_size = buf_set_size;
+ mutex_unlock(&sched->bs_mutex);
+ return kbuf_set;
+ }
+ }
+
+ mutex_unlock(&sched->bs_mutex);
+ /* no suitable buffer available, allocate new one */
+ kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL);
+ if (!kbuf_set)
+ return NULL;
+
+ kbuf_set->kaddr = dma_alloc_attrs(&fh->psys->adev->dev,
+ buf_set_size, &kbuf_set->dma_addr,
+ GFP_KERNEL, 0);
+ if (!kbuf_set->kaddr) {
+ kfree(kbuf_set);
+ return NULL;
+ }
+
+ kbuf_set->buf_set_size = buf_set_size;
+ kbuf_set->size = buf_set_size;
+ mutex_lock(&sched->bs_mutex);
+ list_add(&kbuf_set->list, &sched->buf_sets);
+ mutex_unlock(&sched->bs_mutex);
+
+ return kbuf_set;
+}
+
+static struct ipu_psys_buffer_set *
+ipu_psys_create_buffer_set(struct ipu_psys_kcmd *kcmd,
+ struct ipu_psys_ppg *kppg)
+{
+ struct ipu_psys_fh *fh = kcmd->fh;
+ struct ipu_psys *psys = fh->psys;
+ struct ipu_psys_buffer_set *kbuf_set;
+ size_t buf_set_size;
+ u32 *keb;
+
+ buf_set_size = ipu_fw_psys_ppg_get_buffer_set_size(kcmd);
+
+ kbuf_set = __get_buf_set(fh, buf_set_size);
+ if (!kbuf_set) {
+ dev_err(&psys->adev->dev, "failed to create buffer set\n");
+ return NULL;
+ }
+
+ kbuf_set->buf_set = ipu_fw_psys_ppg_create_buffer_set(kcmd,
+ kbuf_set->kaddr,
+ 0);
+
+ ipu_fw_psys_ppg_buffer_set_vaddress(kbuf_set->buf_set,
+ kbuf_set->dma_addr);
+ keb = kcmd->kernel_enable_bitmap;
+ ipu_fw_psys_ppg_buffer_set_set_kernel_enable_bitmap(kbuf_set->buf_set,
+ keb);
+
+ return kbuf_set;
+}
+
+int ipu_psys_ppg_get_bufset(struct ipu_psys_kcmd *kcmd,
+ struct ipu_psys_ppg *kppg)
+{
+ struct ipu_psys *psys = kppg->fh->psys;
+ struct ipu_psys_buffer_set *kbuf_set;
+ unsigned int i;
+ int ret;
+
+ kbuf_set = ipu_psys_create_buffer_set(kcmd, kppg);
+ if (!kbuf_set) {
+ ret = -EINVAL;
+ goto error;
+ }
+ kcmd->kbuf_set = kbuf_set;
+ kbuf_set->kcmd = kcmd;
+
+ for (i = 0; i < kcmd->nbuffers; i++) {
+ struct ipu_fw_psys_terminal *terminal;
+ u32 buffer;
+
+ terminal = ipu_fw_psys_pg_get_terminal(kcmd, i);
+ if (!terminal)
+ continue;
+
+ buffer = (u32)kcmd->kbufs[i]->dma_addr +
+ kcmd->buffers[i].data_offset;
+
+ ret = ipu_fw_psys_ppg_set_buffer_set(kcmd, terminal, i, buffer);
+ if (ret) {
+ dev_err(&psys->adev->dev, "Unable to set bufset\n");
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ dev_err(&psys->adev->dev, "failed to get buffer set\n");
+ return ret;
+}
+
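+/*
+ * FW completion path: STOPPING ppgs become STOPPED and release their
+ * resources, command queue and power reference; SUSPENDING ppgs become
+ * SUSPENDED and release their resources; STARTED/RESUMED ppgs become
+ * RUNNING. In all non-stopping cases the scheduler thread is woken.
+ */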
+void ipu_psys_ppg_complete(struct ipu_psys *psys, struct ipu_psys_ppg *kppg)
+{
+ u8 queue_id;
+ int old_ppg_state;
+
+ if (!psys || !kppg)
+ return;
+
+ mutex_lock(&kppg->mutex);
+ old_ppg_state = kppg->state;
+ if (kppg->state == PPG_STATE_STOPPING) {
+ struct ipu_psys_kcmd tmp_kcmd = {
+ .kpg = kppg->kpg,
+ };
+
+ kppg->state = PPG_STATE_STOPPED;
+ ipu_psys_free_resources(&kppg->kpg->resource_alloc,
+ &psys->resource_pool_running);
+ queue_id = ipu_fw_psys_ppg_get_base_queue_id(&tmp_kcmd);
+ ipu_psys_free_cmd_queue_resource(&psys->resource_pool_running,
+ queue_id);
+ pm_runtime_put(&psys->adev->dev);
+ } else {
+ if (kppg->state == PPG_STATE_SUSPENDING) {
+ kppg->state = PPG_STATE_SUSPENDED;
+ ipu_psys_free_resources(&kppg->kpg->resource_alloc,
+ &psys->resource_pool_running);
+ } else if (kppg->state == PPG_STATE_STARTED ||
+ kppg->state == PPG_STATE_RESUMED) {
+ kppg->state = PPG_STATE_RUNNING;
+ }
+
+		/*
+		 * Kick the l-scheduler thread for the FW callback, and
+		 * also to check whether power gating should be entered.
+		 */
+ atomic_set(&psys->wakeup_count, 1);
+ wake_up_interruptible(&psys->sched_cmd_wq);
+ }
+ if (old_ppg_state != kppg->state)
+ dev_dbg(&psys->adev->dev, "s_change:%s: %p %d -> %d\n",
+ __func__, kppg, old_ppg_state, kppg->state);
+
+ mutex_unlock(&kppg->mutex);
+}
+
+int ipu_psys_ppg_start(struct ipu_psys_ppg *kppg)
+{
+ struct ipu_psys *psys = kppg->fh->psys;
+ struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg,
+ KCMD_STATE_PPG_START);
+ unsigned int i;
+ int ret;
+
+ if (!kcmd) {
+ dev_err(&psys->adev->dev, "failed to find start kcmd!\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(&psys->adev->dev, "start ppg id %d, addr 0x%p\n",
+ ipu_fw_psys_pg_get_id(kcmd), kppg);
+
+ kppg->state = PPG_STATE_STARTING;
+ for (i = 0; i < kcmd->nbuffers; i++) {
+ struct ipu_fw_psys_terminal *terminal;
+
+ terminal = ipu_fw_psys_pg_get_terminal(kcmd, i);
+ if (!terminal)
+ continue;
+
+ ret = ipu_fw_psys_terminal_set(terminal, i, kcmd, 0,
+ kcmd->buffers[i].len);
+ if (ret) {
+ dev_err(&psys->adev->dev, "Unable to set terminal\n");
+ return ret;
+ }
+ }
+
+ ret = ipu_fw_psys_pg_submit(kcmd);
+ if (ret) {
+ dev_err(&psys->adev->dev, "failed to submit kcmd!\n");
+ return ret;
+ }
+
+ ret = ipu_psys_allocate_resources(&psys->adev->dev,
+ kcmd->kpg->pg,
+ kcmd->pg_manifest,
+ &kcmd->kpg->resource_alloc,
+ &psys->resource_pool_running);
+ if (ret) {
+ dev_err(&psys->adev->dev, "alloc resources failed!\n");
+ return ret;
+ }
+
+ ret = pm_runtime_get_sync(&psys->adev->dev);
+ if (ret < 0) {
+ dev_err(&psys->adev->dev, "failed to power on psys\n");
+ goto error;
+ }
+
+ ret = ipu_psys_kcmd_start(psys, kcmd);
+ if (ret) {
+ ipu_psys_kcmd_complete(kppg, kcmd, -EIO);
+ goto error;
+ }
+
+ dev_dbg(&psys->adev->dev, "s_change:%s: %p %d -> %d\n",
+ __func__, kppg, kppg->state, PPG_STATE_STARTED);
+ kppg->state = PPG_STATE_STARTED;
+ ipu_psys_kcmd_complete(kppg, kcmd, 0);
+
+ return 0;
+
+error:
+ pm_runtime_put_noidle(&psys->adev->dev);
+ ipu_psys_reset_process_cell(&psys->adev->dev,
+ kcmd->kpg->pg,
+ kcmd->pg_manifest,
+ kcmd->kpg->pg->process_count);
+ ipu_psys_free_resources(&kppg->kpg->resource_alloc,
+ &psys->resource_pool_running);
+
+ dev_err(&psys->adev->dev, "failed to start ppg\n");
+ return ret;
+}
+
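+/*
+ * Two resume paths: with enable_suspend_resume the FW ppg resume API
+ * is used after re-allocating resources; otherwise the process group
+ * is marked READY and resubmitted as if it were started anew.
+ */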
+int ipu_psys_ppg_resume(struct ipu_psys_ppg *kppg)
+{
+ struct ipu_psys *psys = kppg->fh->psys;
+ struct ipu_psys_kcmd tmp_kcmd = {
+ .kpg = kppg->kpg,
+ .fh = kppg->fh,
+ };
+ int ret;
+
+ dev_dbg(&psys->adev->dev, "resume ppg id %d, addr 0x%p\n",
+ ipu_fw_psys_pg_get_id(&tmp_kcmd), kppg);
+
+ kppg->state = PPG_STATE_RESUMING;
+ if (enable_suspend_resume) {
+ ret = ipu_psys_allocate_resources(&psys->adev->dev,
+ kppg->kpg->pg,
+ kppg->manifest,
+ &kppg->kpg->resource_alloc,
+ &psys->resource_pool_running);
+ if (ret) {
+ dev_err(&psys->adev->dev, "failed to allocate res\n");
+ return -EIO;
+ }
+
+ ret = ipu_fw_psys_ppg_resume(&tmp_kcmd);
+ if (ret) {
+ dev_err(&psys->adev->dev, "failed to resume ppg\n");
+ goto error;
+ }
+ } else {
+ kppg->kpg->pg->state = IPU_FW_PSYS_PROCESS_GROUP_READY;
+ ret = ipu_fw_psys_pg_submit(&tmp_kcmd);
+ if (ret) {
+ dev_err(&psys->adev->dev, "failed to submit kcmd!\n");
+ return ret;
+ }
+
+ ret = ipu_psys_allocate_resources(&psys->adev->dev,
+ kppg->kpg->pg,
+ kppg->manifest,
+ &kppg->kpg->resource_alloc,
+ &psys->resource_pool_running);
+ if (ret) {
+ dev_err(&psys->adev->dev, "failed to allocate res\n");
+ return ret;
+ }
+
+ ret = ipu_psys_kcmd_start(psys, &tmp_kcmd);
+ if (ret) {
+ dev_err(&psys->adev->dev, "failed to start kcmd!\n");
+ goto error;
+ }
+ }
+ dev_dbg(&psys->adev->dev, "s_change:%s: %p %d -> %d\n",
+ __func__, kppg, kppg->state, PPG_STATE_RESUMED);
+ kppg->state = PPG_STATE_RESUMED;
+
+ return 0;
+
+error:
+ ipu_psys_reset_process_cell(&psys->adev->dev,
+ kppg->kpg->pg,
+ kppg->manifest,
+ kppg->kpg->pg->process_count);
+ ipu_psys_free_resources(&kppg->kpg->resource_alloc,
+ &psys->resource_pool_running);
+
+ return ret;
+}
+
+int ipu_psys_ppg_stop(struct ipu_psys_ppg *kppg)
+{
+ struct ipu_psys_kcmd *kcmd = ipu_psys_ppg_get_kcmd(kppg,
+ KCMD_STATE_PPG_STOP);
+ struct ipu_psys *psys = kppg->fh->psys;
+ struct ipu_psys_kcmd kcmd_temp;
+ int ppg_id, ret = 0;
+
+ if (kcmd) {
+ list_move_tail(&kcmd->list, &kppg->kcmds_processing_list);
+ } else {
+ dev_dbg(&psys->adev->dev, "Exceptional stop happened!\n");
+ kcmd_temp.kpg = kppg->kpg;
+ kcmd_temp.fh = kppg->fh;
+ kcmd = &kcmd_temp;
+ /* delete kppg in stop list to avoid this ppg resuming */
+ ipu_psys_scheduler_remove_kppg(kppg, SCHED_STOP_LIST);
+ }
+
+ ppg_id = ipu_fw_psys_pg_get_id(kcmd);
+ dev_dbg(&psys->adev->dev, "stop ppg(%d, addr 0x%p)\n", ppg_id, kppg);
+
+ if (kppg->state & PPG_STATE_SUSPENDED) {
+ if (enable_suspend_resume) {
+ dev_dbg(&psys->adev->dev, "need resume before stop!\n");
+ kcmd_temp.kpg = kppg->kpg;
+ kcmd_temp.fh = kppg->fh;
+ ret = ipu_fw_psys_ppg_resume(&kcmd_temp);
+ if (ret)
+ dev_err(&psys->adev->dev,
+ "ppg(%d) failed to resume\n", ppg_id);
+ } else if (kcmd != &kcmd_temp) {
+ ipu_psys_free_cmd_queue_resource(
+ &psys->resource_pool_running,
+ ipu_fw_psys_ppg_get_base_queue_id(kcmd));
+ ipu_psys_kcmd_complete(kppg, kcmd, 0);
+ dev_dbg(&psys->adev->dev,
+ "s_change:%s %p %d -> %d\n", __func__,
+ kppg, kppg->state, PPG_STATE_STOPPED);
+ pm_runtime_put(&psys->adev->dev);
+ kppg->state = PPG_STATE_STOPPED;
+ return 0;
+ } else {
+ return 0;
+ }
+ }
+ dev_dbg(&psys->adev->dev, "s_change:%s %p %d -> %d\n",
+ __func__, kppg, kppg->state, PPG_STATE_STOPPING);
+ kppg->state = PPG_STATE_STOPPING;
+ ret = ipu_fw_psys_pg_abort(kcmd);
+ if (ret)
+ dev_err(&psys->adev->dev, "ppg(%d) failed to abort\n", ppg_id);
+
+ return ret;
+}
+
+int ipu_psys_ppg_suspend(struct ipu_psys_ppg *kppg)
+{
+ struct ipu_psys *psys = kppg->fh->psys;
+ struct ipu_psys_kcmd tmp_kcmd = {
+ .kpg = kppg->kpg,
+ .fh = kppg->fh,
+ };
+ int ppg_id = ipu_fw_psys_pg_get_id(&tmp_kcmd);
+ int ret = 0;
+
+ dev_dbg(&psys->adev->dev, "suspend ppg(%d, addr 0x%p)\n", ppg_id, kppg);
+
+ dev_dbg(&psys->adev->dev, "s_change:%s %p %d -> %d\n",
+ __func__, kppg, kppg->state, PPG_STATE_SUSPENDING);
+ kppg->state = PPG_STATE_SUSPENDING;
+ if (enable_suspend_resume)
+ ret = ipu_fw_psys_ppg_suspend(&tmp_kcmd);
+ else
+ ret = ipu_fw_psys_pg_abort(&tmp_kcmd);
+ if (ret)
+ dev_err(&psys->adev->dev, "failed to %s ppg(%d)\n",
+ enable_suspend_resume ? "suspend" : "stop", ret);
+
+ return ret;
+}
+
+static bool ipu_psys_ppg_is_bufset_existing(struct ipu_psys_ppg *kppg)
+{
+ return !list_empty(&kppg->kcmds_new_list);
+}
+
+/*
+ * ipu_psys_ppg_enqueue_bufsets - enqueue buffer sets to firmware
+ * If the ppg is in the suspended state, this function returns true to
+ * trigger a reschedule so that the resume command is scheduled before
+ * the buffer sets are enqueued.
+ */
+bool ipu_psys_ppg_enqueue_bufsets(struct ipu_psys_ppg *kppg)
+{
+ struct ipu_psys_kcmd *kcmd, *kcmd0;
+ struct ipu_psys *psys = kppg->fh->psys;
+ bool need_resume = false;
+
+ mutex_lock(&kppg->mutex);
+
+ if (kppg->state & (PPG_STATE_STARTED | PPG_STATE_RESUMED |
+ PPG_STATE_RUNNING)) {
+ if (ipu_psys_ppg_is_bufset_existing(kppg)) {
+ list_for_each_entry_safe(kcmd, kcmd0,
+ &kppg->kcmds_new_list, list) {
+ int ret;
+
+ if (kcmd->state != KCMD_STATE_PPG_ENQUEUE) {
+ need_resume = true;
+ break;
+ }
+
+ ret = ipu_fw_psys_ppg_enqueue_bufs(kcmd);
+ if (ret) {
+ dev_err(&psys->adev->dev,
+ "kppg 0x%p fail to qbufset %d",
+ kppg, ret);
+ break;
+ }
+ list_move_tail(&kcmd->list,
+ &kppg->kcmds_processing_list);
+ dev_dbg(&psys->adev->dev,
+ "kppg %d %p queue kcmd 0x%p fh 0x%p\n",
+ ipu_fw_psys_pg_get_id(kcmd),
+ kppg, kcmd, kcmd->fh);
+ }
+ }
+ }
+
+ mutex_unlock(&kppg->mutex);
+ return need_resume;
+}
+
+void ipu_psys_enter_power_gating(struct ipu_psys *psys)
+{
+ struct ipu_psys_scheduler *sched;
+ struct ipu_psys_ppg *kppg, *tmp;
+ struct ipu_psys_fh *fh;
+ int ret = 0;
+
+ list_for_each_entry(fh, &psys->fhs, list) {
+ mutex_lock(&fh->mutex);
+ sched = &fh->sched;
+ if (list_empty(&sched->ppgs)) {
+ mutex_unlock(&fh->mutex);
+ continue;
+ }
+
+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
+ mutex_lock(&kppg->mutex);
+			/*
+			 * Only SUSPENDED kppgs are handled here: STOPPED
+			 * kppgs have already powered down, and new kppgs
+			 * may still arrive.
+			 */
+ if (kppg->state != PPG_STATE_SUSPENDED) {
+ mutex_unlock(&kppg->mutex);
+ continue;
+ }
+
+ ret = pm_runtime_put_autosuspend(&psys->adev->dev);
+ if (ret < 0) {
+ dev_err(&psys->adev->dev,
+ "failed to power gating off\n");
+ pm_runtime_get_sync(&psys->adev->dev);
+
+ }
+ mutex_unlock(&kppg->mutex);
+ }
+ mutex_unlock(&fh->mutex);
+ }
+}
+
+void ipu_psys_exit_power_gating(struct ipu_psys *psys)
+{
+ struct ipu_psys_scheduler *sched;
+ struct ipu_psys_ppg *kppg, *tmp;
+ struct ipu_psys_fh *fh;
+ int ret = 0;
+
+ list_for_each_entry(fh, &psys->fhs, list) {
+ mutex_lock(&fh->mutex);
+ sched = &fh->sched;
+ if (list_empty(&sched->ppgs)) {
+ mutex_unlock(&fh->mutex);
+ continue;
+ }
+
+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
+ mutex_lock(&kppg->mutex);
+ /* Only for SUSPENDED kppgs */
+ if (kppg->state != PPG_STATE_SUSPENDED) {
+ mutex_unlock(&kppg->mutex);
+ continue;
+ }
+
+ ret = pm_runtime_get_sync(&psys->adev->dev);
+ if (ret < 0) {
+ dev_err(&psys->adev->dev,
+ "failed to power gating\n");
+ pm_runtime_put_noidle(&psys->adev->dev);
+ }
+ mutex_unlock(&kppg->mutex);
+ }
+ mutex_unlock(&fh->mutex);
+ }
+}
new file mode 100644
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Intel Corporation
+ */
+
+#ifndef IPU6_PPG_H
+#define IPU6_PPG_H
+
+#include "ipu-psys.h"
+/* start from 2 so a stray true/false can never be a valid list type */
+enum SCHED_LIST {
+ SCHED_START_LIST = 2,
+ SCHED_STOP_LIST
+};
+
+enum ipu_psys_power_gating_state {
+ PSYS_POWER_NORMAL = 0,
+ PSYS_POWER_GATING,
+ PSYS_POWER_GATED
+};
+
+int ipu_psys_ppg_get_bufset(struct ipu_psys_kcmd *kcmd,
+ struct ipu_psys_ppg *kppg);
+struct ipu_psys_kcmd *ipu_psys_ppg_get_stop_kcmd(struct ipu_psys_ppg *kppg);
+void ipu_psys_scheduler_remove_kppg(struct ipu_psys_ppg *kppg,
+ enum SCHED_LIST type);
+void ipu_psys_scheduler_add_kppg(struct ipu_psys_ppg *kppg,
+ enum SCHED_LIST type);
+int ipu_psys_ppg_start(struct ipu_psys_ppg *kppg);
+int ipu_psys_ppg_resume(struct ipu_psys_ppg *kppg);
+int ipu_psys_ppg_stop(struct ipu_psys_ppg *kppg);
+int ipu_psys_ppg_suspend(struct ipu_psys_ppg *kppg);
+void ipu_psys_ppg_complete(struct ipu_psys *psys, struct ipu_psys_ppg *kppg);
+bool ipu_psys_ppg_enqueue_bufsets(struct ipu_psys_ppg *kppg);
+void ipu_psys_enter_power_gating(struct ipu_psys *psys);
+void ipu_psys_exit_power_gating(struct ipu_psys *psys);
+
+#endif /* IPU6_PPG_H */
new file mode 100644
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+
+#include "ipu-psys.h"
+#include "ipu-platform-regs.h"
+
+/*
+ * GPC (General Performance Counters)
+ */
+#define IPU_PSYS_GPC_NUM 16
+
+#ifndef CONFIG_PM
+#define pm_runtime_get_sync(d) 0
+#define pm_runtime_put(d) 0
+#endif
+
+struct ipu_psys_gpc {
+ bool enable;
+ unsigned int route;
+ unsigned int source;
+ unsigned int sense;
+ unsigned int gpcindex;
+ void *prit;
+};
+
+struct ipu_psys_gpcs {
+ bool gpc_enable;
+ struct ipu_psys_gpc gpc[IPU_PSYS_GPC_NUM];
+ void *prit;
+};
+
+static int ipu6_psys_gpc_global_enable_get(void *data, u64 *val)
+{
+ struct ipu_psys_gpcs *psys_gpcs = data;
+ struct ipu_psys *psys = psys_gpcs->prit;
+
+ mutex_lock(&psys->mutex);
+
+ *val = psys_gpcs->gpc_enable;
+
+ mutex_unlock(&psys->mutex);
+ return 0;
+}
+
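+/*
+ * Writing 1 resets the trace timer, programs each counter's enable and
+ * route/source/sense selection, then soft-resets and enables the GPC
+ * block. Writing 0 disables the block and clears the cached
+ * per-counter settings.
+ */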
+static int ipu6_psys_gpc_global_enable_set(void *data, u64 val)
+{
+ struct ipu_psys_gpcs *psys_gpcs = data;
+ struct ipu_psys *psys = psys_gpcs->prit;
+ void __iomem *base;
+ int idx, res;
+
+ if (val != 0 && val != 1)
+ return -EINVAL;
+
+ if (!psys || !psys->pdata || !psys->pdata->base)
+ return -EINVAL;
+
+ mutex_lock(&psys->mutex);
+
+ base = psys->pdata->base + IPU_GPC_BASE;
+
+ res = pm_runtime_get_sync(&psys->adev->dev);
+ if (res < 0) {
+ pm_runtime_put(&psys->adev->dev);
+ mutex_unlock(&psys->mutex);
+ return res;
+ }
+
+ if (val == 0) {
+ writel(0x0, base + IPU_GPREG_TRACE_TIMER_RST);
+ writel(0x0, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE);
+ writel(0xffff, base + IPU_CDC_MMU_GPC_SOFT_RESET);
+ psys_gpcs->gpc_enable = false;
+ for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) {
+ psys_gpcs->gpc[idx].enable = 0;
+ psys_gpcs->gpc[idx].sense = 0;
+ psys_gpcs->gpc[idx].route = 0;
+ psys_gpcs->gpc[idx].source = 0;
+ }
+ pm_runtime_mark_last_busy(&psys->adev->dev);
+ pm_runtime_put_autosuspend(&psys->adev->dev);
+ } else {
+		/*
+		 * Program the GPC registers and start all GPCs here.
+		 * Reset the free-running local trace timer first.
+		 */
+ writel(0x0, base + IPU_GPREG_TRACE_TIMER_RST);
+ writel(0x1, base + IPU_GPREG_TRACE_TIMER_RST);
+
+ for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) {
+ /* Enable */
+ writel(psys_gpcs->gpc[idx].enable,
+ base + IPU_CDC_MMU_GPC_ENABLE0 + 4 * idx);
+ /* Setting (route/source/sense) */
+ writel((psys_gpcs->gpc[idx].sense
+ << IPU_GPC_SENSE_OFFSET)
+ + (psys_gpcs->gpc[idx].route
+ << IPU_GPC_ROUTE_OFFSET)
+ + (psys_gpcs->gpc[idx].source
+ << IPU_GPC_SOURCE_OFFSET),
+ base + IPU_CDC_MMU_GPC_CNT_SEL0 + 4 * idx);
+ }
+
+ /* Soft reset and Overall Enable. */
+ writel(0x0, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE);
+ writel(0xffff, base + IPU_CDC_MMU_GPC_SOFT_RESET);
+ writel(0x1, base + IPU_CDC_MMU_GPC_OVERALL_ENABLE);
+
+ psys_gpcs->gpc_enable = true;
+ }
+
+ mutex_unlock(&psys->mutex);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(psys_gpc_global_enable_fops,
+ ipu6_psys_gpc_global_enable_get,
+ ipu6_psys_gpc_global_enable_set, "%llu\n");
+
+static int ipu6_psys_gpc_count_get(void *data, u64 *val)
+{
+ struct ipu_psys_gpc *psys_gpc = data;
+ struct ipu_psys *psys = psys_gpc->prit;
+ void __iomem *base;
+ int res;
+
+ if (!psys || !psys->pdata || !psys->pdata->base)
+ return -EINVAL;
+
+ mutex_lock(&psys->mutex);
+
+ base = psys->pdata->base + IPU_GPC_BASE;
+
+ res = pm_runtime_get_sync(&psys->adev->dev);
+ if (res < 0) {
+ pm_runtime_put(&psys->adev->dev);
+ mutex_unlock(&psys->mutex);
+ return res;
+ }
+
+ *val = readl(base + IPU_CDC_MMU_GPC_VALUE0 + 4 * psys_gpc->gpcindex);
+
+ mutex_unlock(&psys->mutex);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(psys_gpc_count_fops,
+ ipu6_psys_gpc_count_get,
+ NULL, "%llu\n");
+
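+/*
+ * Debugfs layout created below:
+ *   <debugfsdir>/gpc/enable      global enable (fops above)
+ *   <debugfsdir>/gpc/gpcN/{enable,source,route,sense}
+ *   <debugfsdir>/gpc/gpcN/count  read-only counter value
+ */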
+int ipu_psys_gpc_init_debugfs(struct ipu_psys *psys)
+{
+ struct dentry *gpcdir;
+ struct dentry *dir;
+ struct dentry *file;
+ int idx;
+ char gpcname[10];
+ struct ipu_psys_gpcs *psys_gpcs;
+
+ psys_gpcs = devm_kzalloc(&psys->dev, sizeof(*psys_gpcs), GFP_KERNEL);
+ if (!psys_gpcs)
+ return -ENOMEM;
+
+ gpcdir = debugfs_create_dir("gpc", psys->debugfsdir);
+ if (IS_ERR(gpcdir))
+ return -ENOMEM;
+
+ psys_gpcs->prit = psys;
+ file = debugfs_create_file("enable", 0600, gpcdir, psys_gpcs,
+				   &psys_gpc_global_enable_fops);
+ if (IS_ERR(file))
+ goto err;
+
+ for (idx = 0; idx < IPU_PSYS_GPC_NUM; idx++) {
+ sprintf(gpcname, "gpc%d", idx);
+ dir = debugfs_create_dir(gpcname, gpcdir);
+ if (IS_ERR(dir))
+ goto err;
+
+ debugfs_create_bool("enable", 0600, dir,
+ &psys_gpcs->gpc[idx].enable);
+
+ debugfs_create_u32("source", 0600, dir,
+ &psys_gpcs->gpc[idx].source);
+
+ debugfs_create_u32("route", 0600, dir,
+ &psys_gpcs->gpc[idx].route);
+
+ debugfs_create_u32("sense", 0600, dir,
+ &psys_gpcs->gpc[idx].sense);
+
+ psys_gpcs->gpc[idx].gpcindex = idx;
+ psys_gpcs->gpc[idx].prit = psys;
+ file = debugfs_create_file("count", 0400, dir,
+ &psys_gpcs->gpc[idx],
+ &psys_gpc_count_fops);
+ if (IS_ERR(file))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ debugfs_remove_recursive(gpcdir);
+ return -ENOMEM;
+}
+#endif
new file mode 100644
@@ -0,0 +1,1032 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/pm_runtime.h>
+#include <linux/kthread.h>
+#include <linux/init_task.h>
+#include <linux/version.h>
+#include <uapi/linux/sched/types.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+
+#include "ipu.h"
+#include "ipu-psys.h"
+#include "ipu6-ppg.h"
+#include "ipu-platform-regs.h"
+#include "ipu-trace.h"
+
+static bool early_pg_transfer;
+module_param(early_pg_transfer, bool, 0664);
+MODULE_PARM_DESC(early_pg_transfer,
+ "Copy PGs back to user after resource allocation");
+
+bool enable_power_gating = true;
+module_param(enable_power_gating, bool, 0664);
+MODULE_PARM_DESC(enable_power_gating, "enable power gating");
+
+struct ipu_trace_block psys_trace_blocks[] = {
+ {
+ .offset = IPU_TRACE_REG_PS_TRACE_UNIT_BASE,
+ .type = IPU_TRACE_BLOCK_TUN,
+ },
+ {
+ .offset = IPU_TRACE_REG_PS_SPC_EVQ_BASE,
+ .type = IPU_TRACE_BLOCK_TM,
+ },
+ {
+ .offset = IPU_TRACE_REG_PS_SPP0_EVQ_BASE,
+ .type = IPU_TRACE_BLOCK_TM,
+ },
+ {
+ .offset = IPU_TRACE_REG_PS_SPC_GPC_BASE,
+ .type = IPU_TRACE_BLOCK_GPC,
+ },
+ {
+ .offset = IPU_TRACE_REG_PS_SPP0_GPC_BASE,
+ .type = IPU_TRACE_BLOCK_GPC,
+ },
+ {
+ .offset = IPU_TRACE_REG_PS_MMU_GPC_BASE,
+ .type = IPU_TRACE_BLOCK_GPC,
+ },
+ {
+ .offset = IPU_TRACE_REG_PS_GPREG_TRACE_TIMER_RST_N,
+ .type = IPU_TRACE_TIMER_RST,
+ },
+ {
+ .type = IPU_TRACE_BLOCK_END,
+ }
+};
+
+static void ipu6_set_sp_info_bits(void __iomem *base)
+{
+ int i;
+
+ writel(IPU_INFO_REQUEST_DESTINATION_IOSF,
+ base + IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER);
+
+ for (i = 0; i < 4; i++)
+ writel(IPU_INFO_REQUEST_DESTINATION_IOSF,
+ base + IPU_REG_PSYS_INFO_SEG_CMEM_MASTER(i));
+ for (i = 0; i < 4; i++)
+ writel(IPU_INFO_REQUEST_DESTINATION_IOSF,
+ base + IPU_REG_PSYS_INFO_SEG_XMEM_MASTER(i));
+}
+
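+/*
+ * Request power up/down for all PSYS sub-domains and poll the status
+ * register until bit 31 clears (request done) or the wait count below
+ * is exhausted.
+ */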
+#define PSYS_SUBDOMAINS_STATUS_WAIT_COUNT 1000
+void ipu_psys_subdomains_power(struct ipu_psys *psys, bool on)
+{
+ unsigned int i;
+ u32 val;
+
+ /* power domain req */
+ dev_dbg(&psys->adev->dev, "power %s psys sub-domains",
+ on ? "UP" : "DOWN");
+ if (on)
+ writel(IPU_PSYS_SUBDOMAINS_POWER_MASK,
+ psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ);
+ else
+ writel(0x0,
+ psys->adev->isp->base + IPU_PSYS_SUBDOMAINS_POWER_REQ);
+
+ i = 0;
+ do {
+ usleep_range(10, 20);
+ val = readl(psys->adev->isp->base +
+ IPU_PSYS_SUBDOMAINS_POWER_STATUS);
+ if (!(val & BIT(31))) {
+ dev_dbg(&psys->adev->dev,
+ "PS sub-domains req done with status 0x%x",
+ val);
+ break;
+ }
+ i++;
+ } while (i < PSYS_SUBDOMAINS_STATUS_WAIT_COUNT);
+
+ if (i == PSYS_SUBDOMAINS_STATUS_WAIT_COUNT)
+ dev_warn(&psys->adev->dev, "Psys sub-domains %s req timeout!",
+ on ? "UP" : "DOWN");
+}
+
+void ipu_psys_setup_hw(struct ipu_psys *psys)
+{
+ void __iomem *base = psys->pdata->base;
+ void __iomem *spc_regs_base =
+ base + psys->pdata->ipdata->hw_variant.spc_offset;
+	void __iomem *psys_iommu0_ctrl;
+ u32 irqs;
+ const u8 r3 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R3_SPC_STATUS_REG;
+ const u8 r4 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R4_SPC_MASTER_BASE_ADDR;
+ const u8 r5 = IPU_DEVICE_AB_GROUP1_TARGET_ID_R5_SPC_PC_STALL;
+
+ if (!psys->adev->isp->secure_mode) {
+ /* configure access blocker for non-secure mode */
+ writel(NCI_AB_ACCESS_MODE_RW,
+ base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR +
+ IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r3));
+ writel(NCI_AB_ACCESS_MODE_RW,
+ base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR +
+ IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r4));
+ writel(NCI_AB_ACCESS_MODE_RW,
+ base + IPU_REG_DMA_TOP_AB_GROUP1_BASE_ADDR +
+ IPU_REG_DMA_TOP_AB_RING_ACCESS_OFFSET(r5));
+ }
+ psys_iommu0_ctrl = base +
+ psys->pdata->ipdata->hw_variant.mmu_hw[0].offset +
+ IPU_MMU_INFO_OFFSET;
+ writel(IPU_INFO_REQUEST_DESTINATION_IOSF, psys_iommu0_ctrl);
+
+ ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL);
+ ipu6_set_sp_info_bits(spc_regs_base + IPU_PSYS_REG_SPP0_STATUS_CTRL);
+
+ /* Enable FW interrupt #0 */
+ writel(0, base + IPU_REG_PSYS_GPDEV_FWIRQ(0));
+ irqs = IPU_PSYS_GPDEV_IRQ_FWIRQ(IPU_PSYS_GPDEV_FWIRQ0);
+ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_EDGE);
+ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_LEVEL_NOT_PULSE);
+ writel(0xffffffff, base + IPU_REG_PSYS_GPDEV_IRQ_CLEAR);
+ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_MASK);
+ writel(irqs, base + IPU_REG_PSYS_GPDEV_IRQ_ENABLE);
+}
+
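+/* Find the ppg whose token matches the kcmd's process group token. */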
+static struct ipu_psys_ppg *ipu_psys_identify_kppg(struct ipu_psys_kcmd *kcmd)
+{
+ struct ipu_psys_scheduler *sched = &kcmd->fh->sched;
+ struct ipu_psys_ppg *kppg, *tmp;
+
+ mutex_lock(&kcmd->fh->mutex);
+ if (list_empty(&sched->ppgs))
+ goto not_found;
+
+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
+ if (ipu_fw_psys_pg_get_token(kcmd)
+ != kppg->token)
+ continue;
+ mutex_unlock(&kcmd->fh->mutex);
+ return kppg;
+ }
+
+not_found:
+ mutex_unlock(&kcmd->fh->mutex);
+ return NULL;
+}
+
+/*
+ * Called to free up all resources associated with a kcmd.
+ * After this the kcmd no longer exists in the driver.
+ */
+static void ipu_psys_kcmd_free(struct ipu_psys_kcmd *kcmd)
+{
+ struct ipu_psys_ppg *kppg;
+ struct ipu_psys_scheduler *sched;
+
+ if (!kcmd)
+ return;
+
+ kppg = ipu_psys_identify_kppg(kcmd);
+ sched = &kcmd->fh->sched;
+
+ if (kcmd->kbuf_set) {
+ mutex_lock(&sched->bs_mutex);
+ kcmd->kbuf_set->buf_set_size = 0;
+ mutex_unlock(&sched->bs_mutex);
+ kcmd->kbuf_set = NULL;
+ }
+
+ if (kppg) {
+ mutex_lock(&kppg->mutex);
+ if (!list_empty(&kcmd->list))
+ list_del(&kcmd->list);
+ mutex_unlock(&kppg->mutex);
+ }
+
+ kfree(kcmd->pg_manifest);
+ kfree(kcmd->kbufs);
+ kfree(kcmd->buffers);
+ kfree(kcmd);
+}
+
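+/*
+ * Build a kernel-side kcmd from a userspace ipu_psys_command: look up
+ * and (re)map the process group buffer, copy in the manifest and the
+ * buffer descriptors, and classify the kcmd as PPG_STOP (no buffers),
+ * PPG_START (buffers without the DMA_HANDLE flag) or PPG_ENQUEUE.
+ */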
+static struct ipu_psys_kcmd *ipu_psys_copy_cmd(struct ipu_psys_command *cmd,
+ struct ipu_psys_fh *fh)
+{
+ struct ipu_psys *psys = fh->psys;
+ struct ipu_psys_kcmd *kcmd;
+ struct ipu_psys_kbuffer *kpgbuf;
+ unsigned int i;
+ int ret, prevfd, fd;
+
+ fd = prevfd = -1;
+
+ if (cmd->bufcount > IPU_MAX_PSYS_CMD_BUFFERS)
+ return NULL;
+
+ if (!cmd->pg_manifest_size)
+ return NULL;
+
+ kcmd = kzalloc(sizeof(*kcmd), GFP_KERNEL);
+ if (!kcmd)
+ return NULL;
+
+ kcmd->state = KCMD_STATE_PPG_NEW;
+ kcmd->fh = fh;
+ INIT_LIST_HEAD(&kcmd->list);
+
+ mutex_lock(&fh->mutex);
+ fd = cmd->pg;
+ kpgbuf = ipu_psys_lookup_kbuffer(fh, fd);
+ if (!kpgbuf || !kpgbuf->sgt) {
+ dev_err(&psys->adev->dev, "%s kbuf %p with fd %d not found.\n",
+ __func__, kpgbuf, fd);
+ mutex_unlock(&fh->mutex);
+ goto error;
+ }
+
+	/* check and remap if possible */
+ ret = ipu_psys_mapbuf_locked(fd, fh, kpgbuf);
+ if (ret) {
+ dev_err(&psys->adev->dev, "%s remap failed\n", __func__);
+ mutex_unlock(&fh->mutex);
+ goto error;
+ }
+
+ kpgbuf = ipu_psys_lookup_kbuffer(fh, fd);
+ if (!kpgbuf || !kpgbuf->sgt) {
+ WARN(1, "kbuf not found or unmapped.\n");
+ mutex_unlock(&fh->mutex);
+ goto error;
+ }
+ mutex_unlock(&fh->mutex);
+
+ kcmd->pg_user = kpgbuf->kaddr;
+ kcmd->kpg = __get_pg_buf(psys, kpgbuf->len);
+ if (!kcmd->kpg)
+ goto error;
+
+ memcpy(kcmd->kpg->pg, kcmd->pg_user, kcmd->kpg->pg_size);
+
+ kcmd->pg_manifest = kzalloc(cmd->pg_manifest_size, GFP_KERNEL);
+ if (!kcmd->pg_manifest)
+ goto error;
+
+ ret = copy_from_user(kcmd->pg_manifest, cmd->pg_manifest,
+ cmd->pg_manifest_size);
+ if (ret)
+ goto error;
+
+ kcmd->pg_manifest_size = cmd->pg_manifest_size;
+
+ kcmd->user_token = cmd->user_token;
+ kcmd->issue_id = cmd->issue_id;
+ kcmd->priority = cmd->priority;
+ if (kcmd->priority >= IPU_PSYS_CMD_PRIORITY_NUM)
+ goto error;
+
+	/* Only the kernel enable bitmap is used. */
+ memcpy(kcmd->kernel_enable_bitmap, cmd->kernel_enable_bitmap,
+ sizeof(cmd->kernel_enable_bitmap));
+
+ kcmd->nbuffers = ipu_fw_psys_pg_get_terminal_count(kcmd);
+ kcmd->buffers = kcalloc(kcmd->nbuffers, sizeof(*kcmd->buffers),
+ GFP_KERNEL);
+ if (!kcmd->buffers)
+ goto error;
+
+ kcmd->kbufs = kcalloc(kcmd->nbuffers, sizeof(kcmd->kbufs[0]),
+ GFP_KERNEL);
+ if (!kcmd->kbufs)
+ goto error;
+
+ /* no buffers: this should be a stop cmd for the ppg */
+ if (!cmd->buffers) {
+ kcmd->state = KCMD_STATE_PPG_STOP;
+ return kcmd;
+ }
+
+ if (!cmd->bufcount || kcmd->nbuffers > cmd->bufcount)
+ goto error;
+
+ ret = copy_from_user(kcmd->buffers, cmd->buffers,
+ kcmd->nbuffers * sizeof(*kcmd->buffers));
+ if (ret)
+ goto error;
+
+ for (i = 0; i < kcmd->nbuffers; i++) {
+ struct ipu_fw_psys_terminal *terminal;
+
+ terminal = ipu_fw_psys_pg_get_terminal(kcmd, i);
+ if (!terminal)
+ continue;
+
+ if (!(kcmd->buffers[i].flags & IPU_BUFFER_FLAG_DMA_HANDLE)) {
+ kcmd->state = KCMD_STATE_PPG_START;
+ continue;
+ }
+ if (kcmd->state == KCMD_STATE_PPG_START) {
+ dev_err(&psys->adev->dev,
+ "err: all buffer.flags&DMA_HANDLE must 0\n");
+ goto error;
+ }
+
+ mutex_lock(&fh->mutex);
+ fd = kcmd->buffers[i].base.fd;
+ kpgbuf = ipu_psys_lookup_kbuffer(fh, fd);
+ if (!kpgbuf || !kpgbuf->sgt) {
+ dev_err(&psys->adev->dev,
+ "%s kcmd->buffers[%d] %p fd %d not found.\n",
+ __func__, i, kpgbuf, fd);
+ mutex_unlock(&fh->mutex);
+ goto error;
+ }
+
+ ret = ipu_psys_mapbuf_locked(fd, fh, kpgbuf);
+ if (ret) {
+ dev_err(&psys->adev->dev, "%s remap failed\n",
+ __func__);
+ mutex_unlock(&fh->mutex);
+ goto error;
+ }
+
+ kpgbuf = ipu_psys_lookup_kbuffer(fh, fd);
+ if (!kpgbuf || !kpgbuf->sgt) {
+ WARN(1, "kbuf not found or unmapped.\n");
+ mutex_unlock(&fh->mutex);
+ goto error;
+ }
+ mutex_unlock(&fh->mutex);
+ kcmd->kbufs[i] = kpgbuf;
+ if (!kcmd->kbufs[i] || !kcmd->kbufs[i]->sgt ||
+ kcmd->kbufs[i]->len < kcmd->buffers[i].bytes_used)
+ goto error;
+ if ((kcmd->kbufs[i]->flags &
+ IPU_BUFFER_FLAG_NO_FLUSH) ||
+ (kcmd->buffers[i].flags &
+ IPU_BUFFER_FLAG_NO_FLUSH) ||
+ prevfd == kcmd->buffers[i].base.fd)
+ continue;
+
+ prevfd = kcmd->buffers[i].base.fd;
+ dma_sync_sg_for_device(&psys->adev->dev,
+ kcmd->kbufs[i]->sgt->sgl,
+ kcmd->kbufs[i]->sgt->orig_nents,
+ DMA_BIDIRECTIONAL);
+ }
+
+ if (kcmd->state != KCMD_STATE_PPG_START)
+ kcmd->state = KCMD_STATE_PPG_ENQUEUE;
+
+ return kcmd;
+error:
+ ipu_psys_kcmd_free(kcmd);
+
+ dev_dbg(&psys->adev->dev, "failed to copy cmd\n");
+
+ return NULL;
+}
+
+static struct ipu_psys_buffer_set *
+ipu_psys_lookup_kbuffer_set(struct ipu_psys *psys, u32 addr)
+{
+ struct ipu_psys_fh *fh;
+ struct ipu_psys_buffer_set *kbuf_set;
+ struct ipu_psys_scheduler *sched;
+
+ list_for_each_entry(fh, &psys->fhs, list) {
+ sched = &fh->sched;
+ mutex_lock(&sched->bs_mutex);
+ list_for_each_entry(kbuf_set, &sched->buf_sets, list) {
+ if (kbuf_set->buf_set &&
+ kbuf_set->buf_set->ipu_virtual_address == addr) {
+ mutex_unlock(&sched->bs_mutex);
+ return kbuf_set;
+ }
+ }
+ mutex_unlock(&sched->bs_mutex);
+ }
+
+ return NULL;
+}
+
+static struct ipu_psys_ppg *ipu_psys_lookup_ppg(struct ipu_psys *psys,
+ dma_addr_t pg_addr)
+{
+ struct ipu_psys_scheduler *sched;
+ struct ipu_psys_ppg *kppg, *tmp;
+ struct ipu_psys_fh *fh;
+
+ list_for_each_entry(fh, &psys->fhs, list) {
+ sched = &fh->sched;
+ mutex_lock(&fh->mutex);
+ if (list_empty(&sched->ppgs)) {
+ mutex_unlock(&fh->mutex);
+ continue;
+ }
+
+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
+ if (pg_addr != kppg->kpg->pg_dma_addr)
+ continue;
+ mutex_unlock(&fh->mutex);
+ return kppg;
+ }
+ mutex_unlock(&fh->mutex);
+ }
+
+ return NULL;
+}
+
+/*
+ * Move the kcmd into the completed state (it finished running or failed).
+ * Fill up the event struct and notify waiters.
+ */
+void ipu_psys_kcmd_complete(struct ipu_psys_ppg *kppg,
+ struct ipu_psys_kcmd *kcmd, int error)
+{
+ struct ipu_psys_fh *fh = kcmd->fh;
+ struct ipu_psys *psys = fh->psys;
+
+ kcmd->ev.type = IPU_PSYS_EVENT_TYPE_CMD_COMPLETE;
+ kcmd->ev.user_token = kcmd->user_token;
+ kcmd->ev.issue_id = kcmd->issue_id;
+ kcmd->ev.error = error;
+ list_move_tail(&kcmd->list, &kppg->kcmds_finished_list);
+
+ if (kcmd->constraint.min_freq)
+ ipu_buttress_remove_psys_constraint(psys->adev->isp,
+ &kcmd->constraint);
+
+ if (!early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg) {
+ struct ipu_psys_kbuffer *kbuf;
+
+ kbuf = ipu_psys_lookup_kbuffer_by_kaddr(kcmd->fh,
+ kcmd->pg_user);
+ if (kbuf && kbuf->valid)
+ memcpy(kcmd->pg_user,
+ kcmd->kpg->pg, kcmd->kpg->pg_size);
+ else
+ dev_dbg(&psys->adev->dev, "Skipping unmapped buffer\n");
+ }
+
+ kcmd->state = KCMD_STATE_PPG_COMPLETE;
+ wake_up_interruptible(&fh->wait);
+}
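+
+/*
+ * Note: the waiter side of the wake_up_interruptible() above is
+ * ipu_ioctl_dqevent() further down, which sleeps in
+ * wait_event_interruptible() until a completed kcmd shows up on one of
+ * the kcmds_finished_list lists.
+ */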
+
+/*
+ * Submit the kcmd's process group to firmware: optionally transfer the
+ * PG back to the user buffer early, then start and disown the PG. If
+ * either step fails, the caller is expected to complete the kcmd with
+ * an error.
+ */
+int ipu_psys_kcmd_start(struct ipu_psys *psys, struct ipu_psys_kcmd *kcmd)
+{
+ int ret;
+
+ if (psys->adev->isp->flr_done)
+ return -EIO;
+
+ if (early_pg_transfer && kcmd->pg_user && kcmd->kpg->pg)
+ memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size);
+
+ ret = ipu_fw_psys_pg_start(kcmd);
+ if (ret) {
+ dev_err(&psys->adev->dev, "failed to start kcmd!\n");
+ return ret;
+ }
+
+ ipu_fw_psys_pg_dump(psys, kcmd, "run");
+
+ ret = ipu_fw_psys_pg_disown(kcmd);
+ if (ret) {
+ dev_err(&psys->adev->dev, "failed to start kcmd!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ipu_psys_kcmd_send_to_ppg_start(struct ipu_psys_kcmd *kcmd)
+{
+ struct ipu_psys_fh *fh = kcmd->fh;
+ struct ipu_psys_scheduler *sched = &fh->sched;
+ struct ipu_psys *psys = fh->psys;
+ struct ipu_psys_ppg *kppg;
+ struct ipu_psys_resource_pool *rpr;
+ int queue_id;
+ int ret;
+
+ rpr = &psys->resource_pool_running;
+
+ kppg = kzalloc(sizeof(*kppg), GFP_KERNEL);
+ if (!kppg)
+ return -ENOMEM;
+
+ kppg->fh = fh;
+ kppg->kpg = kcmd->kpg;
+ kppg->state = PPG_STATE_START;
+ kppg->pri_base = kcmd->priority;
+ kppg->pri_dynamic = 0;
+ INIT_LIST_HEAD(&kppg->list);
+
+ mutex_init(&kppg->mutex);
+ INIT_LIST_HEAD(&kppg->kcmds_new_list);
+ INIT_LIST_HEAD(&kppg->kcmds_processing_list);
+ INIT_LIST_HEAD(&kppg->kcmds_finished_list);
+ INIT_LIST_HEAD(&kppg->sched_list);
+
+ kppg->manifest = kzalloc(kcmd->pg_manifest_size, GFP_KERNEL);
+ if (!kppg->manifest) {
+ kfree(kppg);
+ return -ENOMEM;
+ }
+ memcpy(kppg->manifest, kcmd->pg_manifest,
+ kcmd->pg_manifest_size);
+
+ queue_id = ipu_psys_allocate_cmd_queue_resource(rpr);
+ if (queue_id == -ENOSPC) {
+ dev_err(&psys->adev->dev, "no available queue\n");
+ kfree(kppg->manifest);
+ kfree(kppg);
+ return -ENOMEM;
+ }
+
+ /*
+ * set the token: the start cmd is immediately followed by an
+ * enqueue cmd, so the kppg can be retrieved again by its token.
+ */
+ kppg->token = (u64)kcmd->kpg;
+ ipu_fw_psys_pg_set_token(kcmd, kppg->token);
+ ipu_fw_psys_ppg_set_base_queue_id(kcmd, queue_id);
+ ret = ipu_fw_psys_pg_set_ipu_vaddress(kcmd,
+ kcmd->kpg->pg_dma_addr);
+ if (ret) {
+ ipu_psys_free_cmd_queue_resource(rpr, queue_id);
+ kfree(kppg->manifest);
+ kfree(kppg);
+ return -EIO;
+ }
+ memcpy(kcmd->pg_user, kcmd->kpg->pg, kcmd->kpg->pg_size);
+
+ mutex_lock(&fh->mutex);
+ list_add_tail(&kppg->list, &sched->ppgs);
+ mutex_unlock(&fh->mutex);
+
+ mutex_lock(&kppg->mutex);
+ list_add(&kcmd->list, &kppg->kcmds_new_list);
+ mutex_unlock(&kppg->mutex);
+
+ dev_dbg(&psys->adev->dev,
+ "START ppg(%d, 0x%p) kcmd 0x%p, queue %d\n",
+ ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd, queue_id);
+
+ /* Kick l-scheduler thread */
+ atomic_set(&psys->wakeup_count, 1);
+ wake_up_interruptible(&psys->sched_cmd_wq);
+
+ return 0;
+}
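+
+/*
+ * Token round trip, informally: the START path above stores the kpg
+ * pointer value as the PPG token inside the process group, and the
+ * ENQUEUE/STOP kcmds that follow carry the same token, so the kppg is
+ * found again in ipu_psys_identify_kppg() without extra bookkeeping:
+ *
+ *   kppg->token = (u64)kcmd->kpg;                        (START)
+ *   ...
+ *   if (ipu_fw_psys_pg_get_token(kcmd) == kppg->token)   (later cmds)
+ *           return kppg;
+ */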
+
+static int ipu_psys_kcmd_send_to_ppg(struct ipu_psys_kcmd *kcmd)
+{
+ struct ipu_psys_fh *fh = kcmd->fh;
+ struct ipu_psys *psys = fh->psys;
+ struct ipu_psys_ppg *kppg;
+ struct ipu_psys_resource_pool *rpr;
+ unsigned long flags;
+ u8 id;
+ bool resche = true;
+
+ rpr = &psys->resource_pool_running;
+ if (kcmd->state == KCMD_STATE_PPG_START)
+ return ipu_psys_kcmd_send_to_ppg_start(kcmd);
+
+ kppg = ipu_psys_identify_kppg(kcmd);
+ spin_lock_irqsave(&psys->pgs_lock, flags);
+ kcmd->kpg->pg_size = 0;
+ spin_unlock_irqrestore(&psys->pgs_lock, flags);
+ if (!kppg) {
+ dev_err(&psys->adev->dev, "token not match\n");
+ return -EINVAL;
+ }
+
+ kcmd->kpg = kppg->kpg;
+
+ dev_dbg(&psys->adev->dev, "%s ppg(%d, 0x%p) kcmd %p\n",
+ (kcmd->state == KCMD_STATE_PPG_STOP) ?
+ "STOP" : "ENQUEUE",
+ ipu_fw_psys_pg_get_id(kcmd), kppg, kcmd);
+
+ if (kcmd->state == KCMD_STATE_PPG_STOP) {
+ mutex_lock(&kppg->mutex);
+ if (kppg->state == PPG_STATE_STOPPED) {
+ dev_dbg(&psys->adev->dev,
+ "kppg 0x%p stopped!\n", kppg);
+ id = ipu_fw_psys_ppg_get_base_queue_id(kcmd);
+ ipu_psys_free_cmd_queue_resource(rpr, id);
+ ipu_psys_kcmd_complete(kppg, kcmd, 0);
+ pm_runtime_put(&psys->adev->dev);
+ resche = false;
+ } else {
+ list_add(&kcmd->list, &kppg->kcmds_new_list);
+ }
+ mutex_unlock(&kppg->mutex);
+ } else {
+ int ret;
+
+ ret = ipu_psys_ppg_get_bufset(kcmd, kppg);
+ if (ret)
+ return ret;
+
+ mutex_lock(&kppg->mutex);
+ list_add_tail(&kcmd->list, &kppg->kcmds_new_list);
+ mutex_unlock(&kppg->mutex);
+ }
+
+ if (resche) {
+ /* Kick l-scheduler thread */
+ atomic_set(&psys->wakeup_count, 1);
+ wake_up_interruptible(&psys->sched_cmd_wq);
+ }
+ return 0;
+}
+
+int ipu_psys_kcmd_new(struct ipu_psys_command *cmd, struct ipu_psys_fh *fh)
+{
+ struct ipu_psys *psys = fh->psys;
+ struct ipu_psys_kcmd *kcmd;
+ size_t pg_size;
+ int ret;
+
+ if (psys->adev->isp->flr_done)
+ return -EIO;
+
+ kcmd = ipu_psys_copy_cmd(cmd, fh);
+ if (!kcmd)
+ return -EINVAL;
+
+ pg_size = ipu_fw_psys_pg_get_size(kcmd);
+ if (pg_size > kcmd->kpg->pg_size) {
+ dev_dbg(&psys->adev->dev, "pg size mismatch %lu %lu\n",
+ pg_size, kcmd->kpg->pg_size);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (ipu_fw_psys_pg_get_protocol(kcmd) !=
+ IPU_FW_PSYS_PROCESS_GROUP_PROTOCOL_PPG) {
+ dev_err(&psys->adev->dev, "No support legacy pg now\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (cmd->min_psys_freq) {
+ kcmd->constraint.min_freq = cmd->min_psys_freq;
+ ipu_buttress_add_psys_constraint(psys->adev->isp,
+ &kcmd->constraint);
+ }
+
+ ret = ipu_psys_kcmd_send_to_ppg(kcmd);
+ if (ret)
+ goto error;
+
+ dev_dbg(&psys->adev->dev,
+ "IOC_QCMD: user_token:%llx issue_id:0x%llx pri:%d\n",
+ cmd->user_token, cmd->issue_id, cmd->priority);
+
+ return 0;
+
+error:
+ ipu_psys_kcmd_free(kcmd);
+
+ return ret;
+}
+
+static bool ipu_psys_kcmd_is_valid(struct ipu_psys *psys,
+ struct ipu_psys_kcmd *kcmd)
+{
+ struct ipu_psys_fh *fh;
+ struct ipu_psys_kcmd *kcmd0;
+ struct ipu_psys_ppg *kppg, *tmp;
+ struct ipu_psys_scheduler *sched;
+
+ list_for_each_entry(fh, &psys->fhs, list) {
+ sched = &fh->sched;
+ mutex_lock(&fh->mutex);
+ if (list_empty(&sched->ppgs)) {
+ mutex_unlock(&fh->mutex);
+ continue;
+ }
+ list_for_each_entry_safe(kppg, tmp, &sched->ppgs, list) {
+ mutex_lock(&kppg->mutex);
+ list_for_each_entry(kcmd0,
+ &kppg->kcmds_processing_list,
+ list) {
+ if (kcmd0 == kcmd) {
+ mutex_unlock(&kppg->mutex);
+ mutex_unlock(&fh->mutex);
+ return true;
+ }
+ }
+ mutex_unlock(&kppg->mutex);
+ }
+ mutex_unlock(&fh->mutex);
+ }
+
+ return false;
+}
+
+void ipu_psys_handle_events(struct ipu_psys *psys)
+{
+ struct ipu_psys_kcmd *kcmd;
+ struct ipu_fw_psys_event event;
+ struct ipu_psys_ppg *kppg;
+ bool error;
+ u32 hdl;
+ u16 cmd, status;
+ int res;
+
+ do {
+ memset(&event, 0, sizeof(event));
+ if (!ipu_fw_psys_rcv_event(psys, &event))
+ break;
+
+ if (!event.context_handle)
+ break;
+
+ dev_dbg(&psys->adev->dev, "ppg event: 0x%x, %d, status %d\n",
+ event.context_handle, event.command, event.status);
+
+ error = false;
+ /*
+ * event.command == CMD_RUN means the firmware finished
+ * processing a frame in PPG mode, and event.context_handle is
+ * then the IPU address of a buffer set; we use that address to
+ * look up the kbuffer_set and its kcmd.
+ */
+ hdl = event.context_handle;
+ cmd = event.command;
+ status = event.status;
+
+ kppg = NULL;
+ kcmd = NULL;
+ if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RUN) {
+ struct ipu_psys_buffer_set *kbuf_set;
+ /*
+ * The ppg state needs to change when the first run completes
+ * (after the PPG was started or resumed).
+ */
+ kbuf_set = ipu_psys_lookup_kbuffer_set(psys, hdl);
+ if (kbuf_set)
+ kcmd = kbuf_set->kcmd;
+ if (!kbuf_set || !kcmd)
+ error = true;
+ else
+ kppg = ipu_psys_identify_kppg(kcmd);
+ } else if (cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_STOP ||
+ cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_SUSPEND ||
+ cmd == IPU_FW_PSYS_PROCESS_GROUP_CMD_RESUME) {
+ /*
+ * STOP/SUSPEND/RESUME cmd events take this branch; only a
+ * stop cmd queued by the user has a stop_kcmd and needs to
+ * notify the user to dequeue it.
+ */
+ kppg = ipu_psys_lookup_ppg(psys, hdl);
+ if (kppg) {
+ mutex_lock(&kppg->mutex);
+ if (kppg->state == PPG_STATE_STOPPING) {
+ kcmd = ipu_psys_ppg_get_stop_kcmd(kppg);
+ if (!kcmd)
+ error = true;
+ }
+ mutex_unlock(&kppg->mutex);
+ }
+ } else {
+ dev_err(&psys->adev->dev, "invalid event\n");
+ continue;
+ }
+
+ if (error || !kppg) {
+ dev_err(&psys->adev->dev, "event error, command %d\n",
+ cmd);
+ break;
+ }
+
+ dev_dbg(&psys->adev->dev, "event to kppg 0x%p, kcmd 0x%p\n",
+ kppg, kcmd);
+
+ ipu_psys_ppg_complete(psys, kppg);
+
+ if (kcmd && ipu_psys_kcmd_is_valid(psys, kcmd)) {
+ res = (status == IPU_PSYS_EVENT_CMD_COMPLETE ||
+ status == IPU_PSYS_EVENT_FRAGMENT_COMPLETE) ?
+ 0 : -EIO;
+ mutex_lock(&kppg->mutex);
+ ipu_psys_kcmd_complete(kppg, kcmd, res);
+ mutex_unlock(&kppg->mutex);
+ }
+ } while (1);
+}
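+
+/*
+ * Informal summary of the event demultiplexing above:
+ *
+ *   CMD_RUN                  context_handle = buffer set IPU address
+ *                            -> ipu_psys_lookup_kbuffer_set() -> kcmd
+ *   CMD_STOP/SUSPEND/RESUME  context_handle = PG dma address
+ *                            -> ipu_psys_lookup_ppg() (stop_kcmd only
+ *                               when the PPG is PPG_STATE_STOPPING)
+ */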
+
+int ipu_psys_fh_init(struct ipu_psys_fh *fh)
+{
+ struct ipu_psys *psys = fh->psys;
+ struct ipu_psys_buffer_set *kbuf_set, *kbuf_set_tmp;
+ struct ipu_psys_scheduler *sched = &fh->sched;
+ int i;
+
+ mutex_init(&sched->bs_mutex);
+ INIT_LIST_HEAD(&sched->buf_sets);
+ INIT_LIST_HEAD(&sched->ppgs);
+ pm_runtime_dont_use_autosuspend(&psys->adev->dev);
+ /* allocate and map memory for buf_sets */
+ for (i = 0; i < IPU_PSYS_BUF_SET_POOL_SIZE; i++) {
+ kbuf_set = kzalloc(sizeof(*kbuf_set), GFP_KERNEL);
+ if (!kbuf_set)
+ goto out_free_buf_sets;
+ kbuf_set->kaddr = dma_alloc_attrs(&psys->adev->dev,
+ IPU_PSYS_BUF_SET_MAX_SIZE,
+ &kbuf_set->dma_addr,
+ GFP_KERNEL,
+ 0);
+ if (!kbuf_set->kaddr) {
+ kfree(kbuf_set);
+ goto out_free_buf_sets;
+ }
+ kbuf_set->size = IPU_PSYS_BUF_SET_MAX_SIZE;
+ list_add(&kbuf_set->list, &sched->buf_sets);
+ }
+
+ return 0;
+
+out_free_buf_sets:
+ list_for_each_entry_safe(kbuf_set, kbuf_set_tmp,
+ &sched->buf_sets, list) {
+ dma_free_attrs(&psys->adev->dev,
+ kbuf_set->size, kbuf_set->kaddr,
+ kbuf_set->dma_addr, 0);
+ list_del(&kbuf_set->list);
+ kfree(kbuf_set);
+ }
+ mutex_destroy(&sched->bs_mutex);
+
+ return -ENOMEM;
+}
+
+int ipu_psys_fh_deinit(struct ipu_psys_fh *fh)
+{
+ struct ipu_psys *psys = fh->psys;
+ struct ipu_psys_ppg *kppg, *kppg0;
+ struct ipu_psys_kcmd *kcmd, *kcmd0;
+ struct ipu_psys_buffer_set *kbuf_set, *kbuf_set0;
+ struct ipu_psys_scheduler *sched = &fh->sched;
+ struct ipu_psys_resource_pool *rpr;
+ struct ipu_psys_resource_alloc *alloc;
+ u8 id;
+
+ mutex_lock(&fh->mutex);
+ if (!list_empty(&sched->ppgs)) {
+ list_for_each_entry_safe(kppg, kppg0, &sched->ppgs, list) {
+ unsigned long flags;
+
+ mutex_lock(&kppg->mutex);
+ if (!(kppg->state &
+ (PPG_STATE_STOPPED |
+ PPG_STATE_STOPPING))) {
+ struct ipu_psys_kcmd tmp = {
+ .kpg = kppg->kpg,
+ };
+
+ rpr = &psys->resource_pool_running;
+ alloc = &kppg->kpg->resource_alloc;
+ id = ipu_fw_psys_ppg_get_base_queue_id(&tmp);
+ ipu_psys_ppg_stop(kppg);
+ ipu_psys_free_resources(alloc, rpr);
+ ipu_psys_free_cmd_queue_resource(rpr, id);
+ dev_dbg(&psys->adev->dev,
+ "s_change:%s %p %d -> %d\n", __func__,
+ kppg, kppg->state, PPG_STATE_STOPPED);
+ kppg->state = PPG_STATE_STOPPED;
+ if (psys->power_gating != PSYS_POWER_GATED)
+ pm_runtime_put(&psys->adev->dev);
+ }
+ list_del(&kppg->list);
+ mutex_unlock(&kppg->mutex);
+
+ list_for_each_entry_safe(kcmd, kcmd0,
+ &kppg->kcmds_new_list, list) {
+ kcmd->pg_user = NULL;
+ mutex_unlock(&fh->mutex);
+ ipu_psys_kcmd_free(kcmd);
+ mutex_lock(&fh->mutex);
+ }
+
+ list_for_each_entry_safe(kcmd, kcmd0,
+ &kppg->kcmds_processing_list,
+ list) {
+ kcmd->pg_user = NULL;
+ mutex_unlock(&fh->mutex);
+ ipu_psys_kcmd_free(kcmd);
+ mutex_lock(&fh->mutex);
+ }
+
+ list_for_each_entry_safe(kcmd, kcmd0,
+ &kppg->kcmds_finished_list,
+ list) {
+ kcmd->pg_user = NULL;
+ mutex_unlock(&fh->mutex);
+ ipu_psys_kcmd_free(kcmd);
+ mutex_lock(&fh->mutex);
+ }
+
+ spin_lock_irqsave(&psys->pgs_lock, flags);
+ kppg->kpg->pg_size = 0;
+ spin_unlock_irqrestore(&psys->pgs_lock, flags);
+
+ mutex_destroy(&kppg->mutex);
+ kfree(kppg->manifest);
+ kfree(kppg);
+ }
+ }
+ mutex_unlock(&fh->mutex);
+
+ mutex_lock(&sched->bs_mutex);
+ list_for_each_entry_safe(kbuf_set, kbuf_set0, &sched->buf_sets, list) {
+ dma_free_attrs(&psys->adev->dev,
+ kbuf_set->size, kbuf_set->kaddr,
+ kbuf_set->dma_addr, 0);
+ list_del(&kbuf_set->list);
+ kfree(kbuf_set);
+ }
+ mutex_unlock(&sched->bs_mutex);
+ mutex_destroy(&sched->bs_mutex);
+
+ return 0;
+}
+
+struct ipu_psys_kcmd *ipu_get_completed_kcmd(struct ipu_psys_fh *fh)
+{
+ struct ipu_psys_scheduler *sched = &fh->sched;
+ struct ipu_psys_kcmd *kcmd;
+ struct ipu_psys_ppg *kppg;
+
+ mutex_lock(&fh->mutex);
+ if (list_empty(&sched->ppgs)) {
+ mutex_unlock(&fh->mutex);
+ return NULL;
+ }
+
+ list_for_each_entry(kppg, &sched->ppgs, list) {
+ mutex_lock(&kppg->mutex);
+ if (list_empty(&kppg->kcmds_finished_list)) {
+ mutex_unlock(&kppg->mutex);
+ continue;
+ }
+
+ kcmd = list_first_entry(&kppg->kcmds_finished_list,
+ struct ipu_psys_kcmd, list);
+ mutex_unlock(&fh->mutex);
+ mutex_unlock(&kppg->mutex);
+ dev_dbg(&fh->psys->adev->dev,
+ "get completed kcmd 0x%p\n", kcmd);
+ return kcmd;
+ }
+ mutex_unlock(&fh->mutex);
+
+ return NULL;
+}
+
+long ipu_ioctl_dqevent(struct ipu_psys_event *event,
+ struct ipu_psys_fh *fh, unsigned int f_flags)
+{
+ struct ipu_psys *psys = fh->psys;
+ struct ipu_psys_kcmd *kcmd = NULL;
+ int rval;
+
+ dev_dbg(&psys->adev->dev, "IOC_DQEVENT\n");
+
+ if (!(f_flags & O_NONBLOCK)) {
+ rval = wait_event_interruptible(fh->wait,
+ (kcmd =
+ ipu_get_completed_kcmd(fh)));
+ if (rval == -ERESTARTSYS)
+ return rval;
+ }
+
+ if (!kcmd) {
+ kcmd = ipu_get_completed_kcmd(fh);
+ if (!kcmd)
+ return -ENODATA;
+ }
+
+ *event = kcmd->ev;
+ ipu_psys_kcmd_free(kcmd);
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 - 2021 Intel Corporation
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include "ipu.h"
+#include "ipu-cpd.h"
+#include "ipu-isys.h"
+#include "ipu-psys.h"
+#include "ipu-platform.h"
+#include "ipu-platform-regs.h"
+#include "ipu-platform-buttress-regs.h"
+#include "ipu-platform-isys-csi2-reg.h"
+
+struct ipu_cell_program_t {
+ unsigned int magic_number;
+
+ unsigned int blob_offset;
+ unsigned int blob_size;
+
+ unsigned int start[3];
+
+ unsigned int icache_source;
+ unsigned int icache_target;
+ unsigned int icache_size;
+
+ unsigned int pmem_source;
+ unsigned int pmem_target;
+ unsigned int pmem_size;
+
+ unsigned int data_source;
+ unsigned int data_target;
+ unsigned int data_size;
+
+ unsigned int bss_target;
+ unsigned int bss_size;
+
+ unsigned int cell_id;
+ unsigned int regs_addr;
+
+ unsigned int cell_pmem_data_bus_address;
+ unsigned int cell_dmem_data_bus_address;
+ unsigned int cell_pmem_control_bus_address;
+ unsigned int cell_dmem_control_bus_address;
+
+ unsigned int next;
+ unsigned int dummy[2];
+};
+
+static unsigned int ipu6se_csi_offsets[] = {
+ IPU_CSI_PORT_A_ADDR_OFFSET,
+ IPU_CSI_PORT_B_ADDR_OFFSET,
+ IPU_CSI_PORT_C_ADDR_OFFSET,
+ IPU_CSI_PORT_D_ADDR_OFFSET,
+};
+
+static unsigned int ipu6_csi_offsets[] = {
+ IPU_CSI_PORT_A_ADDR_OFFSET,
+ IPU_CSI_PORT_B_ADDR_OFFSET,
+ IPU_CSI_PORT_C_ADDR_OFFSET,
+ IPU_CSI_PORT_D_ADDR_OFFSET,
+ IPU_CSI_PORT_E_ADDR_OFFSET,
+ IPU_CSI_PORT_F_ADDR_OFFSET,
+ IPU_CSI_PORT_G_ADDR_OFFSET,
+ IPU_CSI_PORT_H_ADDR_OFFSET
+};
+
+struct ipu_isys_internal_pdata isys_ipdata = {
+ .hw_variant = {
+ .offset = IPU_UNIFIED_OFFSET,
+ .nr_mmus = 3,
+ .mmu_hw = {
+ {
+ .offset = IPU_ISYS_IOMMU0_OFFSET,
+ .info_bits =
+ IPU_INFO_REQUEST_DESTINATION_IOSF,
+ .nr_l1streams = 16,
+ .l1_block_sz = {
+ 3, 8, 2, 2, 2, 2, 2, 2, 1, 1,
+ 1, 1, 1, 1, 1, 1
+ },
+ .nr_l2streams = 16,
+ .l2_block_sz = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2
+ },
+ .insert_read_before_invalidate = false,
+ .l1_stream_id_reg_offset =
+ IPU_MMU_L1_STREAM_ID_REG_OFFSET,
+ .l2_stream_id_reg_offset =
+ IPU_MMU_L2_STREAM_ID_REG_OFFSET,
+ },
+ {
+ .offset = IPU_ISYS_IOMMU1_OFFSET,
+ .info_bits = IPU_INFO_STREAM_ID_SET(0),
+ .nr_l1streams = 16,
+ .l1_block_sz = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 1, 1, 4
+ },
+ .nr_l2streams = 16,
+ .l2_block_sz = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2
+ },
+ .insert_read_before_invalidate = false,
+ .l1_stream_id_reg_offset =
+ IPU_MMU_L1_STREAM_ID_REG_OFFSET,
+ .l2_stream_id_reg_offset =
+ IPU_MMU_L2_STREAM_ID_REG_OFFSET,
+ },
+ {
+ .offset = IPU_ISYS_IOMMUI_OFFSET,
+ .info_bits = IPU_INFO_STREAM_ID_SET(0),
+ .nr_l1streams = 0,
+ .nr_l2streams = 0,
+ .insert_read_before_invalidate = false,
+ },
+ },
+ .cdc_fifos = 3,
+ .cdc_fifo_threshold = {6, 8, 2},
+ .dmem_offset = IPU_ISYS_DMEM_OFFSET,
+ .spc_offset = IPU_ISYS_SPC_OFFSET,
+ },
+ .isys_dma_overshoot = IPU_ISYS_OVERALLOC_MIN,
+};
+
+struct ipu_psys_internal_pdata psys_ipdata = {
+ .hw_variant = {
+ .offset = IPU_UNIFIED_OFFSET,
+ .nr_mmus = 4,
+ .mmu_hw = {
+ {
+ .offset = IPU_PSYS_IOMMU0_OFFSET,
+ .info_bits =
+ IPU_INFO_REQUEST_DESTINATION_IOSF,
+ .nr_l1streams = 16,
+ .l1_block_sz = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2
+ },
+ .nr_l2streams = 16,
+ .l2_block_sz = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2
+ },
+ .insert_read_before_invalidate = false,
+ .l1_stream_id_reg_offset =
+ IPU_MMU_L1_STREAM_ID_REG_OFFSET,
+ .l2_stream_id_reg_offset =
+ IPU_MMU_L2_STREAM_ID_REG_OFFSET,
+ },
+ {
+ .offset = IPU_PSYS_IOMMU1_OFFSET,
+ .info_bits = IPU_INFO_STREAM_ID_SET(0),
+ .nr_l1streams = 32,
+ .l1_block_sz = {
+ 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 10,
+ 5, 4, 14, 6, 4, 14, 6, 4, 8,
+ 4, 2, 1, 1, 1, 1, 14
+ },
+ .nr_l2streams = 32,
+ .l2_block_sz = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2
+ },
+ .insert_read_before_invalidate = false,
+ .l1_stream_id_reg_offset =
+ IPU_MMU_L1_STREAM_ID_REG_OFFSET,
+ .l2_stream_id_reg_offset =
+ IPU_PSYS_MMU1W_L2_STREAM_ID_REG_OFFSET,
+ },
+ {
+ .offset = IPU_PSYS_IOMMU1R_OFFSET,
+ .info_bits = IPU_INFO_STREAM_ID_SET(0),
+ .nr_l1streams = 16,
+ .l1_block_sz = {
+ 1, 4, 4, 4, 4, 16, 8, 4, 32,
+ 16, 16, 2, 2, 2, 1, 12
+ },
+ .nr_l2streams = 16,
+ .l2_block_sz = {
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2
+ },
+ .insert_read_before_invalidate = false,
+ .l1_stream_id_reg_offset =
+ IPU_MMU_L1_STREAM_ID_REG_OFFSET,
+ .l2_stream_id_reg_offset =
+ IPU_MMU_L2_STREAM_ID_REG_OFFSET,
+ },
+ {
+ .offset = IPU_PSYS_IOMMUI_OFFSET,
+ .info_bits = IPU_INFO_STREAM_ID_SET(0),
+ .nr_l1streams = 0,
+ .nr_l2streams = 0,
+ .insert_read_before_invalidate = false,
+ },
+ },
+ .dmem_offset = IPU_PSYS_DMEM_OFFSET,
+ },
+};
+
+const struct ipu_buttress_ctrl isys_buttress_ctrl = {
+ .ratio = IPU_IS_FREQ_CTL_DEFAULT_RATIO,
+ .qos_floor = IPU_IS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
+ .freq_ctl = IPU_BUTTRESS_REG_IS_FREQ_CTL,
+ .pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_IS_PWR_SHIFT,
+ .pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_IS_PWR_MASK,
+ .pwr_sts_on = IPU_BUTTRESS_PWR_STATE_UP_DONE,
+ .pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE,
+};
+
+const struct ipu_buttress_ctrl psys_buttress_ctrl = {
+ .ratio = IPU_PS_FREQ_CTL_DEFAULT_RATIO,
+ .qos_floor = IPU_PS_FREQ_CTL_DEFAULT_QOS_FLOOR_RATIO,
+ .freq_ctl = IPU_BUTTRESS_REG_PS_FREQ_CTL,
+ .pwr_sts_shift = IPU_BUTTRESS_PWR_STATE_PS_PWR_SHIFT,
+ .pwr_sts_mask = IPU_BUTTRESS_PWR_STATE_PS_PWR_MASK,
+ .pwr_sts_on = IPU_BUTTRESS_PWR_STATE_UP_DONE,
+ .pwr_sts_off = IPU_BUTTRESS_PWR_STATE_DN_DONE,
+};
+
+static void ipu6_pkg_dir_configure_spc(struct ipu_device *isp,
+ const struct ipu_hw_variants *hw_variant,
+ int pkg_dir_idx, void __iomem *base,
+ u64 *pkg_dir,
+ dma_addr_t pkg_dir_vied_address)
+{
+ struct ipu_psys *psys = ipu_bus_get_drvdata(isp->psys);
+ struct ipu_isys *isys = ipu_bus_get_drvdata(isp->isys);
+ unsigned int server_fw_virtaddr;
+ struct ipu_cell_program_t *prog;
+ void __iomem *spc_base;
+ dma_addr_t dma_addr;
+
+ if (!pkg_dir || !isp->cpd_fw) {
+ dev_err(&isp->pdev->dev, "invalid addr\n");
+ return;
+ }
+
+ server_fw_virtaddr = *(pkg_dir + (pkg_dir_idx + 1) * 2);
+ if (pkg_dir_idx == IPU_CPD_PKG_DIR_ISYS_SERVER_IDX) {
+ dma_addr = sg_dma_address(isys->fw_sgt.sgl);
+ prog = (struct ipu_cell_program_t *)((u64)isp->cpd_fw->data +
+ (server_fw_virtaddr -
+ dma_addr));
+ } else {
+ dma_addr = sg_dma_address(psys->fw_sgt.sgl);
+ prog = (struct ipu_cell_program_t *)((u64)isp->cpd_fw->data +
+ (server_fw_virtaddr -
+ dma_addr));
+ }
+
+ spc_base = base + prog->regs_addr;
+ if (spc_base != (base + hw_variant->spc_offset))
+ dev_warn(&isp->pdev->dev,
+ "SPC reg addr 0x%p not matching value from CPD 0x%p\n",
+ base + hw_variant->spc_offset, spc_base);
+ writel(server_fw_virtaddr + prog->blob_offset +
+ prog->icache_source, spc_base + IPU_PSYS_REG_SPC_ICACHE_BASE);
+ writel(IPU_INFO_REQUEST_DESTINATION_IOSF,
+ spc_base + IPU_REG_PSYS_INFO_SEG_0_CONFIG_ICACHE_MASTER);
+ writel(prog->start[1], spc_base + IPU_PSYS_REG_SPC_START_PC);
+ writel(pkg_dir_vied_address, base + hw_variant->dmem_offset);
+}
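+
+/*
+ * Note on the pkg_dir indexing above (an inference from this code, not
+ * from a spec): the package directory is laid out as two u64 words per
+ * entry with entry 0 acting as a header, so entry N's first word (the
+ * server firmware virtual address) is read from pkg_dir[(N + 1) * 2].
+ */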
+
+void ipu_configure_spc(struct ipu_device *isp,
+ const struct ipu_hw_variants *hw_variant,
+ int pkg_dir_idx, void __iomem *base, u64 *pkg_dir,
+ dma_addr_t pkg_dir_dma_addr)
+{
+ u32 val;
+ void __iomem *dmem_base = base + hw_variant->dmem_offset;
+ void __iomem *spc_regs_base = base + hw_variant->spc_offset;
+
+ val = readl(spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL);
+ val |= IPU_PSYS_SPC_STATUS_CTRL_ICACHE_INVALIDATE;
+ writel(val, spc_regs_base + IPU_PSYS_REG_SPC_STATUS_CTRL);
+
+ if (isp->secure_mode)
+ writel(IPU_PKG_DIR_IMR_OFFSET, dmem_base);
+ else
+ ipu6_pkg_dir_configure_spc(isp, hw_variant, pkg_dir_idx, base,
+ pkg_dir, pkg_dir_dma_addr);
+}
+EXPORT_SYMBOL(ipu_configure_spc);
+
+int ipu_buttress_psys_freq_get(void *data, u64 *val)
+{
+ struct ipu_device *isp = data;
+ u32 reg_val;
+ int rval;
+
+ rval = pm_runtime_get_sync(&isp->psys->dev);
+ if (rval < 0) {
+ pm_runtime_put(&isp->psys->dev);
+ dev_err(&isp->pdev->dev, "Runtime PM failed (%d)\n", rval);
+ return rval;
+ }
+
+ reg_val = readl(isp->base + BUTTRESS_REG_PS_FREQ_CTL);
+
+ pm_runtime_put(&isp->psys->dev);
+
+ *val = IPU_PS_FREQ_RATIO_BASE *
+ (reg_val & IPU_BUTTRESS_PS_FREQ_CTL_DIVISOR_MASK);
+
+ return 0;
+}
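+
+/*
+ * Worked example (assuming IPU_PS_FREQ_RATIO_BASE is 25, i.e. 25 MHz
+ * per ratio step, the usual IPU6 value): a divisor field of 0x10 (16)
+ * read back from BUTTRESS_REG_PS_FREQ_CTL yields *val = 25 * 16 = 400 MHz.
+ */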
+
+void ipu_internal_pdata_init(void)
+{
+ if (ipu_ver == IPU_VER_6 || ipu_ver == IPU_VER_6EP) {
+ isys_ipdata.csi2.nports = ARRAY_SIZE(ipu6_csi_offsets);
+ isys_ipdata.csi2.offsets = ipu6_csi_offsets;
+ isys_ipdata.num_parallel_streams = IPU6_ISYS_NUM_STREAMS;
+ psys_ipdata.hw_variant.spc_offset = IPU6_PSYS_SPC_OFFSET;
+
+ } else if (ipu_ver == IPU_VER_6SE) {
+ isys_ipdata.csi2.nports = ARRAY_SIZE(ipu6se_csi_offsets);
+ isys_ipdata.csi2.offsets = ipu6se_csi_offsets;
+ isys_ipdata.num_parallel_streams = IPU6SE_ISYS_NUM_STREAMS;
+ psys_ipdata.hw_variant.spc_offset = IPU6SE_PSYS_SPC_OFFSET;
+ }
+}
new file mode 100644
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#include <linux/err.h>
+#include <linux/string.h>
+
+#include "ipu-psys.h"
+#include "ipu-fw-psys.h"
+#include "ipu6-platform-resources.h"
+#include "ipu6ep-platform-resources.h"
+
+/* resources table */
+
+/*
+ * Cell types by cell IDs
+ */
+static const u8 ipu6ep_fw_psys_cell_types[IPU6EP_FW_PSYS_N_CELL_ID] = {
+ IPU6_FW_PSYS_SP_CTRL_TYPE_ID,
+ IPU6_FW_PSYS_VP_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_OSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_PSA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* AF */
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_MD */
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* X2B_SVE_RGBIR */
+ IPU6_FW_PSYS_ACC_ISA_TYPE_ID, /* PAF */
+ IPU6_FW_PSYS_GDC_TYPE_ID,
+ IPU6_FW_PSYS_TNR_TYPE_ID,
+};
+
+static const u16 ipu6ep_fw_num_dev_channels[IPU6_FW_PSYS_N_DEV_CHN_ID] = {
+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_CHN_DMA_INTERNAL_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE,
+};
+
+static const u16 ipu6ep_fw_psys_mem_size[IPU6_FW_PSYS_N_MEM_ID] = {
+ IPU6_FW_PSYS_VMEM0_MAX_SIZE,
+ IPU6_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_MAX_SIZE,
+ IPU6_FW_PSYS_LB_VMEM_MAX_SIZE,
+ IPU6_FW_PSYS_BAMEM0_MAX_SIZE,
+ IPU6_FW_PSYS_DMEM0_MAX_SIZE,
+ IPU6_FW_PSYS_DMEM1_MAX_SIZE,
+ IPU6_FW_PSYS_DMEM2_MAX_SIZE,
+ IPU6_FW_PSYS_DMEM3_MAX_SIZE,
+ IPU6_FW_PSYS_PMEM0_MAX_SIZE
+};
+
+static const u16 ipu6ep_fw_psys_dfms[IPU6_FW_PSYS_N_DEV_DFM_ID] = {
+ IPU6_FW_PSYS_DEV_DFM_BB_FULL_PORT_ID_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_DFM_BB_EMPTY_PORT_ID_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE,
+ IPU6_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE,
+};
+
+static const u8
+ipu6ep_fw_psys_c_mem[IPU6EP_FW_PSYS_N_CELL_ID][IPU6_FW_PSYS_N_MEM_TYPE_ID] = {
+ {
+ /* IPU6_FW_PSYS_SP0_ID */
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_DMEM0_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_SP1_ID */
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_DMEM1_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_VP0_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_DMEM3_ID,
+ IPU6_FW_PSYS_VMEM0_ID,
+ IPU6_FW_PSYS_BAMEM0_ID,
+ IPU6_FW_PSYS_PMEM0_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC1_ID BNLM */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC2_ID DM */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC3_ID ACM */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC4_ID GTC YUV1 */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC5_ID OFS pin main */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC6_ID OFS pin display */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC7_ID OFS pin pp */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC8_ID GAMMASTAR */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC9_ID GLTM */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ACC10_ID XNR */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_ICA_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_LSC_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_DPC_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_SIS_A_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_SIS_B_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_B2B_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_B2R_ID and ISA_R2I_SIE */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_R2I_DS_A_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_AWB_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_AE_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_AF_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_X2B_MD_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_X2B_SVE_RGBIR_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_ISA_PAF_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_LB_VMEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_BB_ACC_GDC0_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ },
+ {
+ /* IPU6_FW_PSYS_BB_ACC_TNR_ID */
+ IPU6_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6_FW_PSYS_TRANSFER_VMEM1_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ IPU6_FW_PSYS_N_MEM_ID,
+ }
+};
+
+static const struct ipu_fw_resource_definitions ipu6ep_defs = {
+ .cells = ipu6ep_fw_psys_cell_types,
+ .num_cells = IPU6EP_FW_PSYS_N_CELL_ID,
+ .num_cells_type = IPU6_FW_PSYS_N_CELL_TYPE_ID,
+
+ .dev_channels = ipu6ep_fw_num_dev_channels,
+ .num_dev_channels = IPU6_FW_PSYS_N_DEV_CHN_ID,
+
+ .num_ext_mem_types = IPU6_FW_PSYS_N_DATA_MEM_TYPE_ID,
+ .num_ext_mem_ids = IPU6_FW_PSYS_N_MEM_ID,
+ .ext_mem_ids = ipu6ep_fw_psys_mem_size,
+
+ .num_dfm_ids = IPU6_FW_PSYS_N_DEV_DFM_ID,
+
+ .dfms = ipu6ep_fw_psys_dfms,
+
+ .cell_mem_row = IPU6_FW_PSYS_N_MEM_TYPE_ID,
+ .cell_mem = &ipu6ep_fw_psys_c_mem[0][0],
+};
+
+const struct ipu_fw_resource_definitions *ipu6ep_res_defs = &ipu6ep_defs;
new file mode 100644
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Intel Corporation */
+
+#ifndef IPU6EP_PLATFORM_RESOURCES_H
+#define IPU6EP_PLATFORM_RESOURCES_H
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+enum {
+ IPU6EP_FW_PSYS_SP0_ID = 0,
+ IPU6EP_FW_PSYS_VP0_ID,
+ IPU6EP_FW_PSYS_PSA_ACC_BNLM_ID,
+ IPU6EP_FW_PSYS_PSA_ACC_DM_ID,
+ IPU6EP_FW_PSYS_PSA_ACC_ACM_ID,
+ IPU6EP_FW_PSYS_PSA_ACC_GTC_YUV1_ID,
+ IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_MAIN_ID,
+ IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_DISPLAY_ID,
+ IPU6EP_FW_PSYS_BB_ACC_OFS_PIN_PP_ID,
+ IPU6EP_FW_PSYS_PSA_ACC_GAMMASTAR_ID,
+ IPU6EP_FW_PSYS_PSA_ACC_GLTM_ID,
+ IPU6EP_FW_PSYS_PSA_ACC_XNR_ID,
+ IPU6EP_FW_PSYS_PSA_VCSC_ID, /* VCSC */
+ IPU6EP_FW_PSYS_ISA_ICA_ID,
+ IPU6EP_FW_PSYS_ISA_LSC_ID,
+ IPU6EP_FW_PSYS_ISA_DPC_ID,
+ IPU6EP_FW_PSYS_ISA_SIS_A_ID,
+ IPU6EP_FW_PSYS_ISA_SIS_B_ID,
+ IPU6EP_FW_PSYS_ISA_B2B_ID,
+ IPU6EP_FW_PSYS_ISA_B2R_R2I_SIE_ID,
+ IPU6EP_FW_PSYS_ISA_R2I_DS_A_ID,
+ IPU6EP_FW_PSYS_ISA_AWB_ID,
+ IPU6EP_FW_PSYS_ISA_AE_ID,
+ IPU6EP_FW_PSYS_ISA_AF_ID,
+ IPU6EP_FW_PSYS_ISA_X2B_MD_ID,
+ IPU6EP_FW_PSYS_ISA_X2B_SVE_RGBIR_ID,
+ IPU6EP_FW_PSYS_ISA_PAF_ID,
+ IPU6EP_FW_PSYS_BB_ACC_GDC0_ID,
+ IPU6EP_FW_PSYS_BB_ACC_TNR_ID,
+ IPU6EP_FW_PSYS_N_CELL_ID
+};
+#endif /* IPU6EP_PLATFORM_RESOURCES_H */
new file mode 100644
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2015 - 2019 Intel Corporation
+
+#include <linux/err.h>
+#include <linux/string.h>
+
+#include "ipu-psys.h"
+#include "ipu-fw-psys.h"
+#include "ipu6se-platform-resources.h"
+
+/* resources table */
+
+/*
+ * Cell types by cell IDs
+ */
+const u8 ipu6se_fw_psys_cell_types[IPU6SE_FW_PSYS_N_CELL_ID] = {
+ IPU6SE_FW_PSYS_SP_CTRL_TYPE_ID,
+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_ICA_ID */
+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_LSC_ID */
+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_DPC_ID */
+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_B2B_ID */
+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_BNLM_ID */
+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_DM_ID */
+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_SIE_ID */
+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID */
+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID */
+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AWB_ID */
+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AE_ID */
+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID, /* IPU6SE_FW_PSYS_ISA_AF_ID*/
+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID /* PAF */
+};
+
+const u16 ipu6se_fw_num_dev_channels[IPU6SE_FW_PSYS_N_DEV_CHN_ID] = {
+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE,
+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE,
+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE,
+ IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE,
+};
+
+const u16 ipu6se_fw_psys_mem_size[IPU6SE_FW_PSYS_N_MEM_ID] = {
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE,
+ IPU6SE_FW_PSYS_LB_VMEM_MAX_SIZE,
+ IPU6SE_FW_PSYS_DMEM0_MAX_SIZE,
+ IPU6SE_FW_PSYS_DMEM1_MAX_SIZE
+};
+
+const u16 ipu6se_fw_psys_dfms[IPU6SE_FW_PSYS_N_DEV_DFM_ID] = {
+ IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE,
+ IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE
+};
+
+const u8
+ipu6se_fw_psys_c_mem[IPU6SE_FW_PSYS_N_CELL_ID][IPU6SE_FW_PSYS_N_MEM_TYPE_ID] = {
+ { /* IPU6SE_FW_PSYS_SP0_ID */
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_DMEM0_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ },
+ { /* IPU6SE_FW_PSYS_ISA_ICA_ID */
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6SE_FW_PSYS_LB_VMEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ },
+ { /* IPU6SE_FW_PSYS_ISA_LSC_ID */
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6SE_FW_PSYS_LB_VMEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ },
+ { /* IPU6SE_FW_PSYS_ISA_DPC_ID */
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6SE_FW_PSYS_LB_VMEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ },
+ { /* IPU6SE_FW_PSYS_ISA_B2B_ID */
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6SE_FW_PSYS_LB_VMEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ },
+
+ { /* IPU6SE_FW_PSYS_ISA_BNLM_ID */
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6SE_FW_PSYS_LB_VMEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ },
+ { /* IPU6SE_FW_PSYS_ISA_DM_ID */
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6SE_FW_PSYS_LB_VMEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ },
+ { /* IPU6SE_FW_PSYS_ISA_R2I_SIE_ID */
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6SE_FW_PSYS_LB_VMEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ },
+ { /* IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID */
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6SE_FW_PSYS_LB_VMEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ },
+ { /* IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID */
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6SE_FW_PSYS_LB_VMEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ },
+ { /* IPU6SE_FW_PSYS_ISA_AWB_ID */
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6SE_FW_PSYS_LB_VMEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ },
+ { /* IPU6SE_FW_PSYS_ISA_AE_ID */
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6SE_FW_PSYS_LB_VMEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ },
+ { /* IPU6SE_FW_PSYS_ISA_AF_ID */
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6SE_FW_PSYS_LB_VMEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ },
+ { /* IPU6SE_FW_PSYS_ISA_PAF_ID */
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID,
+ IPU6SE_FW_PSYS_LB_VMEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ IPU6SE_FW_PSYS_N_MEM_ID,
+ }
+};
+
+static const struct ipu_fw_resource_definitions ipu6se_defs = {
+ .cells = ipu6se_fw_psys_cell_types,
+ .num_cells = IPU6SE_FW_PSYS_N_CELL_ID,
+ .num_cells_type = IPU6SE_FW_PSYS_N_CELL_TYPE_ID,
+
+ .dev_channels = ipu6se_fw_num_dev_channels,
+ .num_dev_channels = IPU6SE_FW_PSYS_N_DEV_CHN_ID,
+
+ .num_ext_mem_types = IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID,
+ .num_ext_mem_ids = IPU6SE_FW_PSYS_N_MEM_ID,
+ .ext_mem_ids = ipu6se_fw_psys_mem_size,
+
+ .num_dfm_ids = IPU6SE_FW_PSYS_N_DEV_DFM_ID,
+
+ .dfms = ipu6se_fw_psys_dfms,
+
+ .cell_mem_row = IPU6SE_FW_PSYS_N_MEM_TYPE_ID,
+ .cell_mem = &ipu6se_fw_psys_c_mem[0][0],
+};
+
+const struct ipu_fw_resource_definitions *ipu6se_res_defs = &ipu6se_defs;
new file mode 100644
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 - 2020 Intel Corporation */
+
+#ifndef IPU6SE_PLATFORM_RESOURCES_H
+#define IPU6SE_PLATFORM_RESOURCES_H
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include "ipu-platform-resources.h"
+
+#define IPU6SE_FW_PSYS_N_PADDING_UINT8_IN_PROCESS_EXT_STRUCT 1
+
+enum {
+ IPU6SE_FW_PSYS_CMD_QUEUE_COMMAND_ID = 0,
+ IPU6SE_FW_PSYS_CMD_QUEUE_DEVICE_ID,
+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG0_COMMAND_ID,
+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG1_COMMAND_ID,
+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG2_COMMAND_ID,
+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG3_COMMAND_ID,
+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG4_COMMAND_ID,
+ IPU6SE_FW_PSYS_CMD_QUEUE_PPG5_COMMAND_ID,
+ IPU6SE_FW_PSYS_N_PSYS_CMD_QUEUE_ID
+};
+
+enum {
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_TYPE_ID = 0,
+ IPU6SE_FW_PSYS_LB_VMEM_TYPE_ID,
+ IPU6SE_FW_PSYS_DMEM_TYPE_ID,
+ IPU6SE_FW_PSYS_VMEM_TYPE_ID,
+ IPU6SE_FW_PSYS_BAMEM_TYPE_ID,
+ IPU6SE_FW_PSYS_PMEM_TYPE_ID,
+ IPU6SE_FW_PSYS_N_MEM_TYPE_ID
+};
+
+enum ipu6se_mem_id {
+ IPU6SE_FW_PSYS_TRANSFER_VMEM0_ID = 0, /* TRANSFER VMEM 0 */
+ IPU6SE_FW_PSYS_LB_VMEM_ID, /* LB VMEM */
+ IPU6SE_FW_PSYS_DMEM0_ID, /* SPC0 Dmem */
+ IPU6SE_FW_PSYS_DMEM1_ID, /* SPP0 Dmem */
+ IPU6SE_FW_PSYS_N_MEM_ID
+};
+
+enum {
+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_ID = 0,
+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_ID,
+ IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_ID,
+ IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_ID,
+ IPU6SE_FW_PSYS_N_DEV_CHN_ID
+};
+
+enum {
+ IPU6SE_FW_PSYS_SP_CTRL_TYPE_ID = 0,
+ IPU6SE_FW_PSYS_SP_SERVER_TYPE_ID,
+ IPU6SE_FW_PSYS_ACC_ISA_TYPE_ID,
+ IPU6SE_FW_PSYS_N_CELL_TYPE_ID
+};
+
+enum {
+ IPU6SE_FW_PSYS_SP0_ID = 0,
+ IPU6SE_FW_PSYS_ISA_ICA_ID,
+ IPU6SE_FW_PSYS_ISA_LSC_ID,
+ IPU6SE_FW_PSYS_ISA_DPC_ID,
+ IPU6SE_FW_PSYS_ISA_B2B_ID,
+ IPU6SE_FW_PSYS_ISA_BNLM_ID,
+ IPU6SE_FW_PSYS_ISA_DM_ID,
+ IPU6SE_FW_PSYS_ISA_R2I_SIE_ID,
+ IPU6SE_FW_PSYS_ISA_R2I_DS_A_ID,
+ IPU6SE_FW_PSYS_ISA_R2I_DS_B_ID,
+ IPU6SE_FW_PSYS_ISA_AWB_ID,
+ IPU6SE_FW_PSYS_ISA_AE_ID,
+ IPU6SE_FW_PSYS_ISA_AF_ID,
+ IPU6SE_FW_PSYS_ISA_PAF_ID,
+ IPU6SE_FW_PSYS_N_CELL_ID
+};
+
+enum {
+ IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID = 0,
+ IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID,
+};
+
+/* Excluding PMEM */
+#define IPU6SE_FW_PSYS_N_DATA_MEM_TYPE_ID (IPU6SE_FW_PSYS_N_MEM_TYPE_ID - 1)
+#define IPU6SE_FW_PSYS_N_DEV_DFM_ID \
+ (IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID + 1)
+#define IPU6SE_FW_PSYS_VMEM0_MAX_SIZE 0x0800
+/* Transfer VMEM0 words, ref HAS Transfer */
+#define IPU6SE_FW_PSYS_TRANSFER_VMEM0_MAX_SIZE 0x0800
+#define IPU6SE_FW_PSYS_LB_VMEM_MAX_SIZE 0x0400 /* LB VMEM words */
+#define IPU6SE_FW_PSYS_DMEM0_MAX_SIZE 0x4000
+#define IPU6SE_FW_PSYS_DMEM1_MAX_SIZE 0x1000
+
+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT0_MAX_SIZE 22
+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_READ_MAX_SIZE 22
+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_EXT1_WRITE_MAX_SIZE 22
+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_IPFD_MAX_SIZE 0
+#define IPU6SE_FW_PSYS_DEV_CHN_DMA_ISA_MAX_SIZE 2
+
+#define IPU6SE_FW_PSYS_DEV_DFM_ISL_FULL_PORT_ID_MAX_SIZE 32
+#define IPU6SE_FW_PSYS_DEV_DFM_LB_FULL_PORT_ID_MAX_SIZE 32
+#define IPU6SE_FW_PSYS_DEV_DFM_ISL_EMPTY_PORT_ID_MAX_SIZE 32
+#define IPU6SE_FW_PSYS_DEV_DFM_LB_EMPTY_PORT_ID_MAX_SIZE 32
+
+#endif /* IPU6SE_PLATFORM_RESOURCES_H */
@@ -2240,5 +2240,15 @@ config MFD_RSMU_SPI
Additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_LJCA
+ tristate "Intel La Jolla Cove Adapter support"
+ select MFD_CORE
+ depends on USB
+ help
+ This adds support for Intel La Jolla Cove USB-I2C/SPI/GPIO
+ Adapter (LJCA). Additional drivers such as I2C_LJCA,
+ GPIO_LJCA, etc. must be enabled in order to use the
+ functionality of the device.
+
endmenu
endif
@@ -279,3 +279,5 @@ rsmu-i2c-objs := rsmu_core.o rsmu_i2c.o
rsmu-spi-objs := rsmu_core.o rsmu_spi.o
obj-$(CONFIG_MFD_RSMU_I2C) += rsmu-i2c.o
obj-$(CONFIG_MFD_RSMU_SPI) += rsmu-spi.o
+
+obj-$(CONFIG_MFD_LJCA) += ljca.o
new file mode 100644
@@ -0,0 +1,1191 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel La Jolla Cove Adapter USB driver
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/ljca.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+
+enum ljca_acpi_match_adr {
+ LJCA_ACPI_MATCH_GPIO,
+ LJCA_ACPI_MATCH_I2C1,
+ LJCA_ACPI_MATCH_I2C2,
+ LJCA_ACPI_MATCH_SPI1,
+};
+
+static char *gpio_hids[] = {
+ "INTC1074",
+ "INTC1096",
+};
+static struct mfd_cell_acpi_match ljca_acpi_match_gpio;
+
+static char *i2c_hids[] = {
+ "INTC1075",
+ "INTC1097",
+};
+static struct mfd_cell_acpi_match ljca_acpi_match_i2cs[2];
+
+static char *spi_hids[] = {
+ "INTC1091",
+ "INTC1098",
+};
+static struct mfd_cell_acpi_match ljca_acpi_match_spis[1];
+
+struct ljca_msg {
+ u8 type;
+ u8 cmd;
+ u8 flags;
+ u8 len;
+ u8 data[];
+} __packed;
+
+struct fw_version {
+ u8 major;
+ u8 minor;
+ u16 patch;
+ u16 build;
+} __packed;
+
+/* stub types */
+enum stub_type {
+ MNG_STUB = 1,
+ DIAG_STUB,
+ GPIO_STUB,
+ I2C_STUB,
+ SPI_STUB,
+};
+
+/* command flags */
+#define ACK_FLAG BIT(0)
+#define RESP_FLAG BIT(1)
+#define CMPL_FLAG BIT(2)
+
+/* MNG stub commands */
+enum ljca_mng_cmd {
+ MNG_GET_VERSION = 1,
+ MNG_RESET_NOTIFY,
+ MNG_RESET,
+ MNG_ENUM_GPIO,
+ MNG_ENUM_I2C,
+ MNG_POWER_STATE_CHANGE,
+ MNG_SET_DFU_MODE,
+ MNG_ENUM_SPI,
+};
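+
+/*
+ * Example frame, for illustration only: a MNG_GET_VERSION request with
+ * the ACK and CMPL flags set and no payload goes out on the bulk OUT
+ * endpoint as just the four struct ljca_msg header bytes
+ *
+ *   0x01 0x01 0x05 0x00   type=MNG_STUB, cmd=MNG_GET_VERSION,
+ *                         flags=ACK_FLAG|CMPL_FLAG, len=0
+ *
+ * followed by len payload bytes (none here). The firmware replies with
+ * the same type/cmd, ACK_FLAG set, and (here) a struct fw_version
+ * payload.
+ */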
+
+/* DIAG commands */
+enum diag_cmd {
+ DIAG_GET_STATE = 1,
+ DIAG_GET_STATISTIC,
+ DIAG_SET_TRACE_LEVEL,
+ DIAG_SET_ECHO_MODE,
+ DIAG_GET_FW_LOG,
+ DIAG_GET_FW_COREDUMP,
+ DIAG_TRIGGER_WDT,
+ DIAG_TRIGGER_FAULT,
+ DIAG_FEED_WDT,
+ DIAG_GET_SECURE_STATE,
+};
+
+struct ljca_i2c_ctr_info {
+ u8 id;
+ u8 capacity;
+ u8 intr_pin;
+} __packed;
+
+struct ljca_i2c_descriptor {
+ u8 num;
+ struct ljca_i2c_ctr_info info[];
+} __packed;
+
+struct ljca_spi_ctr_info {
+ u8 id;
+ u8 capacity;
+} __packed;
+
+struct ljca_spi_descriptor {
+ u8 num;
+ struct ljca_spi_ctr_info info[];
+} __packed;
+
+struct ljca_bank_descriptor {
+ u8 bank_id;
+ u8 pin_num;
+
+ /* 1 bit for each gpio, 1 means valid */
+ u32 valid_pins;
+} __packed;
+
+struct ljca_gpio_descriptor {
+ u8 pins_per_bank;
+ u8 bank_num;
+ struct ljca_bank_descriptor bank_desc[];
+} __packed;
+
+#define MAX_PACKET_SIZE 64
+#define MAX_PAYLOAD_SIZE (MAX_PACKET_SIZE - sizeof(struct ljca_msg))
+#define USB_WRITE_TIMEOUT 200
+#define USB_WRITE_ACK_TIMEOUT 500
+
+struct ljca_event_cb_entry {
+ struct platform_device *pdev;
+ ljca_event_cb_t notify;
+};
+
+struct ljca_stub_packet {
+ u8 *ibuf;
+ u32 ibuf_len;
+};
+
+struct ljca_stub {
+ struct list_head list;
+ u8 type;
+ struct usb_interface *intf;
+ struct mutex mutex;
+ spinlock_t event_cb_lock;
+
+ struct ljca_stub_packet ipacket;
+
+ /* for identifying the ack */
+ bool acked;
+ int cur_cmd;
+
+ struct ljca_event_cb_entry event_entry;
+};
+
+static inline void *ljca_priv(const struct ljca_stub *stub)
+{
+ return (char *)stub + sizeof(struct ljca_stub);
+}
+
+enum ljca_state {
+ LJCA_STOPPED,
+ LJCA_INITED,
+ LJCA_RESET_HANDSHAKE,
+ LJCA_RESET_SYNCED,
+ LJCA_ENUM_GPIO_COMPLETE,
+ LJCA_ENUM_I2C_COMPLETE,
+ LJCA_ENUM_SPI_COMPLETE,
+ LJCA_SUSPEND,
+ LJCA_STARTED,
+ LJCA_FAILED,
+};
+
+struct ljca_dev {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ u8 in_ep; /* the address of the bulk in endpoint */
+ u8 out_ep; /* the address of the bulk out endpoint */
+
+ /* the urb/buffer for read */
+ struct urb *in_urb;
+ unsigned char *ibuf;
+ size_t ibuf_len;
+
+ int state;
+
+ atomic_t active_transfers;
+ wait_queue_head_t disconnect_wq;
+
+ struct list_head stubs_list;
+
+ /* to wait for an ongoing write ack */
+ wait_queue_head_t ack_wq;
+
+ struct mfd_cell *cells;
+ int cell_count;
+};
+
+static int try_match_acpi_hid(struct acpi_device *child,
+ struct mfd_cell_acpi_match *match, char **hids,
+ int hids_num)
+{
+ struct acpi_device_id ids[2] = {};
+ int i;
+
+ for (i = 0; i < hids_num; i++) {
+ strlcpy(ids[0].id, hids[i], sizeof(ids[0].id));
+ if (!acpi_match_device_ids(child, ids)) {
+ match->pnpid = hids[i];
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int precheck_acpi_hid(struct usb_interface *intf)
+{
+ struct acpi_device *parent, *child;
+
+ parent = ACPI_COMPANION(&intf->dev);
+ if (!parent)
+ return -ENODEV;
+
+ list_for_each_entry (child, &parent->children, node) {
+ try_match_acpi_hid(child,
+ &ljca_acpi_match_gpio,
+ gpio_hids, ARRAY_SIZE(gpio_hids));
+ try_match_acpi_hid(child,
+ &ljca_acpi_match_i2cs[0],
+ i2c_hids, ARRAY_SIZE(i2c_hids));
+ try_match_acpi_hid(child,
+ &ljca_acpi_match_i2cs[1],
+ i2c_hids, ARRAY_SIZE(i2c_hids));
+ try_match_acpi_hid(child,
+ &ljca_acpi_match_spis[0],
+ spi_hids, ARRAY_SIZE(spi_hids));
+ }
+
+ return 0;
+}
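+
+/*
+ * Informal sketch of the ACPI layout walked above (a hypothetical DSDT
+ * fragment, not taken from real firmware): the LJCA USB device node has
+ * children whose _HID selects the matching MFD cell, e.g.
+ *
+ *   Device (GPIO) { Name (_HID, "INTC1074") }   -> GPIO cell
+ *   Device (I2C0) { Name (_HID, "INTC1075") }   -> I2C cells
+ */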
+
+static bool ljca_validate(void *data, u32 data_len)
+{
+ struct ljca_msg *header = (struct ljca_msg *)data;
+
+ return (header->len + sizeof(*header) == data_len);
+}
+
+static void ljca_dump(struct ljca_dev *ljca, void *buf, int len)
+{
+ int i;
+ char tmp[256] = { 0 };
+ int n = 0;
+
+ if (!len)
+ return;
+
+ for (i = 0; i < len; i++)
+ n += scnprintf(tmp + n, sizeof(tmp) - n - 1, "%02x ", ((u8 *)buf)[i]);
+
+ dev_dbg(&ljca->intf->dev, "%s\n", tmp);
+}
+
+static struct ljca_stub *ljca_stub_alloc(struct ljca_dev *ljca, int priv_size)
+{
+ struct ljca_stub *stub;
+
+ stub = kzalloc(sizeof(*stub) + priv_size, GFP_KERNEL);
+ if (!stub)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&stub->mutex);
+ spin_lock_init(&stub->event_cb_lock);
+ INIT_LIST_HEAD(&stub->list);
+ list_add_tail(&stub->list, &ljca->stubs_list);
+ dev_dbg(&ljca->intf->dev, "enuming a stub success\n");
+ return stub;
+}
+
+static struct ljca_stub *ljca_stub_find(struct ljca_dev *ljca, u8 type)
+{
+ struct ljca_stub *stub;
+
+ list_for_each_entry (stub, &ljca->stubs_list, list) {
+ if (stub->type == type)
+ return stub;
+ }
+
+ dev_err(&ljca->intf->dev, "usb stub not find, type: %d", type);
+ return ERR_PTR(-ENODEV);
+}
+
+static void ljca_stub_notify(struct ljca_stub *stub, u8 cmd,
+ const void *evt_data, int len)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&stub->event_cb_lock, flags);
+ if (stub->event_entry.notify && stub->event_entry.pdev)
+ stub->event_entry.notify(stub->event_entry.pdev, cmd, evt_data,
+ len);
+ spin_unlock_irqrestore(&stub->event_cb_lock, flags);
+}
+
+static int ljca_parse(struct ljca_dev *ljca, struct ljca_msg *header)
+{
+ struct ljca_stub *stub;
+
+ stub = ljca_stub_find(ljca, header->type);
+ if (IS_ERR(stub))
+ return PTR_ERR(stub);
+
+ if (!(header->flags & ACK_FLAG)) {
+ ljca_stub_notify(stub, header->cmd, header->data, header->len);
+ return 0;
+ }
+
+ if (stub->cur_cmd != header->cmd) {
+ dev_err(&ljca->intf->dev, "header->cmd:%x != stub->cur_cmd:%x",
+ header->cmd, stub->cur_cmd);
+ return -EINVAL;
+ }
+
+ stub->ipacket.ibuf_len = header->len;
+ if (stub->ipacket.ibuf)
+ memcpy(stub->ipacket.ibuf, header->data, header->len);
+
+ stub->acked = true;
+ wake_up(&ljca->ack_wq);
+
+ return 0;
+}
+
+static int ljca_stub_write(struct ljca_stub *stub, u8 cmd, const void *obuf,
+ int obuf_len, void *ibuf, int *ibuf_len, bool wait_ack)
+{
+ struct ljca_msg *header;
+ struct ljca_dev *ljca = usb_get_intfdata(stub->intf);
+ int ret;
+ u8 flags = CMPL_FLAG;
+ int actual;
+
+ atomic_inc(&ljca->active_transfers);
+ if (ljca->state == LJCA_STOPPED) {
+ atomic_dec(&ljca->active_transfers);
+ return -ENODEV;
+ }
+
+ if (obuf_len > MAX_PAYLOAD_SIZE) {
+ atomic_dec(&ljca->active_transfers);
+ return -EINVAL;
+ }
+
+ if (wait_ack)
+ flags |= ACK_FLAG;
+
+ stub->ipacket.ibuf_len = 0;
+ header = kmalloc(sizeof(*header) + obuf_len, GFP_KERNEL);
+ if (!header) {
+ atomic_dec(&ljca->active_transfers);
+ return -ENOMEM;
+ }
+
+ header->type = stub->type;
+ header->cmd = cmd;
+ header->flags = flags;
+ header->len = obuf_len;
+
+ memcpy(header->data, obuf, obuf_len);
+ dev_dbg(&ljca->intf->dev, "send: type:%d cmd:%d flags:%d len:%d\n",
+ header->type, header->cmd, header->flags, header->len);
+ ljca_dump(ljca, header->data, header->len);
+
+ mutex_lock(&stub->mutex);
+ stub->cur_cmd = cmd;
+ stub->ipacket.ibuf = ibuf;
+ stub->acked = false;
+ usb_autopm_get_interface(ljca->intf);
+ ret = usb_bulk_msg(ljca->udev,
+ usb_sndbulkpipe(ljca->udev, ljca->out_ep), header,
+ sizeof(struct ljca_msg) + obuf_len, &actual,
+ USB_WRITE_TIMEOUT);
+ kfree(header);
+ if (ret || actual != sizeof(struct ljca_msg) + obuf_len) {
+ dev_err(&ljca->intf->dev,
+ "bridge write failed ret:%d total_len:%d\n", ret,
+ actual);
+ usb_autopm_put_interface(ljca->intf);
+ goto error;
+ }
+
+ usb_autopm_put_interface(ljca->intf);
+
+ if (wait_ack) {
+ ret = wait_event_timeout(
+ ljca->ack_wq, stub->acked,
+ msecs_to_jiffies(USB_WRITE_ACK_TIMEOUT));
+ if (!ret || !stub->acked) {
+			dev_err(&ljca->intf->dev,
+				"ack wait timed out ret:%d timeout:%d ack:%d\n",
+				ret, USB_WRITE_ACK_TIMEOUT, stub->acked);
+ ret = -ETIMEDOUT;
+ goto error;
+ }
+ }
+
+ if (ibuf_len)
+ *ibuf_len = stub->ipacket.ibuf_len;
+
+ stub->ipacket.ibuf = NULL;
+ stub->ipacket.ibuf_len = 0;
+ ret = 0;
+error:
+ atomic_dec(&ljca->active_transfers);
+ if (ljca->state == LJCA_STOPPED)
+ wake_up(&ljca->disconnect_wq);
+
+ mutex_unlock(&stub->mutex);
+ return ret;
+}
+
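+/*
+ * Resolve the stub for a child platform device from its platform data
+ * and forward the command to ljca_stub_write().
+ */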
+static int ljca_transfer_internal(struct platform_device *pdev, u8 cmd, const void *obuf,
+ int obuf_len, void *ibuf, int *ibuf_len, bool wait_ack)
+{
+ struct ljca_platform_data *ljca_pdata;
+ struct ljca_dev *ljca;
+ struct ljca_stub *stub;
+
+ if (!pdev)
+ return -EINVAL;
+
+ ljca = dev_get_drvdata(pdev->dev.parent);
+ ljca_pdata = dev_get_platdata(&pdev->dev);
+ stub = ljca_stub_find(ljca, ljca_pdata->type);
+ if (IS_ERR(stub))
+ return PTR_ERR(stub);
+
+ return ljca_stub_write(stub, cmd, obuf, obuf_len, ibuf, ibuf_len, wait_ack);
+}
+
+int ljca_transfer(struct platform_device *pdev, u8 cmd, const void *obuf,
+ int obuf_len, void *ibuf, int *ibuf_len)
+{
+ return ljca_transfer_internal(pdev, cmd, obuf, obuf_len, ibuf, ibuf_len,
+ true);
+}
+EXPORT_SYMBOL_GPL(ljca_transfer);
+
+int ljca_transfer_noack(struct platform_device *pdev, u8 cmd, const void *obuf,
+ int obuf_len)
+{
+ return ljca_transfer_internal(pdev, cmd, obuf, obuf_len, NULL, NULL,
+ false);
+}
+EXPORT_SYMBOL_GPL(ljca_transfer_noack);
+
+int ljca_register_event_cb(struct platform_device *pdev,
+ ljca_event_cb_t event_cb)
+{
+ struct ljca_platform_data *ljca_pdata;
+ struct ljca_dev *ljca;
+ struct ljca_stub *stub;
+ unsigned long flags;
+
+ if (!pdev)
+ return -EINVAL;
+
+ ljca = dev_get_drvdata(pdev->dev.parent);
+ ljca_pdata = dev_get_platdata(&pdev->dev);
+ stub = ljca_stub_find(ljca, ljca_pdata->type);
+ if (IS_ERR(stub))
+ return PTR_ERR(stub);
+
+ spin_lock_irqsave(&stub->event_cb_lock, flags);
+ stub->event_entry.notify = event_cb;
+ stub->event_entry.pdev = pdev;
+ spin_unlock_irqrestore(&stub->event_cb_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ljca_register_event_cb);
+
+void ljca_unregister_event_cb(struct platform_device *pdev)
+{
+ struct ljca_platform_data *ljca_pdata;
+ struct ljca_dev *ljca;
+ struct ljca_stub *stub;
+ unsigned long flags;
+
+ ljca = dev_get_drvdata(pdev->dev.parent);
+ ljca_pdata = dev_get_platdata(&pdev->dev);
+ stub = ljca_stub_find(ljca, ljca_pdata->type);
+ if (IS_ERR(stub))
+ return;
+
+ spin_lock_irqsave(&stub->event_cb_lock, flags);
+ stub->event_entry.notify = NULL;
+ stub->event_entry.pdev = NULL;
+ spin_unlock_irqrestore(&stub->event_cb_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ljca_unregister_event_cb);
+
+static void ljca_stub_cleanup(struct ljca_dev *ljca)
+{
+ struct ljca_stub *stub;
+ struct ljca_stub *next;
+
+	list_for_each_entry_safe(stub, next, &ljca->stubs_list, list) {
+ list_del_init(&stub->list);
+ mutex_destroy(&stub->mutex);
+ kfree(stub);
+ }
+}
+
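+/*
+ * Bulk-in URB completion handler: validate the frame length against the
+ * header, dispatch it to the owning stub via ljca_parse() and resubmit
+ * the URB so a read stays outstanding until the device is stopped.
+ */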
+static void ljca_read_complete(struct urb *urb)
+{
+ struct ljca_dev *ljca = urb->context;
+ struct ljca_msg *header = urb->transfer_buffer;
+ int len = urb->actual_length;
+ int ret;
+
+ dev_dbg(&ljca->intf->dev,
+ "bulk read urb got message from fw, status:%d data_len:%d\n",
+ urb->status, urb->actual_length);
+
+ if (urb->status) {
+ /* sync/async unlink faults aren't errors */
+ if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN)
+ return;
+
+ dev_err(&ljca->intf->dev, "read bulk urb transfer failed: %d\n",
+ urb->status);
+ goto resubmit;
+ }
+
+ dev_dbg(&ljca->intf->dev, "receive: type:%d cmd:%d flags:%d len:%d\n",
+ header->type, header->cmd, header->flags, header->len);
+ ljca_dump(ljca, header->data, header->len);
+
+ if (!ljca_validate(header, len)) {
+ dev_err(&ljca->intf->dev,
+			"length mismatch header->len:%d payload_len:%d\n",
+ header->len, len);
+ goto resubmit;
+ }
+
+ ret = ljca_parse(ljca, header);
+ if (ret)
+ dev_err(&ljca->intf->dev,
+			"failed to parse data: ret:%d type:%d len:%d\n", ret,
+ header->type, header->len);
+
+resubmit:
+	ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
+ dev_err(&ljca->intf->dev,
+ "failed submitting read urb, error %d\n", ret);
+}
+
+static int ljca_start(struct ljca_dev *ljca)
+{
+ int ret;
+
+ usb_fill_bulk_urb(ljca->in_urb, ljca->udev,
+ usb_rcvbulkpipe(ljca->udev, ljca->in_ep), ljca->ibuf,
+ ljca->ibuf_len, ljca_read_complete, ljca);
+
+ ret = usb_submit_urb(ljca->in_urb, GFP_KERNEL);
+ if (ret) {
+ dev_err(&ljca->intf->dev,
+ "failed submitting read urb, error %d\n", ret);
+ }
+ return ret;
+}
+
+struct ljca_mng_priv {
+ long reset_id;
+};
+
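+/*
+ * Reset handshake: send an incrementing reset_id and expect the
+ * firmware to echo the same value back, confirming the link is alive.
+ */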
+static int ljca_mng_reset_handshake(struct ljca_stub *stub)
+{
+ int ret;
+ struct ljca_mng_priv *priv;
+ __le32 reset_id;
+ __le32 reset_id_ret = 0;
+ int ilen;
+
+ priv = ljca_priv(stub);
+ reset_id = cpu_to_le32(priv->reset_id++);
+ ret = ljca_stub_write(stub, MNG_RESET_NOTIFY, &reset_id,
+ sizeof(reset_id), &reset_id_ret, &ilen, true);
+ if (ret || ilen != sizeof(reset_id_ret) || reset_id_ret != reset_id) {
+ dev_err(&stub->intf->dev,
+ "MNG_RESET_NOTIFY failed reset_id:%d/%d ret:%d\n",
+ le32_to_cpu(reset_id_ret), le32_to_cpu(reset_id), ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static inline int ljca_mng_reset(struct ljca_stub *stub)
+{
+ return ljca_stub_write(stub, MNG_RESET, NULL, 0, NULL, NULL, true);
+}
+
+static int ljca_add_mfd_cell(struct ljca_dev *ljca, struct mfd_cell *cell)
+{
+ struct mfd_cell *new_cells;
+
+ /* Enumerate the device even if it does not appear in DSDT */
+ if (!cell->acpi_match->pnpid)
+ dev_warn(&ljca->intf->dev,
+ "The HID of cell %s does not exist in DSDT\n",
+ cell->name);
+
+ new_cells = krealloc_array(ljca->cells, (ljca->cell_count + 1),
+ sizeof(struct mfd_cell), GFP_KERNEL);
+ if (!new_cells)
+ return -ENOMEM;
+
+ memcpy(&new_cells[ljca->cell_count], cell, sizeof(*cell));
+ ljca->cells = new_cells;
+ ljca->cell_count++;
+
+ return 0;
+}
+
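+/*
+ * Build the GPIO stub from the firmware-provided descriptor: record the
+ * pin count and valid-pin bitmap, then expose it as an "ljca-gpio" mfd
+ * cell.
+ */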
+static int ljca_gpio_stub_init(struct ljca_dev *ljca,
+ struct ljca_gpio_descriptor *desc)
+{
+ struct ljca_stub *stub;
+ struct mfd_cell cell = { 0 };
+ struct ljca_platform_data *pdata;
+ int gpio_num = desc->pins_per_bank * desc->bank_num;
+ int i;
+ u32 valid_pin[MAX_GPIO_NUM / (sizeof(u32) * BITS_PER_BYTE)];
+
+ if (gpio_num > MAX_GPIO_NUM)
+ return -EINVAL;
+
+ stub = ljca_stub_alloc(ljca, sizeof(*pdata));
+ if (IS_ERR(stub))
+ return PTR_ERR(stub);
+
+ stub->type = GPIO_STUB;
+ stub->intf = ljca->intf;
+
+ pdata = ljca_priv(stub);
+ pdata->type = stub->type;
+ pdata->gpio_info.num = gpio_num;
+
+ for (i = 0; i < desc->bank_num; i++)
+ valid_pin[i] = desc->bank_desc[i].valid_pins;
+
+ bitmap_from_arr32(pdata->gpio_info.valid_pin_map, valid_pin, gpio_num);
+
+ cell.name = "ljca-gpio";
+ cell.platform_data = pdata;
+ cell.pdata_size = sizeof(*pdata);
+ cell.acpi_match = &ljca_acpi_match_gpio;
+
+ return ljca_add_mfd_cell(ljca, &cell);
+}
+
+static int ljca_mng_enum_gpio(struct ljca_stub *stub)
+{
+ struct ljca_dev *ljca = usb_get_intfdata(stub->intf);
+ struct ljca_gpio_descriptor *desc;
+ int ret;
+ int len;
+
+ desc = kzalloc(MAX_PAYLOAD_SIZE, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ ret = ljca_stub_write(stub, MNG_ENUM_GPIO, NULL, 0, desc, &len, true);
+	if (ret || len != sizeof(*desc) + desc->bank_num *
+				sizeof(desc->bank_desc[0])) {
+		dev_err(&stub->intf->dev,
+			"enum gpio failed ret:%d len:%d bank_num:%d\n", ret,
+			len, desc->bank_num);
+		kfree(desc);
+		return -EIO;
+	}
+
+ ret = ljca_gpio_stub_init(ljca, desc);
+ kfree(desc);
+ return ret;
+}
+
+static int ljca_i2c_stub_init(struct ljca_dev *ljca,
+ struct ljca_i2c_descriptor *desc)
+{
+ struct ljca_stub *stub;
+ struct ljca_platform_data *pdata;
+ int i;
+ int ret;
+
+ stub = ljca_stub_alloc(ljca, desc->num * sizeof(*pdata));
+ if (IS_ERR(stub))
+ return PTR_ERR(stub);
+
+ stub->type = I2C_STUB;
+ stub->intf = ljca->intf;
+ pdata = ljca_priv(stub);
+
+ for (i = 0; i < desc->num; i++) {
+ struct mfd_cell cell = { 0 };
+ pdata[i].type = stub->type;
+
+ pdata[i].i2c_info.id = desc->info[i].id;
+ pdata[i].i2c_info.capacity = desc->info[i].capacity;
+ pdata[i].i2c_info.intr_pin = desc->info[i].intr_pin;
+
+ cell.name = "ljca-i2c";
+ cell.platform_data = &pdata[i];
+ cell.pdata_size = sizeof(pdata[i]);
+ if (i < ARRAY_SIZE(ljca_acpi_match_i2cs))
+ cell.acpi_match = &ljca_acpi_match_i2cs[i];
+
+ ret = ljca_add_mfd_cell(ljca, &cell);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ljca_mng_enum_i2c(struct ljca_stub *stub)
+{
+ struct ljca_dev *ljca = usb_get_intfdata(stub->intf);
+ struct ljca_i2c_descriptor *desc;
+ int ret;
+ int len;
+
+ desc = kzalloc(MAX_PAYLOAD_SIZE, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ ret = ljca_stub_write(stub, MNG_ENUM_I2C, NULL, 0, desc, &len, true);
+ if (ret) {
+ dev_err(&stub->intf->dev,
+ "MNG_ENUM_I2C failed ret:%d len:%d num:%d\n", ret, len,
+ desc->num);
+ kfree(desc);
+ return -EIO;
+ }
+
+ ret = ljca_i2c_stub_init(ljca, desc);
+ kfree(desc);
+ return ret;
+}
+
+static int ljca_spi_stub_init(struct ljca_dev *ljca,
+ struct ljca_spi_descriptor *desc)
+{
+ struct ljca_stub *stub;
+ struct ljca_platform_data *pdata;
+ int i;
+ int ret;
+
+ stub = ljca_stub_alloc(ljca, desc->num * sizeof(*pdata));
+ if (IS_ERR(stub))
+ return PTR_ERR(stub);
+
+ stub->type = SPI_STUB;
+ stub->intf = ljca->intf;
+ pdata = ljca_priv(stub);
+
+ for (i = 0; i < desc->num; i++) {
+ struct mfd_cell cell = { 0 };
+ pdata[i].type = stub->type;
+
+ pdata[i].spi_info.id = desc->info[i].id;
+ pdata[i].spi_info.capacity = desc->info[i].capacity;
+
+ cell.name = "ljca-spi";
+ cell.platform_data = &pdata[i];
+ cell.pdata_size = sizeof(pdata[i]);
+ if (i < ARRAY_SIZE(ljca_acpi_match_spis))
+ cell.acpi_match = &ljca_acpi_match_spis[i];
+
+ ret = ljca_add_mfd_cell(ljca, &cell);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ljca_mng_enum_spi(struct ljca_stub *stub)
+{
+ struct ljca_dev *ljca = usb_get_intfdata(stub->intf);
+ struct ljca_spi_descriptor *desc;
+ int ret;
+ int len;
+
+ desc = kzalloc(MAX_PAYLOAD_SIZE, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ ret = ljca_stub_write(stub, MNG_ENUM_SPI, NULL, 0, desc, &len, true);
+ if (ret) {
+ dev_err(&stub->intf->dev,
+			"MNG_ENUM_SPI failed ret:%d len:%d num:%d\n", ret, len,
+ desc->num);
+ kfree(desc);
+ return -EIO;
+ }
+
+ ret = ljca_spi_stub_init(ljca, desc);
+ kfree(desc);
+ return ret;
+}
+
+static int ljca_mng_get_version(struct ljca_stub *stub, char *buf)
+{
+ struct fw_version version = {0};
+ int ret;
+ int len;
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = ljca_stub_write(stub, MNG_GET_VERSION, NULL, 0, &version, &len, true);
+	if (ret || len < sizeof(struct fw_version)) {
+		dev_err(&stub->intf->dev,
+			"MNG_GET_VERSION failed ret:%d len:%d\n", ret, len);
+		return ret ? ret : -EIO;
+	}
+
+ return sysfs_emit(buf, "%d.%d.%d.%d\n", version.major, version.minor,
+ le16_to_cpu(version.patch), le16_to_cpu(version.build));
+}
+
+static inline int ljca_mng_set_dfu_mode(struct ljca_stub *stub)
+{
+ return ljca_stub_write(stub, MNG_SET_DFU_MODE, NULL, 0, NULL, NULL, true);
+}
+
+static int ljca_mng_link(struct ljca_dev *ljca, struct ljca_stub *stub)
+{
+ int ret;
+
+ ret = ljca_mng_reset_handshake(stub);
+ if (ret)
+ return ret;
+
+ ljca->state = LJCA_RESET_SYNCED;
+
+ ret = ljca_mng_enum_gpio(stub);
+ if (ret)
+ return ret;
+
+ ljca->state = LJCA_ENUM_GPIO_COMPLETE;
+
+ ret = ljca_mng_enum_i2c(stub);
+ if (ret)
+ return ret;
+
+ ljca->state = LJCA_ENUM_I2C_COMPLETE;
+
+ ret = ljca_mng_enum_spi(stub);
+ if (ret)
+ return ret;
+
+ ljca->state = LJCA_ENUM_SPI_COMPLETE;
+ return ret;
+}
+
+static int ljca_mng_init(struct ljca_dev *ljca)
+{
+ struct ljca_stub *stub;
+ struct ljca_mng_priv *priv;
+ int ret;
+
+ stub = ljca_stub_alloc(ljca, sizeof(*priv));
+ if (IS_ERR(stub))
+ return PTR_ERR(stub);
+
+ priv = ljca_priv(stub);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->reset_id = 0;
+ stub->type = MNG_STUB;
+ stub->intf = ljca->intf;
+
+ ret = ljca_mng_link(ljca, stub);
+ if (ret)
+		dev_err(&ljca->intf->dev,
+			"mng stub link failed ret:%d state:%d\n", ret,
+			ljca->state);
+
+ return ret;
+}
+
+static inline int ljca_diag_get_fw_log(struct ljca_stub *stub, void *buf)
+{
+ int ret;
+ int len;
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = ljca_stub_write(stub, DIAG_GET_FW_LOG, NULL, 0, buf, &len, true);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static inline int ljca_diag_get_coredump(struct ljca_stub *stub, void *buf)
+{
+ int ret;
+ int len;
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = ljca_stub_write(stub, DIAG_GET_FW_COREDUMP, NULL, 0, buf, &len, true);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static inline int ljca_diag_set_trace_level(struct ljca_stub *stub, u8 level)
+{
+ return ljca_stub_write(stub, DIAG_SET_TRACE_LEVEL, &level,
+ sizeof(level), NULL, NULL, true);
+}
+
+static int ljca_diag_init(struct ljca_dev *ljca)
+{
+ struct ljca_stub *stub;
+
+ stub = ljca_stub_alloc(ljca, 0);
+ if (IS_ERR(stub))
+ return PTR_ERR(stub);
+
+ stub->type = DIAG_STUB;
+ stub->intf = ljca->intf;
+ return 0;
+}
+
+static void ljca_delete(struct ljca_dev *ljca)
+{
+ usb_free_urb(ljca->in_urb);
+ usb_put_intf(ljca->intf);
+ usb_put_dev(ljca->udev);
+ kfree(ljca->ibuf);
+ kfree(ljca->cells);
+ kfree(ljca);
+}
+
+static int ljca_init(struct ljca_dev *ljca)
+{
+ init_waitqueue_head(&ljca->ack_wq);
+ init_waitqueue_head(&ljca->disconnect_wq);
+ INIT_LIST_HEAD(&ljca->stubs_list);
+
+ ljca->state = LJCA_INITED;
+
+ return 0;
+}
+
+static void ljca_stop(struct ljca_dev *ljca)
+{
+ ljca->state = LJCA_STOPPED;
+ wait_event_interruptible(ljca->disconnect_wq,
+ !atomic_read(&ljca->active_transfers));
+ usb_kill_urb(ljca->in_urb);
+}
+
+static ssize_t cmd_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct ljca_dev *ljca = usb_get_intfdata(intf);
+	struct ljca_stub *mng_stub = ljca_stub_find(ljca, MNG_STUB);
+	struct ljca_stub *diag_stub = ljca_stub_find(ljca, DIAG_STUB);
+
+	if (IS_ERR(mng_stub) || IS_ERR(diag_stub))
+		return -ENODEV;
+
+	if (sysfs_streq(buf, "dfu"))
+		ljca_mng_set_dfu_mode(mng_stub);
+	else if (sysfs_streq(buf, "reset"))
+		ljca_mng_reset(mng_stub);
+	else if (sysfs_streq(buf, "debug"))
+		ljca_diag_set_trace_level(diag_stub, 3);
+
+ return count;
+}
+
+static ssize_t cmd_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", "supported cmd: [dfu, reset, debug]");
+}
+static DEVICE_ATTR_RW(cmd);
+
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct ljca_dev *ljca = usb_get_intfdata(intf);
+	struct ljca_stub *stub = ljca_stub_find(ljca, MNG_STUB);
+
+	if (IS_ERR(stub))
+		return PTR_ERR(stub);
+
+	return ljca_mng_get_version(stub, buf);
+}
+static DEVICE_ATTR_RO(version);
+
+static ssize_t log_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct ljca_dev *ljca = usb_get_intfdata(intf);
+	struct ljca_stub *diag_stub = ljca_stub_find(ljca, DIAG_STUB);
+
+	if (IS_ERR(diag_stub))
+		return PTR_ERR(diag_stub);
+
+	return ljca_diag_get_fw_log(diag_stub, buf);
+}
+static DEVICE_ATTR_RO(log);
+
+static ssize_t coredump_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct ljca_dev *ljca = usb_get_intfdata(intf);
+	struct ljca_stub *diag_stub = ljca_stub_find(ljca, DIAG_STUB);
+
+	if (IS_ERR(diag_stub))
+		return PTR_ERR(diag_stub);
+
+	return ljca_diag_get_coredump(diag_stub, buf);
+}
+static DEVICE_ATTR_RO(coredump);
+
+static struct attribute *ljca_attrs[] = {
+ &dev_attr_version.attr,
+ &dev_attr_cmd.attr,
+ &dev_attr_log.attr,
+ &dev_attr_coredump.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ljca);
+
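+/*
+ * Probe sequence: locate the bulk endpoints, start the bulk-in URB, run
+ * the reset handshake and GPIO/I2C/SPI enumeration over the MNG stub,
+ * then register the collected mfd cells as child devices.
+ */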
+static int ljca_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct ljca_dev *ljca;
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+ int ret;
+
+	ret = precheck_acpi_hid(intf);
+	if (ret)
+		return ret;
+
+ /* allocate memory for our device state and initialize it */
+ ljca = kzalloc(sizeof(*ljca), GFP_KERNEL);
+ if (!ljca)
+ return -ENOMEM;
+
+ ljca_init(ljca);
+ ljca->udev = usb_get_dev(interface_to_usbdev(intf));
+ ljca->intf = usb_get_intf(intf);
+
+	/* set up the endpoint information; use only the first bulk-in and bulk-out endpoints */
+ ret = usb_find_common_endpoints(intf->cur_altsetting, &bulk_in,
+ &bulk_out, NULL, NULL);
+ if (ret) {
+ dev_err(&intf->dev,
+ "Could not find both bulk-in and bulk-out endpoints\n");
+ goto error;
+ }
+
+ ljca->ibuf_len = usb_endpoint_maxp(bulk_in);
+ ljca->in_ep = bulk_in->bEndpointAddress;
+ ljca->ibuf = kzalloc(ljca->ibuf_len, GFP_KERNEL);
+ if (!ljca->ibuf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ljca->in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!ljca->in_urb) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ljca->out_ep = bulk_out->bEndpointAddress;
+ dev_dbg(&intf->dev, "bulk_in size:%zu addr:%d bulk_out addr:%d\n",
+ ljca->ibuf_len, ljca->in_ep, ljca->out_ep);
+
+ /* save our data pointer in this intf device */
+ usb_set_intfdata(intf, ljca);
+ ret = ljca_start(ljca);
+ if (ret) {
+ dev_err(&intf->dev, "bridge read start failed ret %d\n", ret);
+ goto error;
+ }
+
+ ret = ljca_mng_init(ljca);
+ if (ret) {
+ dev_err(&intf->dev, "register mng stub failed ret %d\n", ret);
+ goto error;
+ }
+
+ ret = ljca_diag_init(ljca);
+ if (ret) {
+ dev_err(&intf->dev, "register diag stub failed ret %d\n", ret);
+ goto error;
+ }
+
+ ret = mfd_add_hotplug_devices(&intf->dev, ljca->cells,
+ ljca->cell_count);
+ if (ret) {
+		dev_err(&intf->dev, "failed to add %d mfd devices: %d\n",
+			ljca->cell_count, ret);
+ goto error;
+ }
+
+ ljca->state = LJCA_STARTED;
+	dev_info(&intf->dev, "LJCA USB device initialized\n");
+ return 0;
+
+error:
+ dev_err(&intf->dev, "LJCA USB device init failed\n");
+ /* this frees allocated memory */
+ ljca_stub_cleanup(ljca);
+ ljca_delete(ljca);
+ return ret;
+}
+
+static void ljca_disconnect(struct usb_interface *intf)
+{
+ struct ljca_dev *ljca;
+
+ ljca = usb_get_intfdata(intf);
+
+ ljca_stop(ljca);
+ mfd_remove_devices(&intf->dev);
+ ljca_stub_cleanup(ljca);
+ usb_set_intfdata(intf, NULL);
+ ljca_delete(ljca);
+ dev_dbg(&intf->dev, "LJCA disconnected\n");
+}
+
+static int ljca_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct ljca_dev *ljca = usb_get_intfdata(intf);
+
+ ljca_stop(ljca);
+ ljca->state = LJCA_SUSPEND;
+
+ dev_dbg(&intf->dev, "LJCA suspend\n");
+ return 0;
+}
+
+static int ljca_resume(struct usb_interface *intf)
+{
+ struct ljca_dev *ljca = usb_get_intfdata(intf);
+
+ ljca->state = LJCA_STARTED;
+ dev_dbg(&intf->dev, "LJCA resume\n");
+ return ljca_start(ljca);
+}
+
+static const struct usb_device_id ljca_table[] = {
+ {USB_DEVICE(0x8086, 0x0b63)},
+ {}
+};
+MODULE_DEVICE_TABLE(usb, ljca_table);
+
+static struct usb_driver ljca_driver = {
+ .name = "ljca",
+ .probe = ljca_probe,
+ .disconnect = ljca_disconnect,
+ .suspend = ljca_suspend,
+ .resume = ljca_resume,
+ .id_table = ljca_table,
+ .dev_groups = ljca_groups,
+ .supports_autosuspend = 1,
+};
+
+module_usb_driver(ljca_driver);
+
+MODULE_AUTHOR("Ye Xiang <xiang.ye@intel.com>");
+MODULE_AUTHOR("Zhang Lixu <lixu.zhang@intel.com>");
+MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB driver");
+MODULE_LICENSE("GPL v2");
@@ -477,6 +477,7 @@ source "drivers/misc/ti-st/Kconfig"
source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
+source "drivers/misc/ivsc/Kconfig"
source "drivers/misc/vmw_vmci/Kconfig"
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
@@ -41,6 +41,7 @@ obj-y += ti-st/
obj-y += lis3lv02d/
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_INTEL_MEI) += mei/
+obj-$(CONFIG_INTEL_VSC) += ivsc/
obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
new file mode 100644
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2021, Intel Corporation. All rights reserved.
+
+config INTEL_VSC
+ tristate "Intel VSC"
+ select INTEL_MEI_VSC
+ help
+	  Add support for the Intel Visual Sensing Controller (IVSC).
+
+config INTEL_VSC_CSI
+ tristate "Intel VSC CSI client"
+ depends on INTEL_VSC
+ select INTEL_MEI
+ help
+ Add CSI support for Intel Visual Sensing Controller (IVSC).
+
+config INTEL_VSC_ACE
+ tristate "Intel VSC ACE client"
+ depends on INTEL_VSC
+ select INTEL_MEI
+ help
+ Add ACE support for Intel Visual Sensing Controller (IVSC).
+
+config INTEL_VSC_PSE
+ tristate "Intel VSC PSE client"
+ depends on DEBUG_FS
+ select INTEL_MEI
+ select INTEL_MEI_VSC
+ help
+ Add PSE support for Intel Visual Sensing Controller (IVSC) to
+ expose debugging information in files under /sys/kernel/debug/.
+
+config INTEL_VSC_ACE_DEBUG
+ tristate "Intel VSC ACE debug client"
+ depends on DEBUG_FS
+ select INTEL_MEI
+ select INTEL_MEI_VSC
+ help
+ Add ACE debug support for Intel Visual Sensing Controller (IVSC)
+ to expose debugging information in files under /sys/kernel/debug/.
new file mode 100644
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2021, Intel Corporation. All rights reserved.
+
+obj-$(CONFIG_INTEL_VSC) += intel_vsc.o
+obj-$(CONFIG_INTEL_VSC_CSI) += mei_csi.o
+obj-$(CONFIG_INTEL_VSC_ACE) += mei_ace.o
+obj-$(CONFIG_INTEL_VSC_PSE) += mei_pse.o
+obj-$(CONFIG_INTEL_VSC_ACE_DEBUG) += mei_ace_debug.o
new file mode 100644
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Intel Corporation */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/vsc.h>
+
+#include "intel_vsc.h"
+
+#define ACE_PRIVACY_ON 2
+
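+/*
+ * Singleton bridging the CSI and ACE MEI clients: the spinlock guards
+ * the registration state, the mutex serializes camera ownership
+ * transactions.
+ */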
+struct intel_vsc {
+	spinlock_t lock;
+	struct mutex mutex;
+
+	void *csi;
+	struct vsc_csi_ops *csi_ops;
+	bool csi_registered;
+
+	void *ace;
+	struct vsc_ace_ops *ace_ops;
+	bool ace_registered;
+};
+
+static struct intel_vsc vsc;
+
+static int check_component_ready(void)
+{
+	int ret = -ENODEV;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vsc.lock, flags);
+
+	if (vsc.ace_registered && vsc.csi_registered)
+		ret = 0;
+
+	spin_unlock_irqrestore(&vsc.lock, flags);
+
+	return ret;
+}
+
+static void update_camera_status(struct vsc_camera_status *status,
+ struct camera_status *s)
+{
+ if (status && s) {
+ status->owner = s->camera_owner;
+ status->exposure_level = s->exposure_level;
+ status->status = VSC_PRIVACY_OFF;
+
+ if (s->privacy_stat == ACE_PRIVACY_ON)
+ status->status = VSC_PRIVACY_ON;
+ }
+}
+
+int vsc_register_ace(void *ace, struct vsc_ace_ops *ops)
+{
+	unsigned long flags;
+
+	if (!ace || !ops || !ops->ipu_own_camera || !ops->ace_own_camera) {
+		pr_err("register ace failed\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&vsc.lock, flags);
+
+	vsc.ace = ace;
+	vsc.ace_ops = ops;
+	vsc.ace_registered = true;
+
+	spin_unlock_irqrestore(&vsc.lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vsc_register_ace);
+
+void vsc_unregister_ace(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vsc.lock, flags);
+
+	vsc.ace_registered = false;
+
+ spin_unlock_irqrestore(&vsc.lock, flags);
+}
+EXPORT_SYMBOL_GPL(vsc_unregister_ace);
+
+int vsc_register_csi(void *csi, struct vsc_csi_ops *ops)
+{
+	unsigned long flags;
+
+	if (!csi || !ops || !ops->set_privacy_callback ||
+	    !ops->set_owner || !ops->set_mipi_conf) {
+		pr_err("register csi failed\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&vsc.lock, flags);
+
+	vsc.csi = csi;
+	vsc.csi_ops = ops;
+	vsc.csi_registered = true;
+
+	spin_unlock_irqrestore(&vsc.lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vsc_register_csi);
+
+void vsc_unregister_csi(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&vsc.lock, flags);
+
+	vsc.csi_registered = false;
+
+ spin_unlock_irqrestore(&vsc.lock, flags);
+}
+EXPORT_SYMBOL_GPL(vsc_unregister_csi);
+
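+/*
+ * Acquire the camera for the IPU: install the privacy callback, ask ACE
+ * to hand the camera to the IPU, switch CSI ownership to the IPU and
+ * finally program the MIPI configuration.
+ */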
+int vsc_acquire_camera_sensor(struct vsc_mipi_config *config,
+ vsc_privacy_callback_t callback,
+ void *handle,
+ struct vsc_camera_status *status)
+{
+ int ret;
+ struct camera_status s;
+ struct mipi_conf conf = { 0 };
+
+ struct vsc_csi_ops *csi_ops;
+ struct vsc_ace_ops *ace_ops;
+
+ if (!config)
+ return -EINVAL;
+
+ ret = check_component_ready();
+ if (ret < 0) {
+ pr_info("intel vsc not ready\n");
+ return -EAGAIN;
+ }
+
+ mutex_lock(&vsc.mutex);
+	/* no need to re-check the components here */
+
+ csi_ops = vsc.csi_ops;
+ ace_ops = vsc.ace_ops;
+
+ csi_ops->set_privacy_callback(vsc.csi, callback, handle);
+
+ ret = ace_ops->ipu_own_camera(vsc.ace, &s);
+ if (ret) {
+ pr_err("ipu own camera failed\n");
+ goto err;
+ }
+ update_camera_status(status, &s);
+
+ ret = csi_ops->set_owner(vsc.csi, CSI_IPU);
+ if (ret) {
+ pr_err("ipu own csi failed\n");
+ goto err;
+ }
+
+ conf.lane_num = config->lane_num;
+ conf.freq = config->freq;
+ ret = csi_ops->set_mipi_conf(vsc.csi, &conf);
+ if (ret) {
+ pr_err("config mipi failed\n");
+ goto err;
+ }
+
+err:
+ mutex_unlock(&vsc.mutex);
+ msleep(100);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vsc_acquire_camera_sensor);
+
+int vsc_release_camera_sensor(struct vsc_camera_status *status)
+{
+ int ret;
+ struct camera_status s;
+
+ struct vsc_csi_ops *csi_ops;
+ struct vsc_ace_ops *ace_ops;
+
+ ret = check_component_ready();
+ if (ret < 0) {
+ pr_info("intel vsc not ready\n");
+ return -EAGAIN;
+ }
+
+ mutex_lock(&vsc.mutex);
+	/* no need to re-check the components here */
+
+ csi_ops = vsc.csi_ops;
+ ace_ops = vsc.ace_ops;
+
+ csi_ops->set_privacy_callback(vsc.csi, NULL, NULL);
+
+ ret = csi_ops->set_owner(vsc.csi, CSI_FW);
+ if (ret) {
+ pr_err("vsc own csi failed\n");
+ goto err;
+ }
+
+ ret = ace_ops->ace_own_camera(vsc.ace, &s);
+ if (ret) {
+ pr_err("vsc own camera failed\n");
+ goto err;
+ }
+ update_camera_status(status, &s);
+
+err:
+ mutex_unlock(&vsc.mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vsc_release_camera_sensor);
+
+static int __init intel_vsc_init(void)
+{
+ memset(&vsc, 0, sizeof(struct intel_vsc));
+
+ spin_lock_init(&vsc.lock);
+ mutex_init(&vsc.mutex);
+
+	vsc.csi_registered = false;
+	vsc.ace_registered = false;
+
+ return 0;
+}
+
+static void __exit intel_vsc_exit(void)
+{
+}
+
+module_init(intel_vsc_init);
+module_exit(intel_vsc_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_SOFTDEP("post: mei_csi mei_ace");
+MODULE_DESCRIPTION("Device driver for Intel VSC");
new file mode 100644
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _INTEL_VSC_H_
+#define _INTEL_VSC_H_
+
+#include <linux/types.h>
+#include <linux/vsc.h>
+
+/* csi power state definition */
+enum csi_power_state {
+ POWER_OFF = 0,
+ POWER_ON,
+};
+
+/* csi ownership definition */
+enum csi_owner {
+ CSI_FW = 0,
+ CSI_IPU,
+};
+
+/* mipi configuration structure */
+struct mipi_conf {
+ uint32_t lane_num;
+ uint32_t freq;
+
+ /* for future use */
+ uint32_t rsvd[2];
+} __packed;
+
+/* camera status structure */
+struct camera_status {
+ uint8_t camera_owner : 2;
+ uint8_t privacy_stat : 2;
+
+ /* for future use */
+ uint8_t rsvd : 4;
+
+ uint32_t exposure_level;
+} __packed;
+
+struct vsc_ace_ops {
+ /**
+	 * @brief let ACE take camera ownership
+ *
+ * @param ace The pointer of ace client device
+ * @param status The pointer of camera status
+ *
+ * @return 0 on success, negative on failure
+ */
+ int (*ace_own_camera)(void *ace, struct camera_status *status);
+
+ /**
+	 * @brief let the IPU take camera ownership
+ *
+ * @param ace The pointer of ace client device
+ * @param status The pointer of camera status
+ *
+ * @return 0 on success, negative on failure
+ */
+ int (*ipu_own_camera)(void *ace, struct camera_status *status);
+
+ /**
+ * @brief get current camera status
+ *
+ * @param ace The pointer of ace client device
+ * @param status The pointer of camera status
+ *
+ * @return 0 on success, negative on failure
+ */
+ int (*get_camera_status)(void *ace, struct camera_status *status);
+};
+
+struct vsc_csi_ops {
+ /**
+ * @brief set csi ownership
+ *
+ * @param csi The pointer of csi client device
+ * @param owner The csi ownership going to set
+ *
+ * @return 0 on success, negative on failure
+ */
+ int (*set_owner)(void *csi, enum csi_owner owner);
+
+ /**
+ * @brief get current csi ownership
+ *
+ * @param csi The pointer of csi client device
+ * @param owner The pointer of csi ownership
+ *
+ * @return 0 on success, negative on failure
+ */
+ int (*get_owner)(void *csi, enum csi_owner *owner);
+
+ /**
+ * @brief configure csi with provided parameter
+ *
+ * @param csi The pointer of csi client device
+ * @param config The pointer of csi configuration
+ * parameter going to set
+ *
+ * @return 0 on success, negative on failure
+ */
+ int (*set_mipi_conf)(void *csi, struct mipi_conf *conf);
+
+ /**
+ * @brief get the current csi configuration
+ *
+ * @param csi The pointer of csi client device
+ * @param config The pointer of csi configuration parameter
+ * holding the returned result
+ *
+ * @return 0 on success, negative on failure
+ */
+ int (*get_mipi_conf)(void *csi, struct mipi_conf *conf);
+
+ /**
+ * @brief set csi power state
+ *
+ * @param csi The pointer of csi client device
+ * @param status csi power status going to set
+ *
+ * @return 0 on success, negative on failure
+ */
+ int (*set_power_state)(void *csi, enum csi_power_state state);
+
+ /**
+ * @brief get csi power state
+ *
+ * @param csi The pointer of csi client device
+ * @param status The pointer of variable holding csi power status
+ *
+ * @return 0 on success, negative on failure
+ */
+ int (*get_power_state)(void *csi, enum csi_power_state *state);
+
+ /**
+ * @brief set csi privacy callback
+ *
+ * @param csi The pointer of csi client device
+ * @param callback The pointer of privacy callback function
+ * @param handle Privacy callback runtime context
+ */
+ void (*set_privacy_callback)(void *csi,
+ vsc_privacy_callback_t callback,
+ void *handle);
+};
+
+/**
+ * @brief register ace client
+ *
+ * @param ace The pointer of ace client device
+ * @param ops The pointer of ace ops
+ *
+ * @return 0 on success, negative on failure
+ */
+int vsc_register_ace(void *ace, struct vsc_ace_ops *ops);
+
+/**
+ * @brief unregister ace client
+ */
+void vsc_unregister_ace(void);
+
+/**
+ * @brief register csi client
+ *
+ * @param csi The pointer of csi client device
+ * @param ops The pointer of csi ops
+ *
+ * @return 0 on success, negative on failure
+ */
+int vsc_register_csi(void *csi, struct vsc_csi_ops *ops);
+
+/**
+ * @brief unregister csi client
+ */
+void vsc_unregister_csi(void);
+
+#endif
new file mode 100644
@@ -0,0 +1,589 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Intel Corporation */
+
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/vsc.h>
+
+#include "intel_vsc.h"
+
+#define ACE_TIMEOUT (5 * HZ)
+#define MEI_ACE_DRIVER_NAME "vsc_ace"
+
+#define UUID_GET_FW_ID UUID_LE(0x6167DCFB, 0x72F1, 0x4584, \
+ 0xBF, 0xE3, 0x84, 0x17, 0x71, 0xAA, 0x79, 0x0B)
+
+enum notif_rsp {
+ NOTIF = 0,
+ REPLY = 1,
+};
+
+enum notify_type {
+ FW_READY = 8,
+ EXCEPTION = 10,
+ WATCHDOG_TIMEOUT = 15,
+ MANAGEMENT_NOTIF = 16,
+ NOTIFICATION = 27,
+};
+
+enum message_source {
+ FW_MSG = 0,
+ DRV_MSG = 1,
+};
+
+enum notify_event_type {
+ STATE_NOTIF = 0x1,
+ CTRL_NOTIF = 0x2,
+ DPHY_NOTIF = 0x3,
+};
+
+enum ace_cmd_type {
+ ACE_CMD_GET = 3,
+ ACE_CMD_SET = 4,
+};
+
+enum ace_cmd_id {
+ IPU_OWN_CAMERA = 0x13,
+ ACE_OWN_CAMERA = 0x14,
+ GET_CAMERA_STATUS = 0x15,
+ GET_FW_ID = 0x1A,
+};
+
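+/*
+ * Command header packed into two 32-bit words, matching the firmware
+ * message layout bit for bit.
+ */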
+struct ace_cmd_hdr {
+ uint32_t module_id : 16;
+ uint32_t instance_id : 8;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t param_size : 20;
+ uint32_t cmd_id : 8;
+ uint32_t final_block : 1;
+ uint32_t init_block : 1;
+ uint32_t _hw_rsvd_2 : 2;
+} __packed;
+
+union ace_cmd_param {
+ uuid_le uuid;
+ uint32_t param;
+};
+
+struct ace_cmd {
+ struct ace_cmd_hdr hdr;
+ union ace_cmd_param param;
+} __packed;
+
+union ace_notif_hdr {
+ struct _response {
+ uint32_t status : 24;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t param_size : 20;
+ uint32_t cmd_id : 8;
+ uint32_t final_block : 1;
+ uint32_t init_block : 1;
+
+ uint32_t _hw_rsvd_2 : 2;
+ } __packed response;
+
+ struct _notify {
+ uint32_t rsvd2 : 16;
+ uint32_t notif_type : 8;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t rsvd1 : 30;
+ uint32_t _hw_rsvd_2 : 2;
+ } __packed notify;
+
+ struct _management {
+ uint32_t event_id : 16;
+ uint32_t notif_type : 8;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t event_data_size : 16;
+ uint32_t request_target : 1;
+ uint32_t request_type : 5;
+ uint32_t request_id : 8;
+ uint32_t _hw_rsvd_2 : 2;
+ } __packed management;
+};
+
+union ace_notif_cont {
+ uint16_t module_id;
+ uint8_t state_notif;
+ struct camera_status stat;
+};
+
+struct ace_notif {
+ union ace_notif_hdr hdr;
+ union ace_notif_cont cont;
+} __packed;
+
+struct mei_ace {
+ struct mei_cl_device *cldev;
+
+ struct mutex cmd_mutex;
+ struct ace_notif *cmd_resp;
+ struct completion response;
+
+ struct completion reply;
+ struct ace_notif *reply_notif;
+
+ uint16_t module_id;
+ uint16_t init_wait_q_woken;
+ wait_queue_head_t init_wait_q;
+};
+
+static inline void init_cmd_hdr(struct ace_cmd_hdr *hdr)
+{
+ memset(hdr, 0, sizeof(struct ace_cmd_hdr));
+
+ hdr->type = ACE_CMD_SET;
+ hdr->msg_tgt = DRV_MSG;
+ hdr->init_block = 1;
+ hdr->final_block = 1;
+}
+
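+/*
+ * Block until the GET_FW_ID reply has arrived (mei_ace_rx() records the
+ * module id and wakes init_wait_q), then return the cached module id.
+ */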
+static uint16_t get_fw_id(struct mei_ace *ace)
+{
+ int ret;
+
+	ret = wait_event_interruptible(ace->init_wait_q,
+				       ace->init_wait_q_woken);
+	if (ret < 0)
+		dev_warn(&ace->cldev->dev,
+			 "fw id wait interrupted, module id may be stale\n");
+
+ return ace->module_id;
+}
+
+static int construct_command(struct mei_ace *ace, struct ace_cmd *cmd,
+ enum ace_cmd_id cmd_id)
+{
+ struct ace_cmd_hdr *hdr = &cmd->hdr;
+ union ace_cmd_param *param = &cmd->param;
+
+ init_cmd_hdr(hdr);
+
+ hdr->cmd_id = cmd_id;
+ switch (cmd_id) {
+ case GET_FW_ID:
+ param->uuid = UUID_GET_FW_ID;
+ hdr->param_size = sizeof(param->uuid);
+ break;
+ case ACE_OWN_CAMERA:
+ param->param = 0;
+ hdr->module_id = get_fw_id(ace);
+ hdr->param_size = sizeof(param->param);
+ break;
+ case IPU_OWN_CAMERA:
+ case GET_CAMERA_STATUS:
+ hdr->module_id = get_fw_id(ace);
+ break;
+	default:
+		dev_err(&ace->cldev->dev,
+			"unsupported command %d\n", cmd_id);
+		break;
+ }
+
+ return hdr->param_size + sizeof(cmd->hdr);
+}
+
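+/*
+ * Synchronous command cycle: send the command, wait for the reply
+ * notification that carries the status, then wait for the management
+ * response whose payload handle_notify() copies into cmd_resp.
+ */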
+static int send_command_sync(struct mei_ace *ace,
+ struct ace_cmd *cmd, size_t len)
+{
+ int ret;
+ struct ace_cmd_hdr *cmd_hdr = &cmd->hdr;
+ union ace_notif_hdr *resp_hdr = &ace->cmd_resp->hdr;
+ union ace_notif_hdr *reply_hdr = &ace->reply_notif->hdr;
+
+ reinit_completion(&ace->response);
+ reinit_completion(&ace->reply);
+
+ ret = mei_cldev_send(ace->cldev, (uint8_t *)cmd, len);
+ if (ret < 0) {
+ dev_err(&ace->cldev->dev,
+ "send command fail %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_killable_timeout(&ace->reply, ACE_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&ace->cldev->dev,
+ "command %d notify reply error\n", cmd_hdr->cmd_id);
+ return ret;
+ } else if (ret == 0) {
+ dev_err(&ace->cldev->dev,
+ "command %d notify reply timeout\n", cmd_hdr->cmd_id);
+ ret = -ETIMEDOUT;
+ return ret;
+ }
+
+ if (reply_hdr->response.cmd_id != cmd_hdr->cmd_id) {
+ dev_err(&ace->cldev->dev,
+ "reply notify mismatch, sent %d but got %d\n",
+ cmd_hdr->cmd_id, reply_hdr->response.cmd_id);
+		return -EIO;
+ }
+
+ ret = reply_hdr->response.status;
+ if (ret) {
+ dev_err(&ace->cldev->dev,
+ "command %d reply wrong status = %d\n",
+ cmd_hdr->cmd_id, ret);
+		return -EIO;
+ }
+
+ ret = wait_for_completion_killable_timeout(&ace->response, ACE_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&ace->cldev->dev,
+ "command %d response error\n", cmd_hdr->cmd_id);
+ return ret;
+ } else if (ret == 0) {
+ dev_err(&ace->cldev->dev,
+ "command %d response timeout\n", cmd_hdr->cmd_id);
+ ret = -ETIMEDOUT;
+ return ret;
+ }
+
+ if (resp_hdr->management.request_id != cmd_hdr->cmd_id) {
+ dev_err(&ace->cldev->dev,
+ "command response mismatch, sent %d but got %d\n",
+ cmd_hdr->cmd_id, resp_hdr->management.request_id);
+ return -1;
+		return -EIO;
+
+ return 0;
+}
+
+static int trigger_get_fw_id(struct mei_ace *ace)
+{
+ int ret;
+ struct ace_cmd cmd;
+ size_t cmd_len;
+
+ cmd_len = construct_command(ace, &cmd, GET_FW_ID);
+
+ ret = mei_cldev_send(ace->cldev, (uint8_t *)&cmd, cmd_len);
+ if (ret < 0)
+ dev_err(&ace->cldev->dev,
+ "send get fw id command fail %d\n", ret);
+
+ return ret;
+}
+
+static int set_camera_ownership(struct mei_ace *ace,
+ enum ace_cmd_id cmd_id,
+ struct camera_status *status)
+{
+ struct ace_cmd cmd;
+ size_t cmd_len;
+ union ace_notif_cont *cont;
+ int ret;
+
+ cmd_len = construct_command(ace, &cmd, cmd_id);
+
+ mutex_lock(&ace->cmd_mutex);
+
+ ret = send_command_sync(ace, &cmd, cmd_len);
+ if (!ret) {
+ cont = &ace->cmd_resp->cont;
+ memcpy(status, &cont->stat, sizeof(*status));
+ }
+
+ mutex_unlock(&ace->cmd_mutex);
+
+ return ret;
+}
+
+static int ipu_own_camera(void *ace, struct camera_status *status)
+{
+ struct mei_ace *p_ace = (struct mei_ace *)ace;
+
+ return set_camera_ownership(p_ace, IPU_OWN_CAMERA, status);
+}
+
+static int ace_own_camera(void *ace, struct camera_status *status)
+{
+ struct mei_ace *p_ace = (struct mei_ace *)ace;
+
+ return set_camera_ownership(p_ace, ACE_OWN_CAMERA, status);
+}
+
+static int get_camera_status(void *ace, struct camera_status *status)
+{
+ int ret;
+ struct ace_cmd cmd;
+ size_t cmd_len;
+ union ace_notif_cont *cont;
+ struct mei_ace *p_ace = (struct mei_ace *)ace;
+
+ cmd_len = construct_command(p_ace, &cmd, GET_CAMERA_STATUS);
+
+ mutex_lock(&p_ace->cmd_mutex);
+
+ ret = send_command_sync(p_ace, &cmd, cmd_len);
+ if (!ret) {
+ cont = &p_ace->cmd_resp->cont;
+ memcpy(status, &cont->stat, sizeof(*status));
+ }
+
+ mutex_unlock(&p_ace->cmd_mutex);
+
+ return ret;
+}
+
+static struct vsc_ace_ops ace_ops = {
+ .ace_own_camera = ace_own_camera,
+ .ipu_own_camera = ipu_own_camera,
+ .get_camera_status = get_camera_status,
+};
+
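+/*
+ * Demultiplex unsolicited firmware notifications: FW_READY retriggers
+ * the fw id query, management notifications complete pending commands,
+ * exception and watchdog events are only logged.
+ */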
+static void handle_notify(struct mei_ace *ace, struct ace_notif *resp, int len)
+{
+ union ace_notif_hdr *hdr = &resp->hdr;
+ struct mei_cl_device *cldev = ace->cldev;
+
+ if (hdr->notify.msg_tgt != FW_MSG ||
+ hdr->notify.type != NOTIFICATION) {
+ dev_err(&cldev->dev, "recv incorrect notification\n");
+ return;
+ }
+
+ switch (hdr->notify.notif_type) {
+ /* firmware ready notification sent to driver
+ * after HECI client connected with firmware.
+ */
+ case FW_READY:
+ dev_info(&cldev->dev, "firmware ready\n");
+
+ trigger_get_fw_id(ace);
+ break;
+
+ case MANAGEMENT_NOTIF:
+ if (hdr->management.event_id == CTRL_NOTIF) {
+ switch (hdr->management.request_id) {
+ case GET_FW_ID:
+ dev_warn(&cldev->dev,
+ "shouldn't reach here\n");
+ break;
+
+ case ACE_OWN_CAMERA:
+ case IPU_OWN_CAMERA:
+ case GET_CAMERA_STATUS:
+ memcpy(ace->cmd_resp, resp, len);
+
+ if (!completion_done(&ace->response))
+ complete(&ace->response);
+ break;
+
+ default:
+ dev_err(&cldev->dev,
+ "incorrect command id notif\n");
+ break;
+ }
+ }
+ break;
+
+ case EXCEPTION:
+ dev_err(&cldev->dev, "firmware exception\n");
+ break;
+
+ case WATCHDOG_TIMEOUT:
+ dev_err(&cldev->dev, "firmware watchdog timeout\n");
+ break;
+
+ default:
+ dev_err(&cldev->dev,
+ "recv unknown notification(%d)\n",
+ hdr->notify.notif_type);
+ break;
+ }
+}
+
+/* callback for command response receive */
+static void mei_ace_rx(struct mei_cl_device *cldev)
+{
+ struct mei_ace *ace = mei_cldev_get_drvdata(cldev);
+ int ret;
+ struct ace_notif resp;
+ union ace_notif_hdr *hdr = &resp.hdr;
+
+ ret = mei_cldev_recv(cldev, (uint8_t *)&resp, sizeof(resp));
+ if (ret < 0) {
+ dev_err(&cldev->dev, "failure in recv %d\n", ret);
+ return;
+ } else if (ret < sizeof(union ace_notif_hdr)) {
+ dev_err(&cldev->dev, "recv small data %d\n", ret);
+ return;
+ }
+
+ switch (hdr->notify.rsp) {
+ case REPLY:
+ if (hdr->response.cmd_id == GET_FW_ID) {
+ ace->module_id = resp.cont.module_id;
+
+ ace->init_wait_q_woken = true;
+ wake_up_all(&ace->init_wait_q);
+
+ dev_info(&cldev->dev, "recv firmware id\n");
+ } else {
+ memcpy(ace->reply_notif, &resp, ret);
+
+ if (!completion_done(&ace->reply))
+ complete(&ace->reply);
+ }
+ break;
+
+ case NOTIF:
+ handle_notify(ace, &resp, ret);
+ break;
+
+ default:
+ dev_err(&cldev->dev,
+ "recv unknown response(%d)\n", hdr->notify.rsp);
+ break;
+ }
+}
+
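+/*
+ * Probe: carve the response and reply buffers out of one allocation,
+ * enable the MEI client, register the rx callback, kick off the initial
+ * GET_FW_ID exchange and register with the intel_vsc core.
+ */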
+static int mei_ace_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct mei_ace *ace;
+ int ret;
+ uint8_t *addr;
+ size_t ace_size = sizeof(struct mei_ace);
+ size_t reply_size = sizeof(struct ace_notif);
+ size_t response_size = sizeof(struct ace_notif);
+
+ ace = kzalloc(ace_size + response_size + reply_size, GFP_KERNEL);
+ if (!ace)
+ return -ENOMEM;
+
+ addr = (uint8_t *)ace;
+ ace->cmd_resp = (struct ace_notif *)(addr + ace_size);
+
+ addr = (uint8_t *)ace->cmd_resp;
+ ace->reply_notif = (struct ace_notif *)(addr + response_size);
+
+ ace->cldev = cldev;
+
+ ace->init_wait_q_woken = false;
+ init_waitqueue_head(&ace->init_wait_q);
+
+ mutex_init(&ace->cmd_mutex);
+ init_completion(&ace->response);
+ init_completion(&ace->reply);
+
+ mei_cldev_set_drvdata(cldev, ace);
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(&cldev->dev,
+ "couldn't enable ace client ret=%d\n", ret);
+ goto err_out;
+ }
+
+ ret = mei_cldev_register_rx_cb(cldev, mei_ace_rx);
+ if (ret) {
+ dev_err(&cldev->dev,
+ "couldn't register rx cb ret=%d\n", ret);
+ goto err_disable;
+ }
+
+ trigger_get_fw_id(ace);
+
+ vsc_register_ace(ace, &ace_ops);
+ return 0;
+
+err_disable:
+ mei_cldev_disable(cldev);
+
+err_out:
+ kfree(ace);
+
+ return ret;
+}
+
+static void mei_ace_remove(struct mei_cl_device *cldev)
+{
+ struct mei_ace *ace = mei_cldev_get_drvdata(cldev);
+
+ vsc_unregister_ace();
+
+ if (!completion_done(&ace->response))
+ complete(&ace->response);
+
+ if (!completion_done(&ace->reply))
+ complete(&ace->reply);
+
+ if (wq_has_sleeper(&ace->init_wait_q))
+ wake_up_all(&ace->init_wait_q);
+
+ mei_cldev_disable(cldev);
+
+ /* wait until no buffer access */
+ mutex_lock(&ace->cmd_mutex);
+ mutex_unlock(&ace->cmd_mutex);
+
+ kfree(ace);
+}
+
+#define MEI_UUID_ACE UUID_LE(0x5DB76CF6, 0x0A68, 0x4ED6, \
+ 0x9B, 0x78, 0x03, 0x61, 0x63, 0x5E, 0x24, 0x47)
+
+static const struct mei_cl_device_id mei_ace_tbl[] = {
+ { MEI_ACE_DRIVER_NAME, MEI_UUID_ACE, MEI_CL_VERSION_ANY },
+
+ /* required last entry */
+ { }
+};
+MODULE_DEVICE_TABLE(mei, mei_ace_tbl);
+
+static struct mei_cl_driver mei_ace_driver = {
+ .id_table = mei_ace_tbl,
+ .name = MEI_ACE_DRIVER_NAME,
+
+ .probe = mei_ace_probe,
+ .remove = mei_ace_remove,
+};
+
+static int __init mei_ace_init(void)
+{
+ int ret;
+
+ ret = mei_cldev_driver_register(&mei_ace_driver);
+ if (ret) {
+ pr_err("mei ace driver registration failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit mei_ace_exit(void)
+{
+ mei_cldev_driver_unregister(&mei_ace_driver);
+}
+
+module_init(mei_ace_init);
+module_exit(mei_ace_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Device driver for Intel VSC ACE client");
new file mode 100644
@@ -0,0 +1,696 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Intel Corporation */
+
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/timekeeping.h>
+#include <linux/uuid.h>
+
+#define MAX_RECV_SIZE 8192
+#define MAX_LOG_SIZE 0x40000000
+#define LOG_CONFIG_PARAM_COUNT 7
+#define COMMAND_TIMEOUT (5 * HZ)
+#define ACE_LOG_FILE "/var/log/vsc_ace.log"
+#define MEI_ACE_DEBUG_DRIVER_NAME "vsc_ace_debug"
+
+enum notif_rsp {
+ NOTIF = 0,
+ REPLY = 1,
+};
+
+enum message_source {
+ FW_MSG = 0,
+ DRV_MSG = 1,
+};
+
+enum notify_type {
+ LOG_BUFFER_STATUS = 6,
+ FW_READY = 8,
+ MANAGEMENT_NOTIF = 16,
+ NOTIFICATION = 27,
+};
+
+enum notify_event_type {
+ STATE_NOTIF = 1,
+ CTRL_NOTIF = 2,
+};
+
+enum ace_cmd_id {
+ GET_FW_VER = 0,
+ LOG_CONFIG = 6,
+ SET_SYS_TIME = 20,
+ GET_FW_ID = 26,
+};
+
+enum ace_cmd_type {
+ ACE_CMD_GET = 3,
+ ACE_CMD_SET = 4,
+};
+
+struct firmware_version {
+ uint32_t type;
+ uint32_t len;
+
+ uint16_t major;
+ uint16_t minor;
+ uint16_t hotfix;
+ uint16_t build;
+} __packed;
+
+union tracing_config {
+ struct _uart_config {
+ uint32_t instance;
+ uint32_t baudrate;
+ } __packed uart;
+
+ struct _i2c_config {
+ uint32_t instance;
+ uint32_t speed;
+ uint32_t address;
+ uint32_t reg;
+ } __packed i2c;
+};
+
+struct log_config {
+ uint32_t aging_period;
+ uint32_t fifo_period;
+ uint32_t enable;
+ uint32_t priority_mask[16];
+ uint32_t tracing_method;
+ uint32_t tracing_format;
+ union tracing_config config;
+} __packed;
+
+struct ace_cmd_hdr {
+ uint32_t module_id : 16;
+ uint32_t instance_id : 8;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t param_size : 20;
+ uint32_t cmd_id : 8;
+ uint32_t final_block : 1;
+ uint32_t init_block : 1;
+ uint32_t _hw_rsvd_2 : 2;
+} __packed;
+
+union ace_cmd_param {
+ uuid_le uuid;
+ uint64_t time;
+ struct log_config config;
+};
+
+struct ace_cmd {
+ struct ace_cmd_hdr hdr;
+ union ace_cmd_param param;
+} __packed;
+
+union ace_notif_hdr {
+ struct _response {
+ uint32_t status : 24;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t param_size : 20;
+ uint32_t cmd_id : 8;
+ uint32_t final_block : 1;
+ uint32_t init_block : 1;
+
+ uint32_t _hw_rsvd_2 : 2;
+ } __packed response;
+
+ struct _notify {
+ uint32_t rsvd2 : 16;
+ uint32_t notif_type : 8;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t rsvd1 : 30;
+ uint32_t _hw_rsvd_2 : 2;
+ } __packed notify;
+
+ struct _log_notify {
+ uint32_t rsvd0 : 12;
+ uint32_t source_core : 4;
+ uint32_t notif_type : 8;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t rsvd1 : 30;
+ uint32_t _hw_rsvd_2 : 2;
+ } __packed log_notify;
+
+ struct _management {
+ uint32_t event_id : 16;
+ uint32_t notif_type : 8;
+ uint32_t type : 5;
+ uint32_t rsp : 1;
+ uint32_t msg_tgt : 1;
+ uint32_t _hw_rsvd_0 : 1;
+
+ uint32_t event_data_size : 16;
+ uint32_t request_target : 1;
+ uint32_t request_type : 5;
+ uint32_t request_id : 8;
+ uint32_t _hw_rsvd_2 : 2;
+ } __packed management;
+};
+
+union ace_notif_cont {
+ uint16_t module_id;
+ struct firmware_version version;
+};
+
+struct ace_notif {
+ union ace_notif_hdr hdr;
+ union ace_notif_cont cont;
+} __packed;
+
+struct mei_ace_debug {
+ struct mei_cl_device *cldev;
+
+ struct mutex cmd_mutex;
+ struct ace_notif cmd_resp;
+ struct completion response;
+
+ struct completion reply;
+ struct ace_notif reply_notif;
+
+ loff_t pos;
+ struct file *ace_file;
+
+ uint8_t *recv_buf;
+
+ struct dentry *dfs_dir;
+};
+
+static inline void init_cmd_hdr(struct ace_cmd_hdr *hdr)
+{
+ memset(hdr, 0, sizeof(struct ace_cmd_hdr));
+
+ hdr->type = ACE_CMD_SET;
+ hdr->msg_tgt = DRV_MSG;
+ hdr->init_block = 1;
+ hdr->final_block = 1;
+}
+
+static int construct_command(struct mei_ace_debug *ad,
+ struct ace_cmd *cmd,
+ enum ace_cmd_id cmd_id,
+ void *user_data)
+{
+ struct ace_cmd_hdr *hdr = &cmd->hdr;
+ union ace_cmd_param *param = &cmd->param;
+
+ init_cmd_hdr(hdr);
+
+ hdr->cmd_id = cmd_id;
+ switch (cmd_id) {
+ case GET_FW_VER:
+ hdr->type = ACE_CMD_GET;
+ break;
+ case SET_SYS_TIME:
+ param->time = ktime_get_ns();
+ hdr->param_size = sizeof(param->time);
+ break;
+ case GET_FW_ID:
+		memcpy(&param->uuid, user_data, sizeof(param->uuid));
+ hdr->param_size = sizeof(param->uuid);
+ break;
+ case LOG_CONFIG:
+		memcpy(&param->config, user_data, sizeof(param->config));
+ hdr->param_size = sizeof(param->config);
+ break;
+	default:
+		dev_err(&ad->cldev->dev,
+			"unsupported command %d\n", cmd_id);
+		break;
+ }
+
+ return hdr->param_size + sizeof(cmd->hdr);
+}
+
+static int send_command_sync(struct mei_ace_debug *ad,
+ struct ace_cmd *cmd, size_t len)
+{
+ int ret;
+ struct ace_cmd_hdr *cmd_hdr = &cmd->hdr;
+ union ace_notif_hdr *reply_hdr = &ad->reply_notif.hdr;
+
+ reinit_completion(&ad->reply);
+
+ ret = mei_cldev_send(ad->cldev, (uint8_t *)cmd, len);
+ if (ret < 0) {
+ dev_err(&ad->cldev->dev,
+ "send command fail %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_killable_timeout(&ad->reply, COMMAND_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&ad->cldev->dev,
+ "command %d notify reply error\n", cmd_hdr->cmd_id);
+ return ret;
+ } else if (ret == 0) {
+ dev_err(&ad->cldev->dev,
+ "command %d notify reply timeout\n", cmd_hdr->cmd_id);
+ ret = -ETIMEDOUT;
+ return ret;
+ }
+
+ if (reply_hdr->response.cmd_id != cmd_hdr->cmd_id) {
+ dev_err(&ad->cldev->dev,
+ "reply notify mismatch, sent %d but got %d\n",
+ cmd_hdr->cmd_id, reply_hdr->response.cmd_id);
+		return -EIO;
+ }
+
+ ret = reply_hdr->response.status;
+ if (ret) {
+ dev_err(&ad->cldev->dev,
+ "command %d reply wrong status = %d\n",
+ cmd_hdr->cmd_id, ret);
+		return -EIO;
+ }
+
+ return 0;
+}
+
+static int set_system_time(struct mei_ace_debug *ad)
+{
+ struct ace_cmd cmd;
+ size_t cmd_len;
+ int ret;
+
+ cmd_len = construct_command(ad, &cmd, SET_SYS_TIME, NULL);
+
+ mutex_lock(&ad->cmd_mutex);
+ ret = send_command_sync(ad, &cmd, cmd_len);
+ mutex_unlock(&ad->cmd_mutex);
+
+ return ret;
+}
+
+static ssize_t ad_time_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mei_ace_debug *ad = file->private_data;
+ int ret;
+
+ ret = set_system_time(ad);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static int config_log(struct mei_ace_debug *ad, struct log_config *config)
+{
+ struct ace_cmd cmd;
+ size_t cmd_len;
+ int ret;
+
+ cmd_len = construct_command(ad, &cmd, LOG_CONFIG, config);
+
+ mutex_lock(&ad->cmd_mutex);
+ ret = send_command_sync(ad, &cmd, cmd_len);
+ mutex_unlock(&ad->cmd_mutex);
+
+ return ret;
+}
+
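+/*
+ * debugfs write handler; expects seven space-separated integers:
+ * aging_period fifo_period enable priority_mask[0] priority_mask[1]
+ * tracing_format tracing_method, forwarded as a LOG_CONFIG command.
+ */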
+static ssize_t ad_log_config_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct mei_ace_debug *ad = file->private_data;
+ int ret;
+ uint8_t *buf;
+ struct log_config config = {0};
+
+ buf = memdup_user_nul(ubuf, min(count, (size_t)(PAGE_SIZE - 1)));
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+	ret = sscanf(buf, "%u %u %u %u %u %u %u",
+		     &config.aging_period,
+		     &config.fifo_period,
+		     &config.enable,
+		     &config.priority_mask[0],
+		     &config.priority_mask[1],
+		     &config.tracing_format,
+		     &config.tracing_method);
+	kfree(buf);
+	if (ret != LOG_CONFIG_PARAM_COUNT) {
+		dev_err(&ad->cldev->dev,
+			"expected %d log config parameters\n",
+			LOG_CONFIG_PARAM_COUNT);
+		return -EINVAL;
+	}
+
+ ret = config_log(ad, &config);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static int get_firmware_version(struct mei_ace_debug *ad,
+ struct firmware_version *version)
+{
+ struct ace_cmd cmd;
+ size_t cmd_len;
+ union ace_notif_cont *cont;
+ int ret;
+
+ cmd_len = construct_command(ad, &cmd, GET_FW_VER, NULL);
+
+ mutex_lock(&ad->cmd_mutex);
+ ret = send_command_sync(ad, &cmd, cmd_len);
+ if (!ret) {
+ cont = &ad->reply_notif.cont;
+ memcpy(version, &cont->version, sizeof(*version));
+ }
+ mutex_unlock(&ad->cmd_mutex);
+
+ return ret;
+}
+
+static ssize_t ad_firmware_version_read(struct file *file,
+ char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mei_ace_debug *ad = file->private_data;
+ int ret, pos;
+ struct firmware_version version;
+ unsigned long addr = get_zeroed_page(GFP_KERNEL);
+
+ if (!addr)
+ return -ENOMEM;
+
+ ret = get_firmware_version(ad, &version);
+ if (ret)
+ goto out;
+
+ pos = snprintf((char *)addr, PAGE_SIZE,
+ "firmware version: %u.%u.%u.%u\n",
+ version.major, version.minor,
+ version.hotfix, version.build);
+
+ ret = simple_read_from_buffer(buf, count, ppos, (char *)addr, pos);
+
+out:
+ free_page(addr);
+ return ret;
+}
+
+#define AD_DFS_ADD_FILE(name) \
+ debugfs_create_file(#name, 0644, ad->dfs_dir, ad, \
+ &ad_dfs_##name##_fops)
+
+#define AD_DFS_FILE_OPS(name) \
+static const struct file_operations ad_dfs_##name##_fops = { \
+ .read = ad_##name##_read, \
+ .write = ad_##name##_write, \
+ .open = simple_open, \
+}
+
+#define AD_DFS_FILE_READ_OPS(name) \
+static const struct file_operations ad_dfs_##name##_fops = { \
+ .read = ad_##name##_read, \
+ .open = simple_open, \
+}
+
+#define AD_DFS_FILE_WRITE_OPS(name) \
+static const struct file_operations ad_dfs_##name##_fops = { \
+ .write = ad_##name##_write, \
+ .open = simple_open, \
+}
+
+AD_DFS_FILE_WRITE_OPS(time);
+AD_DFS_FILE_WRITE_OPS(log_config);
+
+AD_DFS_FILE_READ_OPS(firmware_version);
+
+static void handle_notify(struct mei_ace_debug *ad,
+ struct ace_notif *notif, int len)
+{
+ int ret;
+ struct file *file;
+ loff_t *pos;
+	union ace_notif_hdr *hdr = &notif->hdr;
+ struct mei_cl_device *cldev = ad->cldev;
+
+ if (hdr->notify.msg_tgt != FW_MSG ||
+ hdr->notify.type != NOTIFICATION) {
+ dev_err(&cldev->dev, "recv wrong notification\n");
+ return;
+ }
+
+ switch (hdr->notify.notif_type) {
+ case FW_READY:
+ /* firmware ready notification sent to driver
+ * after HECI client connected with firmware.
+ */
+ dev_info(&cldev->dev, "firmware ready\n");
+ break;
+
+	case LOG_BUFFER_STATUS:
+		if (ad->pos < MAX_LOG_SIZE) {
+			pos = &ad->pos;
+			file = ad->ace_file;
+
+			/* kernel_write() advances *pos itself on success */
+			ret = kernel_write(file,
+					   (uint8_t *)notif + sizeof(*hdr),
+					   len - sizeof(*hdr),
+					   pos);
+			if (ret < 0)
+				dev_err(&cldev->dev,
+					"error in writing log %d\n", ret);
+		} else {
+			dev_warn(&cldev->dev,
+				 "log already exceeds the max size\n");
+		}
+		break;
+
+ case MANAGEMENT_NOTIF:
+ if (hdr->management.event_id == CTRL_NOTIF) {
+ switch (hdr->management.request_id) {
+ case GET_FW_VER:
+ case LOG_CONFIG:
+ case SET_SYS_TIME:
+ case GET_FW_ID:
+ memcpy(&ad->cmd_resp, notif, len);
+
+ if (!completion_done(&ad->response))
+ complete(&ad->response);
+ break;
+
+ default:
+ dev_err(&cldev->dev,
+ "wrong command id(%d) notif\n",
+ hdr->management.request_id);
+ break;
+ }
+ }
+ break;
+
+ default:
+ dev_info(&cldev->dev,
+ "unexpected notify(%d)\n", hdr->notify.notif_type);
+ break;
+ }
+}
+
+/* callback for command response receive */
+static void mei_ace_debug_rx(struct mei_cl_device *cldev)
+{
+ struct mei_ace_debug *ad = mei_cldev_get_drvdata(cldev);
+ int ret;
+ struct ace_notif *notif;
+ union ace_notif_hdr *hdr;
+
+ ret = mei_cldev_recv(cldev, ad->recv_buf, MAX_RECV_SIZE);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "failure in recv %d\n", ret);
+ return;
+ } else if (ret < sizeof(union ace_notif_hdr)) {
+ dev_err(&cldev->dev, "recv small data %d\n", ret);
+ return;
+ }
+
+	notif = (struct ace_notif *)ad->recv_buf;
+	hdr = &notif->hdr;
+
+ switch (hdr->notify.rsp) {
+ case REPLY:
+ memcpy(&ad->reply_notif, notif, sizeof(struct ace_notif));
+
+ if (!completion_done(&ad->reply))
+ complete(&ad->reply);
+ break;
+
+ case NOTIF:
+ handle_notify(ad, notif, ret);
+ break;
+
+ default:
+ dev_err(&cldev->dev,
+ "unexpected response(%d)\n", hdr->notify.rsp);
+ break;
+ }
+}
+
+static int mei_ace_debug_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct mei_ace_debug *ad;
+ int ret;
+ uint32_t order = get_order(MAX_RECV_SIZE);
+
+ ad = kzalloc(sizeof(struct mei_ace_debug), GFP_KERNEL);
+ if (!ad)
+ return -ENOMEM;
+
+ ad->recv_buf = (uint8_t *)__get_free_pages(GFP_KERNEL, order);
+ if (!ad->recv_buf) {
+ kfree(ad);
+ return -ENOMEM;
+ }
+
+ ad->cldev = cldev;
+
+ mutex_init(&ad->cmd_mutex);
+ init_completion(&ad->response);
+ init_completion(&ad->reply);
+
+ mei_cldev_set_drvdata(cldev, ad);
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(&cldev->dev,
+ "couldn't enable ace debug client ret=%d\n", ret);
+ goto err_out;
+ }
+
+ ret = mei_cldev_register_rx_cb(cldev, mei_ace_debug_rx);
+ if (ret) {
+ dev_err(&cldev->dev,
+ "couldn't register ace debug rx cb ret=%d\n", ret);
+ goto err_disable;
+ }
+
+ ad->ace_file = filp_open(ACE_LOG_FILE,
+ O_CREAT | O_RDWR | O_LARGEFILE | O_TRUNC,
+ 0600);
+ if (IS_ERR(ad->ace_file)) {
+ dev_err(&cldev->dev,
+ "filp_open(%s) failed\n", ACE_LOG_FILE);
+ ret = PTR_ERR(ad->ace_file);
+ goto err_disable;
+ }
+ ad->pos = 0;
+
+	ad->dfs_dir = debugfs_create_dir("vsc_ace", NULL);
+	if (!IS_ERR_OR_NULL(ad->dfs_dir)) {
+		AD_DFS_ADD_FILE(log_config);
+		AD_DFS_ADD_FILE(time);
+		AD_DFS_ADD_FILE(firmware_version);
+	}
+
+ return 0;
+
+err_disable:
+ mei_cldev_disable(cldev);
+
+err_out:
+ free_pages((unsigned long)ad->recv_buf, order);
+
+ kfree(ad);
+
+ return ret;
+}
+
+static void mei_ace_debug_remove(struct mei_cl_device *cldev)
+{
+ uint32_t order = get_order(MAX_RECV_SIZE);
+ struct mei_ace_debug *ad = mei_cldev_get_drvdata(cldev);
+
+ if (!completion_done(&ad->response))
+ complete(&ad->response);
+
+ if (!completion_done(&ad->reply))
+ complete(&ad->reply);
+
+ mei_cldev_disable(cldev);
+
+ debugfs_remove_recursive(ad->dfs_dir);
+
+ filp_close(ad->ace_file, NULL);
+
+ /* wait until no buffer access */
+ mutex_lock(&ad->cmd_mutex);
+ mutex_unlock(&ad->cmd_mutex);
+
+ free_pages((unsigned long)ad->recv_buf, order);
+
+ kfree(ad);
+}
+
+#define MEI_UUID_ACE_DEBUG UUID_LE(0xFB285857, 0xFC24, 0x4BF3, 0xBD, \
+ 0x80, 0x2A, 0xBC, 0x44, 0xE3, 0xC2, 0x0B)
+
+static const struct mei_cl_device_id mei_ace_debug_tbl[] = {
+ { MEI_ACE_DEBUG_DRIVER_NAME, MEI_UUID_ACE_DEBUG, MEI_CL_VERSION_ANY },
+
+ /* required last entry */
+ { }
+};
+MODULE_DEVICE_TABLE(mei, mei_ace_debug_tbl);
+
+static struct mei_cl_driver mei_ace_debug_driver = {
+ .id_table = mei_ace_debug_tbl,
+ .name = MEI_ACE_DEBUG_DRIVER_NAME,
+
+ .probe = mei_ace_debug_probe,
+ .remove = mei_ace_debug_remove,
+};
+
+static int __init mei_ace_debug_init(void)
+{
+ int ret;
+
+ ret = mei_cldev_driver_register(&mei_ace_debug_driver);
+ if (ret) {
+ pr_err("mei ace debug driver registration failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit mei_ace_debug_exit(void)
+{
+ mei_cldev_driver_unregister(&mei_ace_debug_driver);
+}
+
+module_init(mei_ace_debug_init);
+module_exit(mei_ace_debug_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Device driver for Intel VSC ACE debug client");
new file mode 100644
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Intel Corporation */
+
+#include <linux/completion.h>
+#include <linux/kernel.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uuid.h>
+#include <linux/vsc.h>
+
+#include "intel_vsc.h"
+
+#define CSI_TIMEOUT (5 * HZ)
+#define MEI_CSI_DRIVER_NAME "vsc_csi"
+
+/*
+ * Command ids that can be sent down to the firmware, plus the
+ * privacy notification id used when handling privacy state changes.
+ *
+ * This enumeration is local to the mei csi driver.
+ */
+enum csi_cmd_id {
+ /* used to set csi ownership */
+ CSI_SET_OWNER,
+
+ /* used to get csi ownership */
+ CSI_GET_OWNER,
+
+	/* used to configure mipi */
+ CSI_SET_MIPI_CONF,
+
+ /* used to get current mipi configuration */
+ CSI_GET_MIPI_CONF,
+
+ /* used to set csi power state */
+ CSI_SET_POWER_STATE,
+
+ /* used to get csi power state */
+ CSI_GET_POWER_STATE,
+
+ /* privacy notification id used when privacy state changes */
+ CSI_PRIVACY_NOTIF,
+};
+
+enum privacy_state {
+ PRIVACY_OFF = 0,
+ PRIVACY_ON,
+};
+
+/* CSI command structure */
+struct csi_cmd {
+ uint32_t cmd_id;
+ union _cmd_param {
+ uint32_t param;
+ struct mipi_conf conf;
+ } param;
+} __packed;
+
+/* CSI command response structure */
+struct csi_notif {
+ uint32_t cmd_id;
+ int status;
+ union _resp_cont {
+ uint32_t cont;
+ struct mipi_conf conf;
+ } cont;
+} __packed;
+
+struct mei_csi {
+ struct mei_cl_device *cldev;
+
+ struct mutex cmd_mutex;
+ struct csi_notif *notif;
+ struct completion response;
+
+ spinlock_t privacy_lock;
+ void *handle;
+ vsc_privacy_callback_t callback;
+};
+
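+/* send a command to the firmware and wait for the matching response */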
+static int mei_csi_send(struct mei_csi *csi, uint8_t *buf, size_t len)
+{
+ struct csi_cmd *cmd = (struct csi_cmd *)buf;
+ int ret;
+
+ reinit_completion(&csi->response);
+
+ ret = mei_cldev_send(csi->cldev, buf, len);
+ if (ret < 0) {
+ dev_err(&csi->cldev->dev,
+			"sending command failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_killable_timeout(&csi->response, CSI_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&csi->cldev->dev,
+ "command %d response error\n", cmd->cmd_id);
+ return ret;
+ } else if (ret == 0) {
+ dev_err(&csi->cldev->dev,
+ "command %d response timeout\n", cmd->cmd_id);
+ ret = -ETIMEDOUT;
+ return ret;
+ }
+
+ ret = csi->notif->status;
+ if (ret == -1) {
+ dev_info(&csi->cldev->dev,
+ "privacy on, command id = %d\n", cmd->cmd_id);
+ ret = 0;
+ } else if (ret) {
+ dev_err(&csi->cldev->dev,
+			"command %d response failed: %d\n", cmd->cmd_id, ret);
+ return ret;
+ }
+
+ if (csi->notif->cmd_id != cmd->cmd_id) {
+ dev_err(&csi->cldev->dev,
+ "command response id mismatch, sent %d but got %d\n",
+ cmd->cmd_id, csi->notif->cmd_id);
+		ret = -EINVAL;
+ }
+
+ return ret;
+}
+
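+/* the wrappers below each issue one firmware command under the command mutex */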
+static int csi_set_owner(void *csi, enum csi_owner owner)
+{
+ struct csi_cmd cmd;
+ size_t cmd_len = sizeof(cmd.cmd_id);
+ int ret;
+ struct mei_csi *p_csi = (struct mei_csi *)csi;
+
+ cmd.cmd_id = CSI_SET_OWNER;
+ cmd.param.param = owner;
+ cmd_len += sizeof(cmd.param.param);
+
+ mutex_lock(&p_csi->cmd_mutex);
+ ret = mei_csi_send(p_csi, (uint8_t *)&cmd, cmd_len);
+ mutex_unlock(&p_csi->cmd_mutex);
+
+ return ret;
+}
+
+static int csi_get_owner(void *csi, enum csi_owner *owner)
+{
+ struct csi_cmd cmd;
+ size_t cmd_len = sizeof(cmd.cmd_id);
+ int ret;
+ struct mei_csi *p_csi = (struct mei_csi *)csi;
+
+ cmd.cmd_id = CSI_GET_OWNER;
+
+ mutex_lock(&p_csi->cmd_mutex);
+ ret = mei_csi_send(p_csi, (uint8_t *)&cmd, cmd_len);
+ if (!ret)
+ *owner = p_csi->notif->cont.cont;
+ mutex_unlock(&p_csi->cmd_mutex);
+
+ return ret;
+}
+
+static int csi_set_mipi_conf(void *csi, struct mipi_conf *conf)
+{
+ struct csi_cmd cmd;
+ size_t cmd_len = sizeof(cmd.cmd_id);
+ int ret;
+ struct mei_csi *p_csi = (struct mei_csi *)csi;
+
+ cmd.cmd_id = CSI_SET_MIPI_CONF;
+ cmd.param.conf.freq = conf->freq;
+ cmd.param.conf.lane_num = conf->lane_num;
+ cmd_len += sizeof(cmd.param.conf);
+
+ mutex_lock(&p_csi->cmd_mutex);
+ ret = mei_csi_send(p_csi, (uint8_t *)&cmd, cmd_len);
+ mutex_unlock(&p_csi->cmd_mutex);
+
+ return ret;
+}
+
+static int csi_get_mipi_conf(void *csi, struct mipi_conf *conf)
+{
+ struct csi_cmd cmd;
+ size_t cmd_len = sizeof(cmd.cmd_id);
+ struct mipi_conf *res;
+ int ret;
+ struct mei_csi *p_csi = (struct mei_csi *)csi;
+
+ cmd.cmd_id = CSI_GET_MIPI_CONF;
+
+ mutex_lock(&p_csi->cmd_mutex);
+ ret = mei_csi_send(p_csi, (uint8_t *)&cmd, cmd_len);
+ if (!ret) {
+ res = &p_csi->notif->cont.conf;
+ conf->freq = res->freq;
+ conf->lane_num = res->lane_num;
+ }
+ mutex_unlock(&p_csi->cmd_mutex);
+
+ return ret;
+}
+
+static int csi_set_power_state(void *csi, enum csi_power_state state)
+{
+ struct csi_cmd cmd;
+ size_t cmd_len = sizeof(cmd.cmd_id);
+ int ret;
+ struct mei_csi *p_csi = (struct mei_csi *)csi;
+
+ cmd.cmd_id = CSI_SET_POWER_STATE;
+ cmd.param.param = state;
+ cmd_len += sizeof(cmd.param.param);
+
+ mutex_lock(&p_csi->cmd_mutex);
+ ret = mei_csi_send(p_csi, (uint8_t *)&cmd, cmd_len);
+ mutex_unlock(&p_csi->cmd_mutex);
+
+ return ret;
+}
+
+static int csi_get_power_state(void *csi, enum csi_power_state *state)
+{
+ struct csi_cmd cmd;
+ size_t cmd_len = sizeof(cmd.cmd_id);
+ int ret;
+ struct mei_csi *p_csi = (struct mei_csi *)csi;
+
+ cmd.cmd_id = CSI_GET_POWER_STATE;
+
+ mutex_lock(&p_csi->cmd_mutex);
+ ret = mei_csi_send(p_csi, (uint8_t *)&cmd, cmd_len);
+ if (!ret)
+ *state = p_csi->notif->cont.cont;
+ mutex_unlock(&p_csi->cmd_mutex);
+
+ return ret;
+}
+
+static void csi_set_privacy_callback(void *csi,
+ vsc_privacy_callback_t callback,
+ void *handle)
+{
+ unsigned long flags;
+ struct mei_csi *p_csi = (struct mei_csi *)csi;
+
+ spin_lock_irqsave(&p_csi->privacy_lock, flags);
+ p_csi->callback = callback;
+ p_csi->handle = handle;
+ spin_unlock_irqrestore(&p_csi->privacy_lock, flags);
+}
+
+static struct vsc_csi_ops csi_ops = {
+ .set_owner = csi_set_owner,
+ .get_owner = csi_get_owner,
+ .set_mipi_conf = csi_set_mipi_conf,
+ .get_mipi_conf = csi_get_mipi_conf,
+ .set_power_state = csi_set_power_state,
+ .get_power_state = csi_get_power_state,
+ .set_privacy_callback = csi_set_privacy_callback,
+};
+
+static void privacy_notify(struct mei_csi *csi, uint8_t state)
+{
+ unsigned long flags;
+ void *handle;
+ vsc_privacy_callback_t callback;
+
+ spin_lock_irqsave(&csi->privacy_lock, flags);
+ callback = csi->callback;
+ handle = csi->handle;
+ spin_unlock_irqrestore(&csi->privacy_lock, flags);
+
+ if (callback)
+ callback(handle, state);
+}
+
+/* callback for command response receive */
+static void mei_csi_rx(struct mei_cl_device *cldev)
+{
+ int ret;
+ struct csi_notif notif = {0};
+ struct mei_csi *csi = mei_cldev_get_drvdata(cldev);
+
+	ret = mei_cldev_recv(cldev, (uint8_t *)&notif,
+ sizeof(struct csi_notif));
+ if (ret < 0) {
+		dev_err(&cldev->dev, "receive failed: %d\n", ret);
+ return;
+ }
+
+ switch (notif.cmd_id) {
+ case CSI_PRIVACY_NOTIF:
+ switch (notif.cont.cont) {
+ case PRIVACY_ON:
+ privacy_notify(csi, 0);
+
+ dev_info(&cldev->dev, "privacy on\n");
+ break;
+
+ case PRIVACY_OFF:
+ privacy_notify(csi, 1);
+
+ dev_info(&cldev->dev, "privacy off\n");
+ break;
+
+ default:
+ dev_err(&cldev->dev,
+				"received invalid privacy state\n");
+ break;
+ }
+ break;
+
+ case CSI_SET_OWNER:
+ case CSI_GET_OWNER:
+ case CSI_SET_MIPI_CONF:
+ case CSI_GET_MIPI_CONF:
+ case CSI_SET_POWER_STATE:
+ case CSI_GET_POWER_STATE:
+		memcpy(csi->notif, &notif, ret);
+
+ if (!completion_done(&csi->response))
+ complete(&csi->response);
+ break;
+
+ default:
+ dev_err(&cldev->dev,
+			"received unsupported notification (%d)\n",
+ notif.cmd_id);
+ break;
+ }
+}
+
+static int mei_csi_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct mei_csi *csi;
+ int ret;
+ uint8_t *p;
+ size_t csi_size = sizeof(struct mei_csi);
+
+ p = kzalloc(csi_size + sizeof(struct csi_notif), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ csi = (struct mei_csi *)p;
+ csi->notif = (struct csi_notif *)(p + csi_size);
+
+ csi->cldev = cldev;
+
+ mutex_init(&csi->cmd_mutex);
+ init_completion(&csi->response);
+
+ spin_lock_init(&csi->privacy_lock);
+
+ mei_cldev_set_drvdata(cldev, csi);
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(&cldev->dev,
+ "couldn't enable csi client ret=%d\n", ret);
+ goto err_out;
+ }
+
+ ret = mei_cldev_register_rx_cb(cldev, mei_csi_rx);
+ if (ret) {
+ dev_err(&cldev->dev,
+ "couldn't register rx event ret=%d\n", ret);
+ goto err_disable;
+ }
+
+ vsc_register_csi(csi, &csi_ops);
+
+ return 0;
+
+err_disable:
+ mei_cldev_disable(cldev);
+
+err_out:
+ kfree(csi);
+
+ return ret;
+}
+
+static void mei_csi_remove(struct mei_cl_device *cldev)
+{
+ struct mei_csi *csi = mei_cldev_get_drvdata(cldev);
+
+ vsc_unregister_csi();
+
+ if (!completion_done(&csi->response))
+ complete(&csi->response);
+
+ mei_cldev_disable(cldev);
+
+ /* wait until no buffer access */
+ mutex_lock(&csi->cmd_mutex);
+ mutex_unlock(&csi->cmd_mutex);
+
+ kfree(csi);
+}
+
+#define MEI_UUID_CSI UUID_LE(0x92335FCF, 0x3203, 0x4472, \
+ 0xAF, 0x93, 0x7b, 0x44, 0x53, 0xAC, 0x29, 0xDA)
+
+static const struct mei_cl_device_id mei_csi_tbl[] = {
+ { MEI_CSI_DRIVER_NAME, MEI_UUID_CSI, MEI_CL_VERSION_ANY },
+
+ /* required last entry */
+ { }
+};
+MODULE_DEVICE_TABLE(mei, mei_csi_tbl);
+
+static struct mei_cl_driver mei_csi_driver = {
+ .id_table = mei_csi_tbl,
+ .name = MEI_CSI_DRIVER_NAME,
+
+ .probe = mei_csi_probe,
+ .remove = mei_csi_remove,
+};
+
+static int __init mei_csi_init(void)
+{
+ int ret;
+
+ ret = mei_cldev_driver_register(&mei_csi_driver);
+ if (ret) {
+ pr_err("mei csi driver registration failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit mei_csi_exit(void)
+{
+ mei_cldev_driver_unregister(&mei_csi_driver);
+}
+
+module_init(mei_csi_init);
+module_exit(mei_csi_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Device driver for Intel VSC CSI client");
new file mode 100644
@@ -0,0 +1,944 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Intel Corporation */
+
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/timekeeping.h>
+#include <linux/uuid.h>
+
+#define MEI_PSE_DRIVER_NAME "vsc_pse"
+
+#define PSE_TIMEOUT (5 * HZ)
+
+#define CONT_OFFSET offsetof(struct pse_notif, cont)
+#define NOTIF_HEADER_LEN 8
+
+#define MAX_RECV_SIZE 8192
+#define MAX_LOG_SIZE 0x40000000
+#define EM_LOG_FILE "/var/log/vsc_em.log"
+#define SEM_LOG_FILE "/var/log/vsc_sem.log"
+
+#define PM_SUBSYS_MAX 2
+#define PM_STATE_NAME_LEN 16
+#define DEV_NUM 64
+#define DEV_NAME_LEN 32
+
+#define FORMAT "|%16.32s |%12u "
+#define FORMAT_TAIL "|\n"
+#define CONSTRUCTED_FORMAT (FORMAT FORMAT FORMAT FORMAT FORMAT_TAIL)
+#define TITLE "| Device Name | Block Count "
+#define TITLE_TAIL "|"
+#define CONSTRUCTED_TITLE (TITLE TITLE TITLE TITLE TITLE_TAIL)
+
+enum pse_cmd_id {
+ LOG_ONOFF = 0,
+ SET_WATERMARK = 1,
+ DUMP_TRACE = 2,
+ SET_TIMEOUT = 3,
+ SET_LOG_LEVEL = 4,
+ SET_TIME = 5,
+ GET_TIME = 6,
+ DUMP_POWER_DATA = 7,
+ TRACE_DATA_NOTIF = 8,
+ GET_FW_VER = 10,
+};
+
+enum pm_state {
+ ACTIVE = 0,
+ CORE_HALT,
+ CORE_CLK_GATE,
+ DEEP_SLEEP,
+ STATE_MAX,
+};
+
+struct fw_version {
+ uint32_t major;
+ uint32_t minor;
+ uint32_t hotfix;
+ uint32_t build;
+} __packed;
+
+struct dev_info {
+ char name[DEV_NAME_LEN];
+ uint32_t block_cnt;
+} __packed;
+
+struct dev_list {
+ struct dev_info dev[DEV_NUM];
+ uint32_t dev_cnt;
+} __packed;
+
+struct pm_status {
+ char name[PM_STATE_NAME_LEN];
+ uint64_t start;
+ uint64_t end;
+ uint64_t duration;
+ uint64_t count;
+} __packed;
+
+struct pm_subsys {
+ uint64_t total;
+ struct pm_status status[STATE_MAX];
+ struct dev_list dev;
+ uint16_t crc;
+} __packed;
+
+struct pm_data {
+ struct pm_subsys subsys[PM_SUBSYS_MAX];
+} __packed;
+
+struct pse_cmd {
+ uint32_t cmd_id;
+ union _cmd_param {
+ uint32_t param;
+ uint64_t time;
+ } param;
+} __packed;
+
+struct pse_notif {
+ uint32_t cmd_id;
+ int8_t status;
+ uint8_t source;
+ int16_t size;
+ union _resp_cont {
+ uint64_t time;
+ struct fw_version ver;
+ } cont;
+} __packed;
+
+struct mei_pse {
+ struct mei_cl_device *cldev;
+
+ struct mutex cmd_mutex;
+ struct pse_notif notif;
+ struct completion response;
+
+ uint8_t *recv_buf;
+
+ uint8_t *pm_data;
+ uint32_t pm_data_pos;
+
+ loff_t em_pos;
+ struct file *em_file;
+
+ loff_t sem_pos;
+ struct file *sem_file;
+
+ struct dentry *dfs_dir;
+};
+
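+/* send a command to the PSE firmware and wait for the matching response */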
+static int mei_pse_send(struct mei_pse *pse, struct pse_cmd *cmd, size_t len)
+{
+ int ret;
+
+ reinit_completion(&pse->response);
+
+ ret = mei_cldev_send(pse->cldev, (uint8_t *)cmd, len);
+ if (ret < 0) {
+ dev_err(&pse->cldev->dev,
+			"sending command failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_killable_timeout(&pse->response, PSE_TIMEOUT);
+ if (ret < 0) {
+ dev_err(&pse->cldev->dev,
+ "command %d response error\n", cmd->cmd_id);
+ return ret;
+ } else if (ret == 0) {
+ dev_err(&pse->cldev->dev,
+ "command %d response timeout\n", cmd->cmd_id);
+ ret = -ETIMEDOUT;
+ return ret;
+ }
+
+ ret = pse->notif.status;
+ if (ret) {
+ dev_err(&pse->cldev->dev,
+			"command %d response failed: %d\n", cmd->cmd_id, ret);
+ return ret;
+ }
+
+ if (pse->notif.cmd_id != cmd->cmd_id) {
+ dev_err(&pse->cldev->dev,
+ "command response id mismatch, sent %d but got %d\n",
+ cmd->cmd_id, pse->notif.cmd_id);
+		ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int pse_log_onoff(struct mei_pse *pse, uint8_t onoff)
+{
+ struct pse_cmd cmd;
+ size_t cmd_len = sizeof(cmd.cmd_id);
+ int ret;
+
+ cmd.cmd_id = LOG_ONOFF;
+ cmd.param.param = onoff;
+ cmd_len += sizeof(cmd.param.param);
+
+ mutex_lock(&pse->cmd_mutex);
+ ret = mei_pse_send(pse, &cmd, cmd_len);
+ mutex_unlock(&pse->cmd_mutex);
+
+ return ret;
+}
+
+static ssize_t pse_log_onoff_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mei_pse *pse = file->private_data;
+ int ret;
+ uint8_t state;
+
+ ret = kstrtou8_from_user(buf, count, 0, &state);
+ if (ret)
+ return ret;
+
+ pse_log_onoff(pse, state);
+
+ return count;
+}
+
+static int pse_set_watermark(struct mei_pse *pse, int val)
+{
+ struct pse_cmd cmd;
+ size_t cmd_len = sizeof(cmd.cmd_id);
+ int ret;
+
+ if (val < -1 || val > 100) {
+		dev_err(&pse->cldev->dev, "invalid watermark value\n");
+		return -EINVAL;
+ }
+
+ cmd.cmd_id = SET_WATERMARK;
+ cmd.param.param = val;
+ cmd_len += sizeof(cmd.param.param);
+
+ mutex_lock(&pse->cmd_mutex);
+ ret = mei_pse_send(pse, &cmd, cmd_len);
+ mutex_unlock(&pse->cmd_mutex);
+
+ return ret;
+}
+
+static ssize_t pse_watermark_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mei_pse *pse = file->private_data;
+ int ret, val;
+
+ ret = kstrtoint_from_user(buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ pse_set_watermark(pse, val);
+
+ return count;
+}
+
+static int pse_dump_trace(struct mei_pse *pse)
+{
+ struct pse_cmd cmd;
+ size_t cmd_len = sizeof(cmd.cmd_id);
+ int ret;
+
+ cmd.cmd_id = DUMP_TRACE;
+
+ mutex_lock(&pse->cmd_mutex);
+ ret = mei_pse_send(pse, &cmd, cmd_len);
+ mutex_unlock(&pse->cmd_mutex);
+
+ return ret;
+}
+
+static ssize_t pse_dump_trace_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mei_pse *pse = file->private_data;
+ int ret;
+ uint8_t val;
+
+ ret = kstrtou8_from_user(buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ if (!val)
+ return -EINVAL;
+
+ pse_dump_trace(pse);
+
+ return count;
+}
+
+static int pse_set_timeout(struct mei_pse *pse, int val)
+{
+ struct pse_cmd cmd;
+ size_t cmd_len = sizeof(cmd.cmd_id);
+ int ret;
+
+ if (val < -1 || val > 999) {
+		dev_err(&pse->cldev->dev, "invalid timeout value\n");
+		return -EINVAL;
+ }
+
+ cmd.cmd_id = SET_TIMEOUT;
+ cmd.param.param = val;
+ cmd_len += sizeof(cmd.param.param);
+
+ mutex_lock(&pse->cmd_mutex);
+ ret = mei_pse_send(pse, &cmd, cmd_len);
+ mutex_unlock(&pse->cmd_mutex);
+
+ return ret;
+}
+
+static ssize_t pse_timeout_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mei_pse *pse = file->private_data;
+ int ret, val;
+
+ ret = kstrtoint_from_user(buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ pse_set_timeout(pse, val);
+
+ return count;
+}
+
+static int pse_set_log_level(struct mei_pse *pse, int val)
+{
+ struct pse_cmd cmd;
+ size_t cmd_len = sizeof(cmd.cmd_id);
+ int ret;
+
+ if (val < 0 || val > 4) {
+ dev_err(&pse->cldev->dev, "unsupported log level\n");
+		return -EINVAL;
+ }
+
+ cmd.cmd_id = SET_LOG_LEVEL;
+ cmd.param.param = val;
+ cmd_len += sizeof(cmd.param.param);
+
+ mutex_lock(&pse->cmd_mutex);
+ ret = mei_pse_send(pse, &cmd, cmd_len);
+ mutex_unlock(&pse->cmd_mutex);
+
+ return ret;
+}
+
+static ssize_t pse_log_level_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mei_pse *pse = file->private_data;
+ int ret, val;
+
+ ret = kstrtoint_from_user(buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ pse_set_log_level(pse, val);
+
+ return count;
+}
+
+static int pse_set_time(struct mei_pse *pse)
+{
+ struct pse_cmd cmd;
+ size_t cmd_len = sizeof(cmd.cmd_id);
+ int ret;
+
+ cmd.cmd_id = SET_TIME;
+ cmd.param.time = ktime_get_ns();
+ cmd_len += sizeof(cmd.param.time);
+
+ mutex_lock(&pse->cmd_mutex);
+ ret = mei_pse_send(pse, &cmd, cmd_len);
+ mutex_unlock(&pse->cmd_mutex);
+
+ return ret;
+}
+
+static ssize_t pse_time_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mei_pse *pse = file->private_data;
+ int ret;
+
+ ret = pse_set_time(pse);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static int pse_get_time(struct mei_pse *pse, uint64_t *val)
+{
+ struct pse_cmd cmd;
+ size_t cmd_len = sizeof(cmd.cmd_id);
+			"unexpected response (%d)\n", hdr->notify.rsp);
+
+ cmd.cmd_id = GET_TIME;
+
+ mutex_lock(&pse->cmd_mutex);
+ ret = mei_pse_send(pse, &cmd, cmd_len);
+ mutex_unlock(&pse->cmd_mutex);
+ if (!ret) {
+ *val = pse->notif.cont.time;
+
+ dev_info(&pse->cldev->dev,
+ "time = (%llu) nanoseconds\n", *val);
+ }
+
+ return ret;
+}
+
+static ssize_t pse_time_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mei_pse *pse = file->private_data;
+ int ret, pos;
+ uint64_t val;
+ unsigned long addr = get_zeroed_page(GFP_KERNEL);
+
+ if (!addr)
+ return -ENOMEM;
+
+ ret = pse_get_time(pse, &val);
+ if (ret)
+ goto out;
+
+ pos = snprintf((char *)addr, PAGE_SIZE,
+ "pse time = (%llu) nanoseconds\n", val);
+
+ ret = simple_read_from_buffer(buf, count, ppos, (char *)addr, pos);
+
+out:
+ free_page(addr);
+ return ret;
+}
+
+static int pse_dump_power_data(struct mei_pse *pse)
+{
+ struct pse_cmd cmd;
+ size_t cmd_len = sizeof(cmd.cmd_id);
+ int ret;
+
+ cmd.cmd_id = DUMP_POWER_DATA;
+
+ mutex_lock(&pse->cmd_mutex);
+ ret = mei_pse_send(pse, &cmd, cmd_len);
+ mutex_unlock(&pse->cmd_mutex);
+
+ return ret;
+}
+
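+/* render per-state residency, count and percentage for each PM subsystem */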
+static int dump_power_state_data(struct mei_pse *pse,
+ char *addr, int pos, int len)
+{
+ const char * const names[] = {"EM7D", "SEM"};
+ const char *title =
+ "| power states | duration(ms) | count | percentage(%) |";
+ struct pm_subsys *subsys;
+ uint64_t total_duration, duration, num, frac;
+ int i, j;
+
+ for (i = 0; i < PM_SUBSYS_MAX; i++) {
+ subsys = &((struct pm_data *)pse->pm_data)->subsys[i];
+
+ pos += snprintf((char *)addr + pos,
+ len - pos,
+ "power state of %s:\n",
+ names[i]);
+
+ pos += snprintf((char *)addr + pos,
+ len - pos,
+ "%s\n",
+ title);
+
+ total_duration = 0;
+ for (j = 0; j < STATE_MAX; j++)
+ total_duration += subsys->status[j].duration;
+
+ for (j = 0; j < STATE_MAX; j++) {
+ duration = subsys->status[j].duration * 100;
+ num = duration / total_duration;
+ frac = (duration % total_duration *
+ 10000000 / total_duration + 5) / 10;
+
+ pos += snprintf((char *)addr + pos,
+ len - pos,
+ "|%13.16s |%13llu |%6llu |%7u.%06u |\n",
+ subsys->status[j].name,
+ subsys->status[j].duration,
+ subsys->status[j].count,
+ (uint32_t)num,
+ (uint32_t)frac);
+ }
+
+ pos += snprintf((char *)addr + pos, len - pos, "\n");
+ }
+
+ return pos;
+}
+
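+/* render per-device block counts for each subsystem, four devices per row */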
+static int dump_dev_power_data(struct mei_pse *pse,
+ char *addr, int pos, int len)
+{
+ const char * const names[] = {"EM7D", "SEM"};
+ struct pm_subsys *subsys;
+ int i, j;
+ const char *title = CONSTRUCTED_TITLE;
+ const char *format = CONSTRUCTED_FORMAT;
+
+ for (i = 0; i < PM_SUBSYS_MAX; i++) {
+ subsys = &((struct pm_data *)pse->pm_data)->subsys[i];
+
+ pos += snprintf((char *)addr + pos,
+ len - pos,
+ "device list of %s:\n",
+ names[i]);
+
+ pos += snprintf((char *)addr + pos,
+ len - pos,
+ "%s\n",
+ title);
+
+ for (j = 0; j < subsys->dev.dev_cnt; j += 4) {
+ switch (subsys->dev.dev_cnt - j) {
+ case 1:
+ pos += snprintf((char *)addr + pos,
+ len - pos,
+ format,
+ subsys->dev.dev[j].name,
+ subsys->dev.dev[j].block_cnt,
+ "", 0,
+ "", 0,
+ "", 0);
+ break;
+
+ case 2:
+ pos += snprintf((char *)addr + pos,
+ len - pos,
+ format,
+ subsys->dev.dev[j].name,
+ subsys->dev.dev[j].block_cnt,
+ subsys->dev.dev[j+1].name,
+ subsys->dev.dev[j+1].block_cnt,
+ "", 0,
+ "", 0);
+ break;
+
+ case 3:
+ pos += snprintf((char *)addr + pos,
+ len - pos,
+ format,
+ subsys->dev.dev[j].name,
+ subsys->dev.dev[j].block_cnt,
+ subsys->dev.dev[j+1].name,
+ subsys->dev.dev[j+1].block_cnt,
+ subsys->dev.dev[j+2].name,
+ subsys->dev.dev[j+2].block_cnt,
+ "", 0);
+ break;
+
+ default:
+ pos += snprintf((char *)addr + pos,
+ len - pos,
+ format,
+ subsys->dev.dev[j].name,
+ subsys->dev.dev[j].block_cnt,
+ subsys->dev.dev[j+1].name,
+ subsys->dev.dev[j+1].block_cnt,
+ subsys->dev.dev[j+2].name,
+ subsys->dev.dev[j+2].block_cnt,
+ subsys->dev.dev[j+3].name,
+ subsys->dev.dev[j+3].block_cnt);
+ break;
+ }
+ }
+
+ if (i < PM_SUBSYS_MAX - 1)
+ pos += snprintf((char *)addr + pos, len - pos, "\n");
+ }
+
+ return pos;
+}
+
+static ssize_t pse_power_data_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mei_pse *pse = file->private_data;
+ unsigned long addr = get_zeroed_page(GFP_KERNEL);
+ int ret, pos = 0;
+
+ if (!addr)
+ return -ENOMEM;
+
+ ret = pse_dump_power_data(pse);
+ if (ret)
+ goto out;
+
+ pos = dump_power_state_data(pse, (char *)addr, pos, PAGE_SIZE);
+ pos = dump_dev_power_data(pse, (char *)addr, pos, PAGE_SIZE);
+
+ ret = simple_read_from_buffer(buf, count, ppos, (char *)addr, pos);
+
+out:
+ free_page(addr);
+ return ret;
+}
+
+static int pse_get_fw_ver(struct mei_pse *pse, struct fw_version *ver)
+{
+ struct pse_cmd cmd;
+ size_t cmd_len = sizeof(cmd.cmd_id);
+ int ret;
+
+ cmd.cmd_id = GET_FW_VER;
+
+ mutex_lock(&pse->cmd_mutex);
+ ret = mei_pse_send(pse, &cmd, cmd_len);
+ if (!ret) {
+ memcpy(ver, &pse->notif.cont.ver, sizeof(*ver));
+
+ dev_info(&pse->cldev->dev,
+ "fw version: %u.%u.%u.%u\n",
+ ver->major, ver->minor, ver->hotfix, ver->build);
+ }
+ mutex_unlock(&pse->cmd_mutex);
+
+ return ret;
+}
+
+static ssize_t pse_fw_ver_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct mei_pse *pse = file->private_data;
+ int ret, pos;
+ struct fw_version ver;
+ unsigned long addr = get_zeroed_page(GFP_KERNEL);
+
+ if (!addr)
+ return -ENOMEM;
+
+ ret = pse_get_fw_ver(pse, &ver);
+ if (ret)
+ goto out;
+
+ pos = snprintf((char *)addr, PAGE_SIZE,
+ "fw version: %u.%u.%u.%u\n",
+ ver.major, ver.minor, ver.hotfix, ver.build);
+
+ ret = simple_read_from_buffer(buf, count, ppos, (char *)addr, pos);
+
+out:
+ free_page(addr);
+ return ret;
+}
+
+#define PSE_DFS_ADD_FILE(name) \
+ debugfs_create_file(#name, 0644, pse->dfs_dir, pse, \
+ &pse_dfs_##name##_fops)
+
+#define PSE_DFS_FILE_OPS(name) \
+static const struct file_operations pse_dfs_##name##_fops = { \
+ .read = pse_##name##_read, \
+ .write = pse_##name##_write, \
+ .open = simple_open, \
+}
+
+#define PSE_DFS_FILE_READ_OPS(name) \
+static const struct file_operations pse_dfs_##name##_fops = { \
+ .read = pse_##name##_read, \
+ .open = simple_open, \
+}
+
+#define PSE_DFS_FILE_WRITE_OPS(name) \
+static const struct file_operations pse_dfs_##name##_fops = { \
+ .write = pse_##name##_write, \
+ .open = simple_open, \
+}
+
+PSE_DFS_FILE_WRITE_OPS(log_onoff);
+PSE_DFS_FILE_WRITE_OPS(watermark);
+PSE_DFS_FILE_WRITE_OPS(dump_trace);
+PSE_DFS_FILE_WRITE_OPS(timeout);
+PSE_DFS_FILE_WRITE_OPS(log_level);
+
+PSE_DFS_FILE_OPS(time);
+
+PSE_DFS_FILE_READ_OPS(fw_ver);
+PSE_DFS_FILE_READ_OPS(power_data);
+
+/* callback for command response receive */
+static void mei_pse_rx(struct mei_cl_device *cldev)
+{
+ int ret;
+ struct pse_notif *notif;
+ struct mei_pse *pse = mei_cldev_get_drvdata(cldev);
+ struct file *file;
+ loff_t *pos;
+
+ ret = mei_cldev_recv(cldev, pse->recv_buf, MAX_RECV_SIZE);
+ if (ret < 0) {
+		dev_err(&cldev->dev, "receive failed: %d\n", ret);
+ return;
+ }
+ notif = (struct pse_notif *)pse->recv_buf;
+
+ switch (notif->cmd_id) {
+ case TRACE_DATA_NOTIF:
+ if (notif->source) {
+ file = pse->sem_file;
+ pos = &pse->sem_pos;
+ } else {
+ file = pse->em_file;
+ pos = &pse->em_pos;
+ }
+
+ if (*pos < MAX_LOG_SIZE) {
+ ret = kernel_write(file,
+ pse->recv_buf + CONT_OFFSET,
+ ret - CONT_OFFSET,
+ pos);
+ if (ret < 0)
+ dev_err(&cldev->dev,
+ "error in writing log %d\n", ret);
+ else
+ *pos += ret;
+		} else {
+			dev_warn(&cldev->dev,
+				 "log already exceeds max size\n");
+		}
+ break;
+
+ case LOG_ONOFF:
+ case SET_WATERMARK:
+ case DUMP_TRACE:
+ case SET_TIMEOUT:
+ case SET_LOG_LEVEL:
+ case SET_TIME:
+ case GET_TIME:
+ case GET_FW_VER:
+ memcpy(&pse->notif, notif, ret);
+
+ if (!completion_done(&pse->response))
+ complete(&pse->response);
+ break;
+
+ case DUMP_POWER_DATA:
+ if (notif->status == 0) {
+ memcpy(pse->pm_data + pse->pm_data_pos,
+ pse->recv_buf + NOTIF_HEADER_LEN,
+ ret - NOTIF_HEADER_LEN);
+ pse->pm_data_pos += ret - NOTIF_HEADER_LEN;
+
+ if (pse->pm_data_pos >= sizeof(struct pm_data)) {
+ pse->pm_data_pos = 0;
+ memcpy(&pse->notif, notif, NOTIF_HEADER_LEN);
+
+ if (!completion_done(&pse->response))
+ complete(&pse->response);
+ }
+ } else {
+			dev_err(&cldev->dev, "error receiving power data\n");
+
+ pse->pm_data_pos = 0;
+ memcpy(&pse->notif, notif, NOTIF_HEADER_LEN);
+
+ if (!completion_done(&pse->response))
+ complete(&pse->response);
+ }
+ break;
+
+ default:
+ dev_err(&cldev->dev,
+			"received unsupported notification\n");
+ break;
+ }
+}
+
+static int mei_pse_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct mei_pse *pse;
+ int ret;
+ uint32_t order = get_order(MAX_RECV_SIZE);
+
+ pse = kzalloc(sizeof(struct mei_pse), GFP_KERNEL);
+ if (!pse)
+ return -ENOMEM;
+
+ pse->recv_buf = (uint8_t *)__get_free_pages(GFP_KERNEL, order);
+ if (!pse->recv_buf) {
+ kfree(pse);
+ return -ENOMEM;
+ }
+
+ pse->pm_data = (uint8_t *)__get_free_pages(GFP_KERNEL, order);
+ if (!pse->pm_data) {
+ free_pages((unsigned long)pse->recv_buf, order);
+ kfree(pse);
+ return -ENOMEM;
+ }
+ pse->pm_data_pos = 0;
+
+ pse->cldev = cldev;
+ mutex_init(&pse->cmd_mutex);
+ init_completion(&pse->response);
+
+ mei_cldev_set_drvdata(cldev, pse);
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(&cldev->dev,
+ "couldn't enable pse client ret=%d\n", ret);
+ goto err_out;
+ }
+
+ ret = mei_cldev_register_rx_cb(cldev, mei_pse_rx);
+ if (ret) {
+ dev_err(&cldev->dev,
+ "couldn't register rx event ret=%d\n", ret);
+ goto err_disable;
+ }
+
+ pse->em_file = filp_open(EM_LOG_FILE,
+ O_CREAT | O_RDWR | O_LARGEFILE | O_TRUNC,
+ 0600);
+ if (IS_ERR(pse->em_file)) {
+ dev_err(&cldev->dev,
+ "filp_open(%s) failed\n", EM_LOG_FILE);
+ ret = PTR_ERR(pse->em_file);
+ goto err_disable;
+ }
+ pse->em_pos = 0;
+
+ pse->sem_file = filp_open(SEM_LOG_FILE,
+ O_CREAT | O_RDWR | O_LARGEFILE | O_TRUNC,
+ 0600);
+ if (IS_ERR(pse->sem_file)) {
+ dev_err(&cldev->dev,
+ "filp_open(%s) failed\n", SEM_LOG_FILE);
+ ret = PTR_ERR(pse->sem_file);
+ goto err_close;
+ }
+ pse->sem_pos = 0;
+
+ pse->dfs_dir = debugfs_create_dir("vsc_pse", NULL);
+ if (pse->dfs_dir) {
+ PSE_DFS_ADD_FILE(log_onoff);
+ PSE_DFS_ADD_FILE(watermark);
+ PSE_DFS_ADD_FILE(dump_trace);
+ PSE_DFS_ADD_FILE(timeout);
+ PSE_DFS_ADD_FILE(log_level);
+ PSE_DFS_ADD_FILE(time);
+ PSE_DFS_ADD_FILE(fw_ver);
+ PSE_DFS_ADD_FILE(power_data);
+ }
+
+ return 0;
+
+err_close:
+ filp_close(pse->em_file, NULL);
+
+err_disable:
+ mei_cldev_disable(cldev);
+
+err_out:
+ free_pages((unsigned long)pse->pm_data, order);
+
+ free_pages((unsigned long)pse->recv_buf, order);
+
+ kfree(pse);
+
+ return ret;
+}
+
+static void mei_pse_remove(struct mei_cl_device *cldev)
+{
+ struct mei_pse *pse = mei_cldev_get_drvdata(cldev);
+ uint32_t order = get_order(MAX_RECV_SIZE);
+
+ if (!completion_done(&pse->response))
+ complete(&pse->response);
+
+ mei_cldev_disable(cldev);
+
+ debugfs_remove_recursive(pse->dfs_dir);
+
+ filp_close(pse->em_file, NULL);
+ filp_close(pse->sem_file, NULL);
+
+	/* wait until no buffer access */
+ mutex_lock(&pse->cmd_mutex);
+ mutex_unlock(&pse->cmd_mutex);
+
+ free_pages((unsigned long)pse->pm_data, order);
+
+ free_pages((unsigned long)pse->recv_buf, order);
+
+ kfree(pse);
+}
+
+#define MEI_UUID_PSE UUID_LE(0xD035E00C, 0x6DAE, 0x4B6D, \
+ 0xB4, 0x7A, 0xF8, 0x8E, 0x30, 0x2A, 0x40, 0x4E)
+
+static const struct mei_cl_device_id mei_pse_tbl[] = {
+ { MEI_PSE_DRIVER_NAME, MEI_UUID_PSE, MEI_CL_VERSION_ANY },
+
+ /* required last entry */
+ { }
+};
+MODULE_DEVICE_TABLE(mei, mei_pse_tbl);
+
+static struct mei_cl_driver mei_pse_driver = {
+ .id_table = mei_pse_tbl,
+ .name = MEI_PSE_DRIVER_NAME,
+
+ .probe = mei_pse_probe,
+ .remove = mei_pse_remove,
+};
+
+static int __init mei_pse_init(void)
+{
+ int ret;
+
+ ret = mei_cldev_driver_register(&mei_pse_driver);
+ if (ret) {
+ pr_err("mei pse driver registration failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit mei_pse_exit(void)
+{
+ mei_cldev_driver_unregister(&mei_pse_driver);
+}
+
+module_init(mei_pse_init);
+module_exit(mei_pse_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Device driver for Intel VSC PSE client");
@@ -46,4 +46,11 @@ config INTEL_MEI_TXE
Supported SoCs:
Intel Bay Trail
+config INTEL_MEI_VSC
+ tristate "Intel Vision Sensing Controller device with ME interface"
+ select INTEL_MEI
+ depends on X86 && SPI
+ help
+	  MEI over SPI driver for the Intel Vision Sensing Controller
+	  (IVSC) device.
+
source "drivers/misc/mei/hdcp/Kconfig"
@@ -22,6 +22,10 @@ obj-$(CONFIG_INTEL_MEI_TXE) += mei-txe.o
mei-txe-objs := pci-txe.o
mei-txe-objs += hw-txe.o
+obj-$(CONFIG_INTEL_MEI_VSC) += mei-vsc.o
+mei-vsc-objs := spi-vsc.o
+mei-vsc-objs += hw-vsc.o
+
mei-$(CONFIG_EVENT_TRACING) += mei-trace.o
CFLAGS_mei-trace.o = -I$(src)
new file mode 100644
@@ -0,0 +1,1637 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021, Intel Corporation. All rights reserved.
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ */
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+#include <linux/swap.h>
+#include <linux/types.h>
+
+#include "hbm.h"
+#include "hw-vsc.h"
+#include "mei_dev.h"
+#include "mei-trace.h"
+
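+/* run a single full-duplex SPI transfer of len bytes */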
+static int spi_dev_xfer(struct mei_vsc_hw *hw, void *out_data, void *in_data,
+ int len)
+{
+ hw->xfer.tx_buf = out_data;
+ hw->xfer.rx_buf = in_data;
+ hw->xfer.len = len;
+
+ spi_message_init_with_transfers(&hw->msg, &hw->xfer, 1);
+ return spi_sync_locked(hw->spi, &hw->msg);
+}
+
+#define SPI_XFER_PACKET_CRC(pkt) (*(u32 *)((pkt)->buf + (pkt)->hdr.len))
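+/* check the packet CRC and map firmware-reported errors to errno values */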
+static int spi_validate_packet(struct mei_vsc_hw *hw,
+ struct spi_xfer_packet *pkt)
+{
+ u32 base_crc;
+ u32 crc;
+ struct spi_xfer_hdr *hdr = &pkt->hdr;
+
+ base_crc = SPI_XFER_PACKET_CRC(pkt);
+ crc = ~crc32(~0, (u8 *)pkt, sizeof(struct spi_xfer_hdr) + pkt->hdr.len);
+
+ if (base_crc != crc) {
+ dev_err(&hw->spi->dev, "%s crc error cmd %x 0x%x 0x%x\n",
+ __func__, hdr->cmd, base_crc, crc);
+ return -EINVAL;
+ }
+
+ if (hdr->cmd == CMD_SPI_FATAL_ERR) {
+ dev_err(&hw->spi->dev,
+ "receive fatal error from FW cmd %d %d %d.\nCore dump: %s\n",
+ hdr->cmd, hdr->seq, hw->seq, (char *)pkt->buf);
+ return -EIO;
+ } else if (hdr->cmd == CMD_SPI_NACK || hdr->cmd == CMD_SPI_BUSY ||
+ hdr->seq != hw->seq) {
+ dev_err(&hw->spi->dev, "receive error from FW cmd %d %d %d\n",
+ hdr->cmd, hdr->seq, hw->seq);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static inline bool spi_rom_xfer_asserted(struct mei_vsc_hw *hw)
+{
+ return gpiod_get_value_cansleep(hw->wakeuphost);
+}
+
+static inline bool spi_xfer_asserted(struct mei_vsc_hw *hw)
+{
+ return atomic_read(&hw->lock_cnt) > 0 &&
+ gpiod_get_value_cansleep(hw->wakeuphost);
+}
+
+static void spi_xfer_lock(struct mei_vsc_hw *hw)
+{
+ gpiod_set_value_cansleep(hw->wakeupfw, 0);
+}
+
+static void spi_xfer_unlock(struct mei_vsc_hw *hw)
+{
+ atomic_dec_if_positive(&hw->lock_cnt);
+ gpiod_set_value_cansleep(hw->wakeupfw, 1);
+}
+
+static bool spi_xfer_locked(struct mei_vsc_hw *hw)
+{
+ return !gpiod_get_value_cansleep(hw->wakeupfw);
+}
+
+static bool spi_need_read(struct mei_vsc_hw *hw)
+{
+ return spi_xfer_asserted(hw) && !spi_xfer_locked(hw);
+}
+
+#define WAIT_FW_ASSERTED_TIMEOUT (2 * HZ)
+static int spi_xfer_wait_asserted(struct mei_vsc_hw *hw)
+{
+ wait_event_timeout(hw->xfer_wait, spi_xfer_asserted(hw),
+ WAIT_FW_ASSERTED_TIMEOUT);
+
+ dev_dbg(&hw->spi->dev, "%s %d %d %d\n", __func__,
+ atomic_read(&hw->lock_cnt),
+ gpiod_get_value_cansleep(hw->wakeupfw),
+ gpiod_get_value_cansleep(hw->wakeuphost));
+	if (!spi_xfer_asserted(hw))
+		return -ETIME;
+
+	return 0;
+}
+
+static int spi_wakeup_request(struct mei_vsc_hw *hw)
+{
+ /* wakeup spi slave and wait for response */
+ spi_xfer_lock(hw);
+ return spi_xfer_wait_asserted(hw);
+}
+
+static void spi_wakeup_release(struct mei_vsc_hw *hw)
+{
+	spi_xfer_unlock(hw);
+}
+
+static int find_sync_byte(u8 *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (buf[i] == PACKET_SYNC)
+ return i;
+
+ return -1;
+}
+
+#define PACKET_PADDING_SIZE 1
+#define MAX_XFER_COUNT 5
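+/*
+ * Transfer one packet and collect the ack, resynchronizing on the
+ * PACKET_SYNC byte; gives up after MAX_XFER_COUNT partial transfers.
+ */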
+static int mei_vsc_xfer_internal(struct mei_vsc_hw *hw,
+ struct spi_xfer_packet *pkt,
+ struct spi_xfer_packet *ack_pkt)
+{
+ u8 *rx_buf = hw->rx_buf1;
+ u8 *tx_buf = hw->tx_buf1;
+ int next_xfer_len = PACKET_SIZE(pkt) + XFER_TIMEOUT_BYTES;
+ int offset = 0;
+ bool synced = false;
+ int len;
+ int count_down = MAX_XFER_COUNT;
+ int ret = 0;
+ int i;
+
+ dev_dbg(&hw->spi->dev, "spi tx pkt begin: %s %d %d\n", __func__,
+ spi_xfer_asserted(hw), gpiod_get_value_cansleep(hw->wakeupfw));
+ memcpy(tx_buf, pkt, PACKET_SIZE(pkt));
+ memset(rx_buf, 0, MAX_XFER_BUFFER_SIZE);
+
+ do {
+ dev_dbg(&hw->spi->dev,
+ "spi tx pkt partial ing: %s %d %d %d %d\n", __func__,
+ spi_xfer_asserted(hw),
+ gpiod_get_value_cansleep(hw->wakeupfw), next_xfer_len,
+ synced);
+
+ count_down--;
+ ret = spi_dev_xfer(hw, tx_buf, rx_buf, next_xfer_len);
+ if (ret)
+ return ret;
+
+ memset(tx_buf, 0, MAX_XFER_BUFFER_SIZE);
+ if (!synced) {
+ i = find_sync_byte(rx_buf, next_xfer_len);
+ if (i >= 0) {
+ synced = true;
+ len = next_xfer_len - i;
+ } else {
+ continue;
+ }
+
+ } else {
+ i = 0;
+ len = min_t(int, next_xfer_len,
+ sizeof(*ack_pkt) - offset);
+ }
+
+		/* append at byte offset into the ack packet, not element offset */
+		memcpy((u8 *)ack_pkt + offset, &rx_buf[i], len);
+ offset += len;
+
+ if (offset >= sizeof(ack_pkt->hdr))
+ next_xfer_len = PACKET_SIZE(ack_pkt) - offset +
+ PACKET_PADDING_SIZE;
+
+ } while (next_xfer_len > 0 && count_down > 0);
+
+ dev_dbg(&hw->spi->dev, "spi tx pkt done: %s %d %d cmd %d %d %d %d\n",
+ __func__, next_xfer_len, count_down, ack_pkt->hdr.sync,
+ ack_pkt->hdr.cmd, ack_pkt->hdr.len, ack_pkt->hdr.seq);
+
+ if (next_xfer_len > 0)
+ return -EAGAIN;
+
+ return spi_validate_packet(hw, ack_pkt);
+}
+
+static int mei_vsc_xfer(struct mei_vsc_hw *hw, u8 cmd, void *tx, u32 tx_len,
+ void *rx, int rx_max_len, u32 *rx_len)
+{
+ struct spi_xfer_packet *pkt;
+ struct spi_xfer_packet *ack_pkt;
+ u32 *crc;
+ int ret;
+
+ if (!tx || !rx || tx_len > MAX_SPI_MSG_SIZE)
+ return -EINVAL;
+
+ if (rx_len)
+ *rx_len = 0;
+
+	pkt = kzalloc(sizeof(*pkt) + sizeof(*ack_pkt), GFP_KERNEL);
+	if (!pkt)
+		return -ENOMEM;
+	/* the ack packet lives in the second half of the allocation */
+	ack_pkt = pkt + 1;
+
+ pkt->hdr.sync = PACKET_SYNC;
+ pkt->hdr.cmd = cmd;
+ pkt->hdr.seq = ++hw->seq;
+ pkt->hdr.len = tx_len;
+
+ memcpy(pkt->buf, tx, tx_len);
+ crc = (u32 *)(pkt->buf + tx_len);
+ *crc = ~crc32(~0, (u8 *)pkt, sizeof(pkt->hdr) + tx_len);
+
+ mutex_lock(&hw->mutex);
+
+ ret = spi_wakeup_request(hw);
+ if (ret) {
+		dev_err(&hw->spi->dev, "failed to wake up vsc FW\n");
+ goto out;
+ }
+
+ ret = mei_vsc_xfer_internal(hw, pkt, ack_pkt);
+ if (ret)
+ goto out;
+
+ if (ack_pkt->hdr.len > 0) {
+ int len;
+
+ len = (ack_pkt->hdr.len < rx_max_len) ? ack_pkt->hdr.len :
+ rx_max_len;
+ memcpy(rx, ack_pkt->buf, len);
+ if (rx_len)
+ *rx_len = len;
+ }
+
+out:
+ spi_wakeup_release(hw);
+ mutex_unlock(&hw->mutex);
+ kfree(pkt);
+ return ret;
+}
+
+static int mei_vsc_read_raw(struct mei_vsc_hw *hw, u8 *buf, u32 max_len,
+ u32 *len)
+{
+ struct host_timestamp ts = { 0 };
+
+ ts.realtime = ktime_to_ns(ktime_get_real());
+ ts.boottime = ktime_to_ns(ktime_get_boottime());
+
+ return mei_vsc_xfer(hw, CMD_SPI_READ, &ts, sizeof(ts), buf, max_len,
+ len);
+}
+
+static int mei_vsc_write_raw(struct mei_vsc_hw *hw, u8 *buf, u32 len)
+{
+ u8 status = 0;
+	u32 rx_len;
+
+ return mei_vsc_xfer(hw, CMD_SPI_WRITE, buf, len, &status,
+ sizeof(status), &rx_len);
+}
+
+#define LOADER_XFER_RETRY_COUNT 25
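+/*
+ * ROM-phase transfer: payload words are byte-swapped in place and the
+ * transfer waits for the ROM to deassert the wakeup line, retrying up
+ * to LOADER_XFER_RETRY_COUNT times.
+ */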
+static int spi_rom_dev_xfer(struct mei_vsc_hw *hw, void *out_data,
+ void *in_data, int len)
+{
+ int ret;
+ int i;
+ u32 *tmp = out_data;
+ int retry = 0;
+
+ if (len % 4 != 0)
+ return -EINVAL;
+
+ for (i = 0; i < len / 4; i++)
+ tmp[i] = ___constant_swab32(tmp[i]);
+
+ mutex_lock(&hw->mutex);
+ while (retry < LOADER_XFER_RETRY_COUNT) {
+ if (!spi_rom_xfer_asserted(hw))
+ break;
+
+ msleep(20);
+ retry++;
+ }
+
+ if (retry >= LOADER_XFER_RETRY_COUNT) {
+ dev_err(&hw->spi->dev, "%s retry %d times gpio %d\n", __func__,
+ retry, spi_rom_xfer_asserted(hw));
+ mutex_unlock(&hw->mutex);
+ return -EAGAIN;
+ }
+
+ ret = spi_dev_xfer(hw, out_data, in_data, len);
+ mutex_unlock(&hw->mutex);
+ if (!in_data || ret)
+ return ret;
+
+ tmp = in_data;
+ for (i = 0; i < len / 4; i++)
+ tmp[i] = ___constant_swab32(tmp[i]);
+
+ return 0;
+}
+
+#define VSC_RESET_PIN_TOGGLE_INTERVAL 20
+#define VSC_ROM_BOOTUP_DELAY_TIME 10
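+/* toggle the firmware reset pin, then give the ROM time to boot */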
+static int vsc_reset(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ gpiod_set_value_cansleep(hw->resetfw, 1);
+ msleep(VSC_RESET_PIN_TOGGLE_INTERVAL);
+ gpiod_set_value_cansleep(hw->resetfw, 0);
+ msleep(VSC_RESET_PIN_TOGGLE_INTERVAL);
+ gpiod_set_value_cansleep(hw->resetfw, 1);
+ msleep(VSC_ROM_BOOTUP_DELAY_TIME);
+	/* default the host wakeup pin to 1 to avoid spurious host irq interrupts */
+ gpiod_set_value_cansleep(hw->wakeupfw, 1);
+ return 0;
+}
+
+/* %s is the sensor name, resolved and formatted at runtime */
+static const char * const fw_name_template[][3] = {
+ {
+ "vsc/soc_a1/ivsc_fw_a1.bin",
+ "vsc/soc_a1/ivsc_pkg_%s_0_a1.bin",
+ "vsc/soc_a1/ivsc_skucfg_%s_0_1_a1.bin",
+ },
+ {
+ "vsc/soc_a1_prod/ivsc_fw_a1_prod.bin",
+ "vsc/soc_a1_prod/ivsc_pkg_%s_0_a1_prod.bin",
+ "vsc/soc_a1_prod/ivsc_skucfg_%s_0_1_a1_prod.bin",
+ },
+};
+
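+/*
+ * Read the EFUSE1 and strap registers via the ROM protocol to identify
+ * the silicon stepping and key source, then pick the matching firmware
+ * file names.
+ */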
+static int check_silicon(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ struct vsc_rom_master_frame *frame =
+ (struct vsc_rom_master_frame *)hw->fw.tx_buf;
+ struct vsc_rom_slave_token *token =
+ (struct vsc_rom_slave_token *)hw->fw.rx_buf;
+ int ret;
+ u32 efuse1;
+ u32 strap;
+
+ dev_dbg(dev->dev, "%s size %zu %zu\n", __func__, sizeof(*frame),
+ sizeof(*token));
+ frame->magic = VSC_MAGIC_NUM;
+ frame->cmd = VSC_CMD_DUMP_MEM;
+
+ frame->data.dump_mem.addr = EFUSE1_ADDR;
+ frame->data.dump_mem.len = 4;
+
+ ret = spi_rom_dev_xfer(hw, frame, token, VSC_ROM_SPI_PKG_SIZE);
+ if (ret || token->token == VSC_TOKEN_ERROR) {
+ dev_err(dev->dev, "%s %d %d %d\n", __func__, __LINE__, ret,
+ token->token);
+ return ret;
+ }
+
+ memset(frame, 0, sizeof(*frame));
+ memset(token, 0, sizeof(*token));
+ frame->magic = VSC_MAGIC_NUM;
+ frame->cmd = VSC_CMD_RESERVED;
+ ret = spi_rom_dev_xfer(hw, frame, token, VSC_ROM_SPI_PKG_SIZE);
+ if (ret || token->token == VSC_TOKEN_ERROR ||
+ token->token != VSC_TOKEN_DUMP_RESP) {
+ dev_err(dev->dev, "%s %d %d %d\n", __func__, __LINE__, ret,
+ token->token);
+ return -EIO;
+ }
+
+ efuse1 = *(u32 *)token->payload;
+ dev_dbg(dev->dev, "%s efuse1=%d\n", __func__, efuse1);
+
+ /* to check the silicon main and sub version */
+ hw->fw.main_ver = (efuse1 >> SI_MAINSTEPPING_VERSION_OFFSET) &
+ SI_MAINSTEPPING_VERSION_MASK;
+ hw->fw.sub_ver = (efuse1 >> SI_SUBSTEPPING_VERSION_OFFSET) &
+ SI_SUBSTEPPING_VERSION_MASK;
+ if (hw->fw.main_ver != SI_MAINSTEPPING_VERSION_A) {
+ dev_err(dev->dev, "%s: silicon main version error(%d)\n",
+ __func__, hw->fw.main_ver);
+ return -EINVAL;
+ }
+ if (hw->fw.sub_ver != SI_SUBSTEPPING_VERSION_0 &&
+ hw->fw.sub_ver != SI_SUBSTEPPING_VERSION_1) {
+ dev_dbg(dev->dev, "%s: silicon sub version error(%d)\n", __func__,
+ hw->fw.sub_ver);
+ return -EINVAL;
+ }
+
+ /* to get the silicon strap key: debug or production ? */
+ memset(frame, 0, sizeof(*frame));
+ memset(token, 0, sizeof(*token));
+ frame->magic = VSC_MAGIC_NUM;
+ frame->cmd = VSC_CMD_DUMP_MEM;
+ frame->data.dump_mem.addr = STRAP_ADDR;
+ frame->data.dump_mem.len = 4;
+
+ ret = spi_rom_dev_xfer(hw, frame, token, VSC_ROM_SPI_PKG_SIZE);
+ if (ret || token->token == VSC_TOKEN_ERROR) {
+ dev_err(dev->dev, "%s: transfer failed or invalid token\n",
+ __func__);
+ return ret;
+ }
+
+ frame->magic = VSC_MAGIC_NUM;
+ frame->cmd = VSC_CMD_RESERVED;
+ ret = spi_rom_dev_xfer(hw, frame, token, VSC_ROM_SPI_PKG_SIZE);
+ if (ret || token->token == VSC_TOKEN_ERROR ||
+ token->token != VSC_TOKEN_DUMP_RESP) {
+ dev_err(dev->dev,
+ "%s: transfer failed or invalid token-> (token = %d)\n",
+ __func__, token->token);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev->dev,
+ "%s: getting the memory(0x%0x), step 2 payload: 0x%0x\n",
+ __func__, STRAP_ADDR, *(u32 *)token->payload);
+
+ strap = *(u32 *)token->payload;
+ dev_dbg(dev->dev, "%s: strap = 0x%x\n", __func__, strap);
+
+ /* to check the silicon strap key source */
+ hw->fw.key_src =
+ (strap >> SI_STRAP_KEY_SRC_OFFSET) & SI_STRAP_KEY_SRC_MASK;
+
+ dev_dbg(dev->dev, "%s: silicon version check done: %s%s\n", __func__,
+ hw->fw.sub_ver == SI_SUBSTEPPING_VERSION_0 ? "A0" : "A1",
+ hw->fw.key_src == SI_STRAP_KEY_SRC_DEBUG ? "" : "-prod");
+ if (hw->fw.sub_ver == SI_SUBSTEPPING_VERSION_1) {
+ if (hw->fw.key_src == SI_STRAP_KEY_SRC_DEBUG) {
+ snprintf(hw->fw.fw_file_name,
+ sizeof(hw->fw.fw_file_name),
+				 "%s", fw_name_template[0][0]);
+ snprintf(hw->fw.sensor_file_name,
+ sizeof(hw->fw.sensor_file_name),
+ fw_name_template[0][1], hw->cam_sensor_name);
+ snprintf(hw->fw.sku_cnf_file_name,
+ sizeof(hw->fw.sku_cnf_file_name),
+ fw_name_template[0][2], hw->cam_sensor_name);
+ } else {
+ snprintf(hw->fw.fw_file_name,
+ sizeof(hw->fw.fw_file_name),
+				 "%s", fw_name_template[1][0]);
+ snprintf(hw->fw.sensor_file_name,
+ sizeof(hw->fw.sensor_file_name),
+ fw_name_template[1][1], hw->cam_sensor_name);
+ snprintf(hw->fw.sku_cnf_file_name,
+ sizeof(hw->fw.sku_cnf_file_name),
+ fw_name_template[1][2], hw->cam_sensor_name);
+ }
+ }
+
+ return 0;
+}
+
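+/*
+ * Walk the signed images inside the combined firmware file and record
+ * each fragment's data pointer, size and flash load location.
+ */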
+static int parse_main_fw(struct mei_device *dev, const struct firmware *fw)
+{
+ struct bootloader_sign *bootloader = NULL;
+ struct firmware_sign *arc_sem = NULL;
+ struct firmware_sign *em7d = NULL;
+ struct firmware_sign *ace_run = NULL;
+ struct firmware_sign *ace_vis = NULL;
+ struct firmware_sign *ace_conf = NULL;
+ struct vsc_boot_img *img = (struct vsc_boot_img *)fw->data;
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ struct manifest *man = NULL;
+ struct fragment *bootl_frag = &hw->fw.frags[BOOT_IMAGE_TYPE];
+ struct fragment *arcsem_frag = &hw->fw.frags[ARC_SEM_IMG_TYPE];
+ struct fragment *acer_frag = &hw->fw.frags[ACER_IMG_TYPE];
+ struct fragment *acev_frag = &hw->fw.frags[ACEV_IMG_TYPE];
+ struct fragment *acec_frag = &hw->fw.frags[ACEC_IMG_TYPE];
+ struct fragment *em7d_frag = &hw->fw.frags[EM7D_IMG_TYPE];
+ struct firmware_sign *firmwares[IMG_CNT_MAX];
+ int i;
+
+ if (!img || img->magic != VSC_FILE_MAGIC) {
+		dev_err(dev->dev, "invalid image file\n");
+ return -EINVAL;
+ }
+
+ if (img->image_count < IMG_BOOT_ARC_EM7D ||
+ img->image_count > IMG_CNT_MAX) {
+ dev_err(dev->dev, "%s: image count error: image_count=0x%x\n",
+ __func__, img->image_count);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev->dev, "%s: img->image_count=%d\n", __func__,
+ img->image_count);
+
+ /* only two lower bytes are used */
+ hw->fw.fw_option = img->option & 0xFFFF;
+ /* image not include bootloader */
+ hw->fw.fw_cnt = img->image_count - 1;
+
+ bootloader =
+ (struct bootloader_sign *)(img->image_loc + img->image_count);
+ if ((u8 *)bootloader > (fw->data + fw->size))
+ return -EINVAL;
+
+ if (bootloader->magic != VSC_FW_MAGIC) {
+ dev_err(dev->dev,
+ "bootloader signed magic error! magic number 0x%08x, image size 0x%08x\n",
+ bootloader->magic, bootloader->image_size);
+ return -EINVAL;
+ }
+
+ man = (struct manifest *)((char *)bootloader->image +
+ bootloader->image_size - SIG_SIZE -
+ sizeof(struct manifest) - CSSHEADER_SIZE);
+ if (man->svn == MAX_SVN_VALUE)
+ hw->fw.svn = MAX_SVN_VALUE;
+ else if (hw->fw.svn == 0)
+ hw->fw.svn = man->svn;
+
+ dev_dbg(dev->dev, "%s: svn: 0x%08X", __func__, hw->fw.svn);
+	/* currently only silicon versions A0 and A1 are supported */
+ if ((hw->fw.sub_ver == SI_SUBSTEPPING_VERSION_0 &&
+ hw->fw.svn != MAX_SVN_VALUE) ||
+ (hw->fw.sub_ver == SI_SUBSTEPPING_VERSION_1 &&
+ hw->fw.svn == MAX_SVN_VALUE)) {
+ dev_err(dev->dev,
+			"silicon version and image svn do not match (A%s:0x%x)\n",
+ hw->fw.sub_ver == SI_SUBSTEPPING_VERSION_0 ? "0" : "1",
+ hw->fw.svn);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < img->image_count - 1; i++) {
+ if (i == 0) {
+ firmwares[i] =
+ (struct firmware_sign *)(bootloader->image +
+ bootloader->image_size);
+ dev_dbg(dev->dev,
+ "FW (%d/%d) magic number 0x%08x, image size 0x%08x\n",
+ i, img->image_count, firmwares[i]->magic,
+ firmwares[i]->image_size);
+ continue;
+ }
+
+ firmwares[i] =
+ (struct firmware_sign *)(firmwares[i - 1]->image +
+ firmwares[i - 1]->image_size);
+
+ if ((u8 *)firmwares[i] > fw->data + fw->size)
+ return -EINVAL;
+
+ dev_dbg(dev->dev,
+ "FW (%d/%d) magic number 0x%08x, image size 0x%08x\n", i,
+ img->image_count, firmwares[i]->magic,
+ firmwares[i]->image_size);
+ if (firmwares[i]->magic != VSC_FW_MAGIC) {
+ dev_err(dev->dev,
+ "FW (%d/%d) magic error! magic number 0x%08x, image size 0x%08x\n",
+ i, img->image_count, firmwares[i]->magic,
+ firmwares[i]->image_size);
+
+ return -EINVAL;
+ }
+ }
+
+ arc_sem = firmwares[0];
+ if (img->image_count >= IMG_BOOT_ARC_EM7D)
+ em7d = firmwares[img->image_count - 2];
+
+ if (img->image_count >= IMG_BOOT_ARC_ACER_EM7D)
+ ace_run = firmwares[1];
+
+ if (img->image_count >= IMG_BOOT_ARC_ACER_ACEV_EM7D)
+ ace_vis = firmwares[2];
+
+ if (img->image_count >= IMG_BOOT_ARC_ACER_ACEV_ACECNF_EM7D)
+ ace_conf = firmwares[3];
+
+ bootl_frag->data = bootloader->image;
+ bootl_frag->size = bootloader->image_size;
+ bootl_frag->location = img->image_loc[0];
+ if (!bootl_frag->location)
+ return -EINVAL;
+
+ if (!arc_sem)
+ return -EINVAL;
+ arcsem_frag->data = arc_sem->image;
+ arcsem_frag->size = arc_sem->image_size;
+ arcsem_frag->location = img->image_loc[1];
+ if (!arcsem_frag->location)
+ return -EINVAL;
+
+ if (ace_run) {
+ acer_frag->data = ace_run->image;
+ acer_frag->size = ace_run->image_size;
+ acer_frag->location = img->image_loc[2];
+ if (!acer_frag->location)
+ return -EINVAL;
+
+ if (ace_vis) {
+ acev_frag->data = ace_vis->image;
+ acev_frag->size = ace_vis->image_size;
+ /* Align to 4K boundary */
+ acev_frag->location = ((acer_frag->location +
+ acer_frag->size + 0xFFF) &
+ ~(0xFFF));
+ if (img->image_loc[3] &&
+			    acev_frag->location != img->image_loc[3]) {
+				dev_err(dev->dev,
+					"ACE vision image location error. img->image_loc[3] = 0x%x, calculated is 0x%x\n",
+					img->image_loc[3], acev_frag->location);
+				/* on location mismatch, use the one from the image file */
+				acev_frag->location = img->image_loc[3];
+ }
+ }
+
+ if (ace_conf) {
+ acec_frag->data = ace_conf->image;
+ acec_frag->size = ace_conf->image_size;
+ /* Align to 4K boundary */
+ acec_frag->location = ((acev_frag->location +
+ acev_frag->size + 0xFFF) &
+ ~(0xFFF));
+ if (img->image_loc[4] &&
+ acec_frag->location != img->image_loc[4]) {
+ dev_err(dev->dev,
+					"ACE config image location error. img->image_loc[4] = 0x%x, calculated is 0x%x\n",
+					img->image_loc[4], acec_frag->location);
+				/* on location mismatch, use the one from the image file */
+ acec_frag->location = img->image_loc[4];
+ }
+ }
+ }
+
+ em7d_frag->data = em7d->image;
+ em7d_frag->size = em7d->image_size;
+ /* em7d is the last firmware */
+ em7d_frag->location = img->image_loc[img->image_count - 1];
+ if (!em7d_frag->location)
+ return -EINVAL;
+
+ return 0;
+}
+
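+/* parse the sensor firmware file holding the ACE vision and ACE config images */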
+static int parse_sensor_fw(struct mei_device *dev, const struct firmware *fw)
+{
+ struct firmware_sign *ace_vis = NULL;
+ struct firmware_sign *ace_conf = NULL;
+ struct vsc_boot_img *img = (struct vsc_boot_img *)fw->data;
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ struct fragment *acer_frag = &hw->fw.frags[ACER_IMG_TYPE];
+ struct fragment *acev_frag = &hw->fw.frags[ACEV_IMG_TYPE];
+ struct fragment *acec_frag = &hw->fw.frags[ACEC_IMG_TYPE];
+
+ if (!img || img->magic != VSC_FILE_MAGIC ||
+ img->image_count < IMG_ACEV_ACECNF ||
+ img->image_count > IMG_CNT_MAX)
+ return -EINVAL;
+
+ dev_dbg(dev->dev, "%s: img->image_count=%d\n", __func__,
+ img->image_count);
+
+ hw->fw.fw_cnt += img->image_count;
+ if (hw->fw.fw_cnt > IMG_CNT_MAX)
+ return -EINVAL;
+
+ ace_vis = (struct firmware_sign *)(img->image_loc + img->image_count);
+ ace_conf =
+ (struct firmware_sign *)(ace_vis->image + ace_vis->image_size);
+
+ dev_dbg(dev->dev,
+ "ACE vision signed magic number 0x%08x, image size 0x%08x\n",
+ ace_vis->magic, ace_vis->image_size);
+ if (ace_vis->magic != VSC_FW_MAGIC) {
+ dev_err(dev->dev,
+ "ACE vision signed magic error! magic number 0x%08x, image size 0x%08x\n",
+ ace_vis->magic, ace_vis->image_size);
+ return -EINVAL;
+ }
+
+ acev_frag->data = ace_vis->image;
+ acev_frag->size = ace_vis->image_size;
+ /* Align to 4K boundary */
+ acev_frag->location =
+ ((acer_frag->location + acer_frag->size + 0xFFF) & ~(0xFFF));
+	if (img->image_loc[0] && acev_frag->location != img->image_loc[0]) {
+		dev_err(dev->dev,
+			"ACE vision image location error. img->image_loc[0] = 0x%x, calculated is 0x%x\n",
+			img->image_loc[0], acev_frag->location);
+		/* on location mismatch, use the one from the image file */
+ acev_frag->location = img->image_loc[0];
+ }
+
+ dev_dbg(dev->dev,
+ "ACE config signed magic number 0x%08x, image size 0x%08x\n",
+ ace_conf->magic, ace_conf->image_size);
+ if (ace_conf->magic != VSC_FW_MAGIC) {
+ dev_err(dev->dev,
+ "ACE config signed magic error! magic number 0x%08x, image size 0x%08x\n",
+ ace_conf->magic, ace_conf->image_size);
+ return -EINVAL;
+ }
+
+ acec_frag->data = ace_conf->image;
+ acec_frag->size = ace_conf->image_size;
+ /* Align to 4K boundary */
+ acec_frag->location =
+ ((acev_frag->location + acev_frag->size + 0xFFF) & ~(0xFFF));
+ if (img->image_loc[1] && acec_frag->location != img->image_loc[1]) {
+ dev_err(dev->dev,
+			"ACE config image location error. img->image_loc[1] = 0x%x, calculated is 0x%x\n",
+			img->image_loc[1], acec_frag->location);
+		/* on location mismatch, use the one from the image file */
+ acec_frag->location = img->image_loc[1];
+ }
+
+ return 0;
+}
+
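+/* parse the SKU config file; its first u32 holds the config size */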
+static int parse_sku_cnf_fw(struct mei_device *dev, const struct firmware *fw)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ struct fragment *skucnf_frag = &hw->fw.frags[SKU_CONF_TYPE];
+
+ if (fw->size <= sizeof(u32))
+ return -EINVAL;
+
+ skucnf_frag->data = fw->data;
+ skucnf_frag->size = *((u32 *)fw->data) + sizeof(u32);
+	/* the SKU config uses a fixed location */
+ skucnf_frag->location = SKU_CONFIG_LOC;
+ if (fw->size != skucnf_frag->size || fw->size > SKU_MAX_SIZE) {
+ dev_err(dev->dev,
+			"sku config file size mismatch: config size + 4 = 0x%x, file size = 0x%zx\n",
+ skucnf_frag->size, fw->size);
+ return -EINVAL;
+ }
+ return 0;
+}
+
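+/* simple byte-wise sum used as the frame checksum; not a true CRC */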
+static u32 sum_CRC(void *data, int size)
+{
+ int i;
+ u32 crc = 0;
+
+ for (i = 0; i < size; i++)
+ crc += *((u8 *)data + i);
+
+ return crc;
+}
+
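+/* stream an image payload to the ROM in VSC_CMD_DL_CONT frames */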
+static int load_boot(struct mei_device *dev, const void *data, int size)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ struct vsc_rom_master_frame *frame =
+ (struct vsc_rom_master_frame *)hw->fw.tx_buf;
+ struct vsc_rom_slave_token *token =
+ (struct vsc_rom_slave_token *)hw->fw.rx_buf;
+ const u8 *ptr = data;
+ u32 remain;
+ int ret;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ dev_dbg(dev->dev, "==== %s: image payload size : %d\n", __func__, size);
+ remain = size;
+ while (remain > 0) {
+ u32 max_len = sizeof(frame->data.dl_cont.payload);
+ u32 len = remain > max_len ? max_len : remain;
+
+ memset(frame, 0, sizeof(*frame));
+ memset(token, 0, sizeof(*token));
+ frame->magic = VSC_MAGIC_NUM;
+ frame->cmd = VSC_CMD_DL_CONT;
+
+ frame->data.dl_cont.len = (u16)len;
+ frame->data.dl_cont.end_flag = (remain == len ? 1 : 0);
+ memcpy(frame->data.dl_cont.payload, ptr, len);
+
+ ret = spi_rom_dev_xfer(hw, frame, NULL, VSC_ROM_SPI_PKG_SIZE);
+ if (ret) {
+ dev_err(dev->dev, "%s: transfer failed\n", __func__);
+ break;
+ }
+
+ ptr += len;
+ remain -= len;
+ }
+
+ return ret;
+}
+
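+/* verify the ROM token, send DL_START, then stream the bootloader image */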
+static int load_bootloader(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ struct vsc_rom_master_frame *frame =
+ (struct vsc_rom_master_frame *)hw->fw.tx_buf;
+ struct vsc_rom_slave_token *token =
+ (struct vsc_rom_slave_token *)hw->fw.rx_buf;
+ struct fragment *fragment = &hw->fw.frags[BOOT_IMAGE_TYPE];
+ int ret;
+
+ if (!fragment->size)
+ return -EINVAL;
+
+ dev_dbg(dev->dev, "verify bootloader token ...\n");
+ frame->magic = VSC_MAGIC_NUM;
+ frame->cmd = VSC_CMD_QUERY;
+ ret = spi_rom_dev_xfer(hw, frame, token, VSC_ROM_SPI_PKG_SIZE);
+ if (ret)
+ return ret;
+
+ if (token->token != VSC_TOKEN_BOOTLOADER_REQ &&
+ token->token != VSC_TOKEN_DUMP_RESP) {
+ dev_err(dev->dev,
+ "failed to load bootloader, invalid token 0x%x\n",
+ token->token);
+ return -EINVAL;
+ }
+ dev_dbg(dev->dev, "bootloader token has been verified\n");
+
+ dev_dbg(dev->dev, "start download, image len: %u ...\n", fragment->size);
+ memset(frame, 0, sizeof(*frame));
+ memset(token, 0, sizeof(*token));
+
+ frame->magic = VSC_MAGIC_NUM;
+ frame->cmd = VSC_CMD_DL_START;
+ frame->data.dl_start.img_type = IMG_BOOTLOADER;
+ frame->data.dl_start.img_len = fragment->size;
+ frame->data.dl_start.img_loc = fragment->location;
+ frame->data.dl_start.option = (u16)hw->fw.fw_option;
+ frame->data.dl_start.crc =
+ sum_CRC(frame, (int)offsetof(struct vsc_rom_master_frame,
+ data.dl_start.crc));
+ ret = spi_rom_dev_xfer(hw, frame, NULL, VSC_ROM_SPI_PKG_SIZE);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev->dev, "load bootloader payload ...\n");
+ ret = load_boot(dev, fragment->data, fragment->size);
+ if (ret)
+ dev_err(dev->dev, "failed to load bootloader, err : 0x%0x\n",
+ ret);
+
+ return ret;
+}
+
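+/* stream firmware payload in FW_SPI_PKG_SIZE chunks */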
+static int load_fw_bin(struct mei_device *dev, const void *data, int size)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ struct vsc_master_frame_fw_cont *frame =
+ (struct vsc_master_frame_fw_cont *)hw->fw.tx_buf;
+ struct vsc_bol_slave_token *token =
+ (struct vsc_bol_slave_token *)hw->fw.rx_buf;
+ const u8 *ptr = data;
+ int ret;
+ u32 remain;
+
+ if (!data || !size)
+ return -EINVAL;
+
+	dev_dbg(dev->dev, "%s: image payload size: %d\n", __func__, size);
+ remain = size;
+ while (remain > 0) {
+ u32 len = remain > FW_SPI_PKG_SIZE ? FW_SPI_PKG_SIZE : remain;
+
+ memset(frame, 0, sizeof(*frame));
+ memset(token, 0, sizeof(*token));
+ memcpy(frame->payload, ptr, len);
+
+ ret = spi_rom_dev_xfer(hw, frame, NULL, FW_SPI_PKG_SIZE);
+ if (ret) {
+ dev_err(dev->dev, "transfer failed\n");
+ break;
+ }
+
+ ptr += len;
+ remain -= len;
+ }
+
+ return ret;
+}
+
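+/*
+ * A fragment download is a DL_START frame describing the image type,
+ * length and flash location (with an additive checksum over the frame
+ * header), followed by the raw payload streamed in FW_SPI_PKG_SIZE
+ * chunks by load_fw_bin().
+ */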
+static int load_fw_frag(struct mei_device *dev, struct fragment *frag, int type)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ struct vsc_fw_master_frame *frame =
+ (struct vsc_fw_master_frame *)hw->fw.tx_buf;
+ struct vsc_bol_slave_token *token =
+		(struct vsc_bol_slave_token *)hw->fw.rx_buf;
+ int ret;
+
+ dev_dbg(dev->dev,
+ "start download firmware type %d ... loc:0x%08x, size:0x%08x\n",
+ type, frag->location, frag->size);
+ memset(frame, 0, sizeof(*frame));
+ memset(token, 0, sizeof(*token));
+ frame->magic = VSC_MAGIC_NUM;
+ frame->cmd = VSC_CMD_DL_START;
+ frame->data.dl_start.img_type = type;
+ frame->data.dl_start.img_len = frag->size;
+ frame->data.dl_start.img_loc = frag->location;
+ frame->data.dl_start.option = (u16)hw->fw.fw_option;
+ frame->data.dl_start.crc = sum_CRC(
+ frame, offsetof(struct vsc_fw_master_frame, data.dl_start.crc));
+ ret = spi_rom_dev_xfer(hw, frame, NULL, FW_SPI_PKG_SIZE);
+ if (ret)
+ return ret;
+
+ return load_fw_bin(dev, frag->data, frag->size);
+}
+
+static int load_fw(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ struct vsc_fw_master_frame *frame =
+ (struct vsc_fw_master_frame *)hw->fw.tx_buf;
+ struct vsc_bol_slave_token *token =
+		(struct vsc_bol_slave_token *)hw->fw.rx_buf;
+ struct fragment *arcsem_frag = NULL;
+ struct fragment *em7d_frag = NULL;
+ struct fragment *acer_frag = NULL;
+ struct fragment *acev_frag = NULL;
+ struct fragment *acec_frag = NULL;
+ struct fragment *skucnf_frag = NULL;
+ int index = 0;
+ int ret;
+
+ if (hw->fw.frags[ARC_SEM_IMG_TYPE].size > 0)
+ arcsem_frag = &hw->fw.frags[ARC_SEM_IMG_TYPE];
+
+ if (hw->fw.frags[EM7D_IMG_TYPE].size > 0)
+ em7d_frag = &hw->fw.frags[EM7D_IMG_TYPE];
+
+ if (hw->fw.frags[ACER_IMG_TYPE].size > 0)
+ acer_frag = &hw->fw.frags[ACER_IMG_TYPE];
+
+ if (hw->fw.frags[ACEV_IMG_TYPE].size > 0)
+ acev_frag = &hw->fw.frags[ACEV_IMG_TYPE];
+
+ if (hw->fw.frags[ACEC_IMG_TYPE].size > 0)
+ acec_frag = &hw->fw.frags[ACEC_IMG_TYPE];
+
+ if (hw->fw.frags[SKU_CONF_TYPE].size > 0)
+ skucnf_frag = &hw->fw.frags[SKU_CONF_TYPE];
+
+ if (!arcsem_frag || !em7d_frag) {
+ dev_err(dev->dev, "invalid image or signature data\n");
+ return -EINVAL;
+ }
+
+ /* send dl_set frame */
+ dev_dbg(dev->dev, "send dl_set frame ...\n");
+ memset(frame, 0, sizeof(*frame));
+ memset(token, 0, sizeof(*token));
+
+ frame->magic = VSC_MAGIC_NUM;
+ frame->cmd = VSC_CMD_DL_SET;
+ frame->data.dl_set.option = (u16)hw->fw.fw_option;
+ frame->data.dl_set.img_cnt = (u8)hw->fw.fw_cnt;
+ dev_dbg(dev->dev, "%s: img_cnt = %d ...\n", __func__,
+ frame->data.dl_set.img_cnt);
+
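+	/*
+	 * The dl_set payload is laid out as fw_cnt (location, size) pairs
+	 * in download order, followed by one additive checksum word at
+	 * payload[fw_cnt * 2].
+	 */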
+ frame->data.dl_set.payload[index++] = arcsem_frag->location;
+ frame->data.dl_set.payload[index++] = arcsem_frag->size;
+ if (acer_frag) {
+ frame->data.dl_set.payload[index++] = acer_frag->location;
+ frame->data.dl_set.payload[index++] = acer_frag->size;
+ if (acev_frag) {
+ frame->data.dl_set.payload[index++] =
+ acev_frag->location;
+ frame->data.dl_set.payload[index++] = acev_frag->size;
+ }
+ if (acec_frag) {
+ frame->data.dl_set.payload[index++] =
+ acec_frag->location;
+ frame->data.dl_set.payload[index++] = acec_frag->size;
+ }
+ }
+ frame->data.dl_set.payload[index++] = em7d_frag->location;
+ frame->data.dl_set.payload[index++] = em7d_frag->size;
+ frame->data.dl_set.payload[hw->fw.fw_cnt * 2] = sum_CRC(
+ frame, (int)offsetof(struct vsc_fw_master_frame,
+ data.dl_set.payload[hw->fw.fw_cnt * 2]));
+
+ ret = spi_rom_dev_xfer(hw, frame, NULL, FW_SPI_PKG_SIZE);
+ if (ret)
+ return ret;
+
+ /* load ARC-SEM FW image */
+ if (arcsem_frag) {
+ ret = load_fw_frag(dev, arcsem_frag, IMG_ARCSEM);
+ if (ret)
+ return ret;
+ }
+
+ /* load ACE FW image */
+ if (acer_frag) {
+ ret = load_fw_frag(dev, acer_frag, IMG_ACE_RUNTIME);
+ if (ret)
+ return ret;
+ }
+
+ if (acev_frag) {
+ ret = load_fw_frag(dev, acev_frag, IMG_ACE_VISION);
+ if (ret)
+ return ret;
+ }
+
+ if (acec_frag) {
+ ret = load_fw_frag(dev, acec_frag, IMG_ACE_CONFIG);
+ if (ret)
+ return ret;
+ }
+
+ /* load EM7D FW image */
+ if (em7d_frag) {
+ ret = load_fw_frag(dev, em7d_frag, IMG_EM7D);
+ if (ret)
+ return ret;
+ }
+
+ /* load SKU Config */
+ if (skucnf_frag) {
+ ret = load_fw_frag(dev, skucnf_frag, IMG_SKU_CONFIG);
+ if (ret)
+ return ret;
+ }
+
+ memset(frame, 0, sizeof(*frame));
+ frame->magic = VSC_MAGIC_NUM;
+ frame->cmd = VSC_TOKEN_CAM_BOOT;
+ frame->data.boot.check_sum = sum_CRC(
+ frame, offsetof(struct vsc_fw_master_frame, data.dl_start.crc));
+ ret = spi_rom_dev_xfer(hw, frame, NULL, FW_SPI_PKG_SIZE);
+ if (ret)
+ dev_err(dev->dev, "failed to boot fw, err : 0x%x\n", ret);
+
+ return ret;
+}
+
+static int init_hw(struct mei_device *dev)
+{
+ int ret;
+ const struct firmware *fw = NULL;
+ const struct firmware *sensor_fw = NULL;
+ const struct firmware *sku_cnf_fw = NULL;
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ ret = check_silicon(dev);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev->dev,
+ "%s: FW files. Firmware Boot File: %s, Sensor FW File: %s, Sku Config File: %s\n",
+ __func__, hw->fw.fw_file_name, hw->fw.sensor_file_name,
+ hw->fw.sku_cnf_file_name);
+ ret = request_firmware(&fw, hw->fw.fw_file_name, dev->dev);
+ if (ret < 0 || !fw) {
+ dev_err(&hw->spi->dev, "file not found %s\n",
+ hw->fw.fw_file_name);
+ return ret;
+ }
+
+ ret = parse_main_fw(dev, fw);
+ if (ret || !fw) {
+ dev_err(&hw->spi->dev, "parse fw %s failed\n",
+ hw->fw.fw_file_name);
+ goto release;
+ }
+
+ if (hw->fw.fw_cnt < IMG_ARC_ACER_ACEV_ACECNF_EM7D) {
+ ret = request_firmware(&sensor_fw, hw->fw.sensor_file_name,
+ dev->dev);
+ if (ret < 0 || !sensor_fw) {
+ dev_err(&hw->spi->dev, "file not found %s\n",
+ hw->fw.sensor_file_name);
+ goto release;
+ }
+ ret = parse_sensor_fw(dev, sensor_fw);
+ if (ret) {
+ dev_err(&hw->spi->dev, "parse fw %s failed\n",
+ hw->fw.sensor_file_name);
+ goto release_sensor;
+ }
+ }
+
+ ret = request_firmware(&sku_cnf_fw, hw->fw.sku_cnf_file_name, dev->dev);
+ if (ret < 0 || !sku_cnf_fw) {
+ dev_err(&hw->spi->dev, "file not found %s\n",
+ hw->fw.sku_cnf_file_name);
+ goto release_sensor;
+ }
+
+ ret = parse_sku_cnf_fw(dev, sku_cnf_fw);
+ if (ret) {
+		dev_err(&hw->spi->dev, "parse fw %s failed\n",
+			hw->fw.sku_cnf_file_name);
+ goto release_cnf;
+ }
+
+	ret = load_bootloader(dev);
+	if (ret)
+		goto release_cnf;
+
+	ret = load_fw(dev);
+
+	/*
+	 * The firmware payloads have been streamed to the device at this
+	 * point and the fragment pointers into the blobs are not used
+	 * again, so release the blobs on success as well.
+	 */
+
+release_cnf:
+ release_firmware(sku_cnf_fw);
+release_sensor:
+ release_firmware(sensor_fw);
+release:
+ release_firmware(fw);
+ return ret;
+}
+
+/**
+ * mei_vsc_fw_status - read the fw status
+ *
+ * @dev: mei device
+ * @fw_status: fw status
+ *
+ * Return: 0 on success, error otherwise
+ */
+static int mei_vsc_fw_status(struct mei_device *dev,
+ struct mei_fw_status *fw_status)
+{
+ if (!fw_status)
+ return -EINVAL;
+
+ fw_status->count = 0;
+ return 0;
+}
+
+/**
+ * mei_vsc_pg_state - translate internal pg state
+ * to the mei power gating state
+ *
+ * @dev: mei device
+ *
+ * Return: always MEI_PG_OFF, power gating is not supported
+ */
+static inline enum mei_pg_state mei_vsc_pg_state(struct mei_device *dev)
+{
+ return MEI_PG_OFF;
+}
+
+/**
+ * mei_vsc_intr_enable - enables mei device interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_vsc_intr_enable(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ enable_irq(hw->wakeuphostint);
+}
+
+/**
+ * mei_vsc_intr_disable - disables mei device interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_vsc_intr_disable(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ disable_irq(hw->wakeuphostint);
+}
+
+/**
+ * mei_vsc_intr_clear - clear and stop interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_vsc_intr_clear(struct mei_device *dev)
+{
+ ;
+}
+
+/**
+ * mei_vsc_synchronize_irq - wait for pending IRQ handlers
+ *
+ * @dev: the device structure
+ */
+static void mei_vsc_synchronize_irq(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ synchronize_irq(hw->wakeuphostint);
+}
+
+/**
+ * mei_vsc_hw_config - configure hw dependent settings
+ *
+ * @dev: mei device
+ *
+ * Return: always 0
+ */
+static int mei_vsc_hw_config(struct mei_device *dev)
+{
+ return 0;
+}
+
+/**
+ * mei_vsc_host_set_ready - enable device
+ *
+ * @dev: mei device
+ */
+static void mei_vsc_host_set_ready(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ hw->host_ready = true;
+}
+
+/**
+ * mei_vsc_host_is_ready - check whether the host has turned ready
+ *
+ * @dev: mei device
+ * Return: bool
+ */
+static bool mei_vsc_host_is_ready(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ return hw->host_ready;
+}
+
+/**
+ * mei_vsc_hw_is_ready - check whether the me(hw) has turned ready
+ *
+ * @dev: mei device
+ * Return: bool
+ */
+static bool mei_vsc_hw_is_ready(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ return hw->fw_ready;
+}
+
+#define MEI_SPI_START_TIMEOUT 200
+
+/**
+ * mei_vsc_hw_start - hw start routine
+ *
+ * @dev: mei device
+ * Return: 0 on success, error otherwise
+ */
+static int mei_vsc_hw_start(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ u8 buf;
+ int len;
+ int ret;
+ int timeout = MEI_SPI_START_TIMEOUT;
+
+ mei_vsc_host_set_ready(dev);
+ atomic_set(&hw->lock_cnt, 0);
+ mei_vsc_intr_enable(dev);
+
+	/* poll until the FW responds to a read or the timeout expires */
+	while (timeout > 0) {
+		msleep(50);
+		timeout -= 50;
+		ret = mei_vsc_read_raw(hw, &buf, sizeof(buf), &len);
+		if (!ret)
+			break;
+	}
+
+	if (ret)
+		return -ENODEV;
+
+ dev_dbg(dev->dev, "hw is ready\n");
+ hw->fw_ready = true;
+ return 0;
+}
+
+/**
+ * mei_vsc_hbuf_is_ready - checks if host buf is empty.
+ *
+ * @dev: the device structure
+ *
+ * Return: true if empty, false - otherwise.
+ */
+static bool mei_vsc_hbuf_is_ready(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ return hw->write_lock_cnt == 0;
+}
+
+/**
+ * mei_vsc_hbuf_empty_slots - counts write empty slots.
+ *
+ * @dev: the device structure
+ *
+ * Return: empty slots count
+ */
+static int mei_vsc_hbuf_empty_slots(struct mei_device *dev)
+{
+ return MAX_MEI_MSG_SIZE / MEI_SLOT_SIZE;
+}
+
+/**
+ * mei_vsc_hbuf_depth - returns depth of the hw buf.
+ *
+ * @dev: the device structure
+ *
+ * Return: size of hw buf in slots
+ */
+static u32 mei_vsc_hbuf_depth(const struct mei_device *dev)
+{
+ return MAX_MEI_MSG_SIZE / MEI_SLOT_SIZE;
+}
+
+/**
+ * mei_vsc_write - writes a message to FW.
+ *
+ * @dev: the device structure
+ * @hdr: header of message
+ * @hdr_len: header length in bytes: must be multiplication of a slot (4bytes)
+ * @data: payload
+ * @data_len: payload length in bytes
+ *
+ * Return: 0 if success, < 0 - otherwise.
+ */
+static int mei_vsc_write(struct mei_device *dev, const void *hdr,
+ size_t hdr_len, const void *data, size_t data_len)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ int ret;
+ char *buf = hw->tx_buf;
+
+ if (WARN_ON(!hdr || !data || hdr_len & 0x3 ||
+ data_len > MAX_SPI_MSG_SIZE)) {
+ dev_err(dev->dev,
+ "%s error write msg hdr_len %zu data_len %zu\n",
+ __func__, hdr_len, data_len);
+ return -EINVAL;
+ }
+
+ hw->write_lock_cnt++;
+ memcpy(buf, hdr, hdr_len);
+ memcpy(buf + hdr_len, data, data_len);
+ dev_dbg(dev->dev, "%s %d" MEI_HDR_FMT, __func__, hw->write_lock_cnt,
+ MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
+
+ ret = mei_vsc_write_raw(hw, buf, hdr_len + data_len);
+ if (ret)
+ dev_err(dev->dev, MEI_HDR_FMT "hdr_len %zu data len %zu\n",
+ MEI_HDR_PRM((struct mei_msg_hdr *)hdr), hdr_len,
+ data_len);
+
+ hw->write_lock_cnt--;
+ return ret;
+}
+
+/**
+ * mei_vsc_read - read a message header over spi
+ *
+ * @dev: the device structure
+ *
+ * Return: mei message header (u32), or 0 on read failure
+ */
+static inline u32 mei_vsc_read(const struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ int ret;
+
+ ret = mei_vsc_read_raw(hw, hw->rx_buf, sizeof(hw->rx_buf), &hw->rx_len);
+ if (ret || hw->rx_len < sizeof(u32))
+ return 0;
+
+ return *(u32 *)hw->rx_buf;
+}
+
+/**
+ * mei_vsc_count_full_read_slots - counts read full slots.
+ *
+ * @dev: the device structure
+ *
+ * Return: -EOVERFLOW if overflow, otherwise filled slots count
+ */
+static int mei_vsc_count_full_read_slots(struct mei_device *dev)
+{
+ return MAX_MEI_MSG_SIZE / MEI_SLOT_SIZE;
+}
+
+/**
+ * mei_vsc_read_slots - reads a message from mei device.
+ *
+ * @dev: the device structure
+ * @buf: the buffer the message will be copied to
+ * @len: the length of the message to read
+ *
+ * Return: always 0
+ */
+static int mei_vsc_read_slots(struct mei_device *dev, unsigned char *buf,
+ unsigned long len)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ struct mei_msg_hdr *hdr;
+
+ hdr = (struct mei_msg_hdr *)hw->rx_buf;
+ WARN_ON(len != hdr->length || hdr->length + sizeof(*hdr) != hw->rx_len);
+ memcpy(buf, hw->rx_buf + sizeof(*hdr), len);
+ return 0;
+}
+
+/**
+ * mei_vsc_pg_in_transition - is device now in pg transition
+ *
+ * @dev: the device structure
+ *
+ * Return: true if in pg transition, false otherwise
+ */
+static bool mei_vsc_pg_in_transition(struct mei_device *dev)
+{
+ return dev->pg_event >= MEI_PG_EVENT_WAIT &&
+ dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
+}
+
+/**
+ * mei_vsc_pg_is_enabled - detect if PG is supported by HW
+ *
+ * @dev: the device structure
+ *
+ * Return: true if pg is supported, false otherwise
+ */
+static bool mei_vsc_pg_is_enabled(struct mei_device *dev)
+{
+ return false;
+}
+
+/**
+ * mei_vsc_hw_reset - resets fw.
+ *
+ * @dev: the device structure
+ * @intr_enable: if interrupt should be enabled after reset.
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+static int mei_vsc_hw_reset(struct mei_device *dev, bool intr_enable)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ int ret;
+
+ mei_vsc_intr_disable(dev);
+ ret = vsc_reset(dev);
+ if (ret)
+ return ret;
+
+ if (hw->disconnect)
+ return 0;
+
+ ret = init_hw(dev);
+ if (ret)
+ return -ENODEV;
+
+ hw->seq = 0;
+ return 0;
+}
+
+/**
+ * mei_vsc_irq_quick_handler - The ISR of the MEI device
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * Return: irqreturn_t
+ */
+irqreturn_t mei_vsc_irq_quick_handler(int irq, void *dev_id)
+{
+ struct mei_device *dev = (struct mei_device *)dev_id;
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ dev_dbg(dev->dev, "interrupt top half lock_cnt %d state %d\n",
+ atomic_read(&hw->lock_cnt), dev->dev_state);
+
+ atomic_inc(&hw->lock_cnt);
+ wake_up(&hw->xfer_wait);
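+	/*
+	 * During init/reset the transfer path polls via xfer_wait, so the
+	 * threaded handler is not needed yet.
+	 */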
+ if (dev->dev_state == MEI_DEV_INITIALIZING ||
+ dev->dev_state == MEI_DEV_RESETTING)
+ return IRQ_HANDLED;
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * mei_vsc_irq_thread_handler - function called after ISR to handle the interrupt
+ * processing.
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * Return: irqreturn_t
+ *
+ */
+irqreturn_t mei_vsc_irq_thread_handler(int irq, void *dev_id)
+{
+ struct mei_device *dev = (struct mei_device *)dev_id;
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ struct list_head cmpl_list;
+ s32 slots;
+ int rets = 0;
+
+ dev_dbg(dev->dev,
+ "function called after ISR to handle the interrupt processing dev->dev_state=%d.\n",
+ dev->dev_state);
+
+ /* initialize our complete list */
+ mutex_lock(&dev->device_lock);
+ INIT_LIST_HEAD(&cmpl_list);
+
+ /* check slots available for reading */
+ slots = mei_count_full_read_slots(dev);
+ dev_dbg(dev->dev, "slots to read = %08x\n", slots);
+
+reread:
+ while (spi_need_read(hw)) {
+ dev_dbg(dev->dev, "slots to read in = %08x\n", slots);
+ rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
+		/*
+		 * There is a race between ME write and interrupt delivery:
+		 * not all data is always available immediately after the
+		 * interrupt, so try to read again on the next interrupt.
+		 */
+ if (rets == -ENODATA)
+ break;
+
+ if (rets && (dev->dev_state != MEI_DEV_RESETTING &&
+ dev->dev_state != MEI_DEV_POWER_DOWN)) {
+ dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
+ rets);
+ schedule_work(&dev->reset_work);
+ goto end;
+ }
+ }
+ dev_dbg(dev->dev, "slots to read out = %08x\n", slots);
+
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+ rets = mei_irq_write_handler(dev, &cmpl_list);
+
+ dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+ mei_irq_compl_handler(dev, &cmpl_list);
+
+ if (spi_need_read(hw))
+ goto reread;
+
+end:
+ dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
+ mutex_unlock(&dev->device_lock);
+ return IRQ_HANDLED;
+}
+
+static const struct mei_hw_ops mei_vsc_hw_ops = {
+ .fw_status = mei_vsc_fw_status,
+ .pg_state = mei_vsc_pg_state,
+
+ .host_is_ready = mei_vsc_host_is_ready,
+
+ .hw_is_ready = mei_vsc_hw_is_ready,
+ .hw_reset = mei_vsc_hw_reset,
+ .hw_config = mei_vsc_hw_config,
+ .hw_start = mei_vsc_hw_start,
+
+ .pg_in_transition = mei_vsc_pg_in_transition,
+ .pg_is_enabled = mei_vsc_pg_is_enabled,
+
+ .intr_clear = mei_vsc_intr_clear,
+ .intr_enable = mei_vsc_intr_enable,
+ .intr_disable = mei_vsc_intr_disable,
+ .synchronize_irq = mei_vsc_synchronize_irq,
+
+ .hbuf_free_slots = mei_vsc_hbuf_empty_slots,
+ .hbuf_is_ready = mei_vsc_hbuf_is_ready,
+ .hbuf_depth = mei_vsc_hbuf_depth,
+ .write = mei_vsc_write,
+
+ .rdbuf_full_slots = mei_vsc_count_full_read_slots,
+ .read_hdr = mei_vsc_read,
+ .read = mei_vsc_read_slots
+};
+
+/**
+ * mei_vsc_dev_init - allocates and initializes the mei device structure
+ *
+ * @parent: device associated with physical device (spi/platform)
+ *
+ * Return: The mei_device pointer on success, NULL on failure.
+ */
+struct mei_device *mei_vsc_dev_init(struct device *parent)
+{
+ struct mei_device *dev;
+ struct mei_vsc_hw *hw;
+
+ dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ mei_device_init(dev, parent, &mei_vsc_hw_ops);
+ dev->fw_f_fw_ver_supported = 0;
+ dev->kind = 0;
+ return dev;
+}
new file mode 100644
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021, Intel Corporation. All rights reserved.
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ */
+
+#ifndef _MEI_HW_SPI_H_
+#define _MEI_HW_SPI_H_
+
+#include <linux/irqreturn.h>
+#include <linux/spi/spi.h>
+#include <linux/mei.h>
+#include <linux/types.h>
+
+#include "mei_dev.h"
+#include "client.h"
+
+struct mei_cfg {
+ const struct mei_fw_status fw_status;
+ const char *kind;
+ u32 fw_ver_supported : 1;
+ u32 hw_trc_supported : 1;
+};
+
+enum FRAG_TYPE {
+ BOOT_IMAGE_TYPE,
+ ARC_SEM_IMG_TYPE,
+ EM7D_IMG_TYPE,
+ ACER_IMG_TYPE,
+ ACEV_IMG_TYPE,
+ ACEC_IMG_TYPE,
+ SKU_CONF_TYPE,
+ FRAGMENT_TYPE_MAX,
+};
+
+struct fragment {
+ enum FRAG_TYPE type;
+ u32 location;
+ const u8 *data;
+ u32 size;
+};
+
+irqreturn_t mei_vsc_irq_quick_handler(int irq, void *dev_id);
+irqreturn_t mei_vsc_irq_thread_handler(int irq, void *dev_id);
+struct mei_device *mei_vsc_dev_init(struct device *parent);
+
+#define VSC_MAGIC_NUM 0x49505343
+#define VSC_FILE_MAGIC 0x46564353
+#define VSC_FW_MAGIC 0x49574653
+#define VSC_ROM_SPI_PKG_SIZE 256
+#define FW_SPI_PKG_SIZE 512
+
+#define IMG_MAX_LOC (0x50FFFFFF)
+#define FW_MAX_SIZE (0x200000)
+#define SKU_CONFIG_LOC (0x5001A000)
+#define SKU_MAX_SIZE (4100)
+
+#define IMG_DMA_ENABLE_OPTION (1 << 0)
+
+#define SIG_SIZE 384
+#define PUBKEY_SIZE 384
+#define CSSHEADER_SIZE 128
+
+#define VSC_CMD_QUERY 0
+#define VSC_CMD_DL_SET 1
+#define VSC_CMD_DL_START 2
+#define VSC_CMD_DL_CONT 3
+#define VSC_CMD_DUMP_MEM 4
+#define VSC_CMD_SET_REG 5
+#define VSC_CMD_PRINT_ROM_VERSION 6
+#define VSC_CMD_WRITE_FLASH 7
+#define VSC_CMD_RESERVED 8
+
+enum IMAGE_TYPE {
+ IMG_DEBUG,
+ IMG_BOOTLOADER,
+ IMG_EM7D,
+ IMG_ARCSEM,
+ IMG_ACE_RUNTIME,
+ IMG_ACE_VISION,
+ IMG_ACE_CONFIG,
+ IMG_SKU_CONFIG
+};
+
+/* image count defines, refer to Clover Fall Boot ROM HLD 1.0 */
+#define IMG_ACEV_ACECNF 2
+#define IMG_BOOT_ARC_EM7D 3
+#define IMG_BOOT_ARC_ACER_EM7D 4
+#define IMG_BOOT_ARC_ACER_ACEV_EM7D 5
+#define IMG_BOOT_ARC_ACER_ACEV_ACECNF_EM7D 6
+#define IMG_ARC_ACER_ACEV_ACECNF_EM7D (IMG_BOOT_ARC_ACER_ACEV_ACECNF_EM7D - 1)
+#define IMG_CNT_MAX IMG_BOOT_ARC_ACER_ACEV_ACECNF_EM7D
+
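+/* tokens reported by the ROM/bootloader in the slave token frames */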
+#define VSC_TOKEN_BOOTLOADER_REQ 1
+#define VSC_TOKEN_FIRMWARE_REQ 2
+#define VSC_TOKEN_DOWNLOAD_CONT 3
+#define VSC_TOKEN_DUMP_RESP 4
+#define VSC_TOKEN_DUMP_CONT 5
+#define VSC_TOKEN_SKU_CONFIG_REQ 6
+#define VSC_TOKEN_ERROR 7
+#define VSC_TOKEN_DUMMY 8
+#define VSC_TOKEN_CAM_STATUS_RESP 9
+#define VSC_TOKEN_CAM_BOOT 10
+
+#define MAX_SVN_VALUE (0xFFFFFFFE)
+
+#define EFUSE1_ADDR (0xE0030000 + 0x38)
+#define STRAP_ADDR (0xE0030000 + 0x100)
+
+#define SI_MAINSTEPPING_VERSION_OFFSET (4)
+#define SI_MAINSTEPPING_VERSION_MASK (0xF)
+#define SI_MAINSTEPPING_VERSION_A (0x0)
+#define SI_MAINSTEPPING_VERSION_B (0x1)
+#define SI_MAINSTEPPING_VERSION_C (0x2)
+
+#define SI_SUBSTEPPING_VERSION_OFFSET (0x0)
+#define SI_SUBSTEPPING_VERSION_MASK (0xF)
+#define SI_SUBSTEPPING_VERSION_0 (0x0)
+#define SI_SUBSTEPPING_VERSION_0_PRIME (0x1)
+#define SI_SUBSTEPPING_VERSION_1 (0x2)
+#define SI_SUBSTEPPING_VERSION_1_PRIME (0x3)
+
+#define SI_STRAP_KEY_SRC_OFFSET (16)
+#define SI_STRAP_KEY_SRC_MASK (0x1)
+
+#define SI_STRAP_KEY_SRC_DEBUG (0x0)
+#define SI_STRAP_KEY_SRC_PRODUCT (0x1)
+
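+/*
+ * ROM-stage master frames are fixed VSC_ROM_SPI_PKG_SIZE packets; the
+ * trailing reserve member pads the union so every command variant has
+ * the same wire size.
+ */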
+struct vsc_rom_master_frame {
+ u32 magic;
+ u8 cmd;
+ union {
+ struct {
+ u8 img_type;
+ u16 option;
+ u32 img_len;
+ u32 img_loc;
+ u32 crc;
+ u8 res[0];
+ } __packed dl_start;
+ struct {
+ u8 option;
+ u16 img_cnt;
+ u32 payload[(VSC_ROM_SPI_PKG_SIZE - 8) / 4];
+ } __packed dl_set;
+ struct {
+ u8 end_flag;
+ u16 len;
+ u8 payload[VSC_ROM_SPI_PKG_SIZE - 8];
+ } __packed dl_cont;
+ struct {
+ u8 res;
+ u16 len;
+ u32 addr;
+#define ROM_DUMP_MEM_RESERVE_SIZE 12
+ u8 payload[VSC_ROM_SPI_PKG_SIZE -
+ ROM_DUMP_MEM_RESERVE_SIZE];
+ } __packed dump_mem;
+ struct {
+ u8 res[3];
+ u32 addr;
+ u32 val;
+#define ROM_SET_REG_RESERVE_SIZE 16
+ u8 payload[VSC_ROM_SPI_PKG_SIZE -
+ ROM_SET_REG_RESERVE_SIZE];
+ } __packed set_reg;
+ struct {
+ u8 ins[0];
+ } __packed undoc_f1;
+ struct {
+ u32 addr;
+ u32 len;
+ u8 payload[0];
+ } __packed os_dump_mem;
+ u8 reserve[VSC_ROM_SPI_PKG_SIZE - 5];
+ } data;
+} __packed;
+
+struct vsc_fw_master_frame {
+ u32 magic;
+ u8 cmd;
+ union {
+ struct {
+ u16 option;
+ u8 img_type;
+ u32 img_len;
+ u32 img_loc;
+ u32 crc;
+ u8 res[0];
+ } __packed dl_start;
+ struct {
+ u16 option;
+ u8 img_cnt;
+ u32 payload[(FW_SPI_PKG_SIZE - 8) / 4];
+ } __packed dl_set;
+ struct {
+ u8 end_flag;
+ u16 len;
+ u8 payload[FW_SPI_PKG_SIZE - 8];
+ } __packed dl_cont;
+ struct {
+ u32 addr;
+ u8 len;
+ u8 payload[0];
+ } __packed dump_mem;
+ struct {
+ u32 addr;
+ u32 val;
+ u8 payload[0];
+ } __packed set_reg;
+ struct {
+ u8 ins[0];
+ } __packed undoc_f1;
+ struct {
+ u32 addr;
+ u32 len;
+ u8 payload[0];
+ } __packed os_dump_mem;
+ struct {
+ u8 resv[3];
+ u32 check_sum;
+#define LOADER_BOOT_RESERVE_SIZE 12
+ u8 payload[FW_SPI_PKG_SIZE - LOADER_BOOT_RESERVE_SIZE];
+ } __packed boot;
+ u8 reserve[FW_SPI_PKG_SIZE - 5];
+ } data;
+} __packed;
+
+struct vsc_master_frame_fw_cont {
+ u8 payload[FW_SPI_PKG_SIZE];
+} __packed;
+
+struct vsc_rom_slave_token {
+ u32 magic;
+ u8 token;
+ u8 type;
+ u8 res[2];
+ u8 payload[VSC_ROM_SPI_PKG_SIZE - 8];
+} __packed;
+
+struct vsc_bol_slave_token {
+ u32 magic;
+ u8 token;
+ u8 type;
+ u8 res[2];
+ u8 payload[FW_SPI_PKG_SIZE - 8];
+} __packed;
+
+struct vsc_boot_img {
+ u32 magic;
+ u32 option;
+ u32 image_count;
+ u32 image_loc[IMG_CNT_MAX];
+} __packed;
+
+struct vsc_sensor_img_t {
+ u32 magic;
+ u32 option;
+ u32 image_count;
+ u32 image_loc[IMG_ACEV_ACECNF];
+} __packed;
+
+struct bootloader_sign {
+ u32 magic;
+ u32 image_size;
+ u8 image[0];
+} __packed;
+
+struct manifest {
+ u32 svn;
+ u32 header_ver;
+ u32 comp_flags;
+ u32 comp_name;
+ u32 comp_vendor_name;
+ u32 module_size;
+ u32 module_addr;
+} __packed;
+
+struct firmware_sign {
+ u32 magic;
+ u32 image_size;
+ u8 image[1];
+} __packed;
+
+/* spi transport layer */
+#define PACKET_SYNC 0x31
+#define MAX_SPI_MSG_SIZE 2048
+#define MAX_MEI_MSG_SIZE 512
+
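+/*
+ * A transport packet on the wire is a spi_xfer_hdr, followed by hdr.len
+ * payload bytes, followed by a 32-bit checksum.
+ */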
+#define CRC_SIZE sizeof(u32)
+#define PACKET_SIZE(pkt) (sizeof(pkt->hdr) + (pkt->hdr.len) + (CRC_SIZE))
+#define MAX_PACKET_SIZE \
+ (sizeof(struct spi_xfer_hdr) + MAX_SPI_MSG_SIZE + (CRC_SIZE))
+
+/* SPI xfer timeout size definition */
+#define XFER_TIMEOUT_BYTES 700
+#define MAX_XFER_BUFFER_SIZE ((MAX_PACKET_SIZE) + (XFER_TIMEOUT_BYTES))
+
+struct spi_xfer_hdr {
+ u8 sync;
+ u8 cmd;
+ u16 len;
+ u32 seq;
+} __packed;
+
+struct spi_xfer_packet {
+ struct spi_xfer_hdr hdr;
+ u8 buf[MAX_XFER_BUFFER_SIZE - sizeof(struct spi_xfer_hdr)];
+} __packed;
+
+#define CMD_SPI_WRITE 0x01
+#define CMD_SPI_READ 0x02
+#define CMD_SPI_RESET_NOTIFY 0x04
+
+#define CMD_SPI_ACK 0x10
+#define CMD_SPI_NACK 0x11
+#define CMD_SPI_BUSY 0x12
+#define CMD_SPI_FATAL_ERR 0x13
+
+struct host_timestamp {
+ u64 realtime;
+ u64 boottime;
+} __packed;
+
+struct vsc_boot_fw {
+ u32 main_ver;
+ u32 sub_ver;
+ u32 key_src;
+ u32 svn;
+
+ u8 tx_buf[FW_SPI_PKG_SIZE];
+ u8 rx_buf[FW_SPI_PKG_SIZE];
+
+ /* FirmwareBootFile */
+ char fw_file_name[256];
+ /* PkgBootFile */
+ char sensor_file_name[256];
+ /* SkuConfigBootFile */
+ char sku_cnf_file_name[256];
+
+ u32 fw_option;
+ u32 fw_cnt;
+ struct fragment frags[FRAGMENT_TYPE_MAX];
+};
+
+struct mei_vsc_hw {
+ struct spi_device *spi;
+ struct spi_transfer xfer;
+ struct spi_message msg;
+ u8 rx_buf[MAX_SPI_MSG_SIZE];
+ u8 tx_buf[MAX_SPI_MSG_SIZE];
+ u32 rx_len;
+
+ int wakeuphostint;
+ struct gpio_desc *wakeuphost;
+ struct gpio_desc *resetfw;
+ struct gpio_desc *wakeupfw;
+
+ struct vsc_boot_fw fw;
+ bool host_ready;
+ bool fw_ready;
+
+ /* mei transport layer */
+ u32 seq;
+ u8 tx_buf1[MAX_XFER_BUFFER_SIZE];
+ u8 rx_buf1[MAX_XFER_BUFFER_SIZE];
+
+ struct mutex mutex;
+ bool disconnect;
+ atomic_t lock_cnt;
+ int write_lock_cnt;
+ wait_queue_head_t xfer_wait;
+ char cam_sensor_name[32];
+};
+
+#define to_vsc_hw(dev) ((struct mei_vsc_hw *)((dev)->hw))
+
+#endif
new file mode 100644
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021, Intel Corporation. All rights reserved.
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ */
+#include <linux/acpi.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mei.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+#include "client.h"
+#include "hw-vsc.h"
+#include "mei_dev.h"
+
+#define CVFD_ACPI_ID_TGL "INTC1059"
+#define CVFD_ACPI_ID_ADL "INTC1095"
+#define LINK_NUMBER (1)
+#define METHOD_NAME_SID "SID"
+
+/* gpio resources */
+static const struct acpi_gpio_params wakeuphost_gpio = { 0, 0, false };
+static const struct acpi_gpio_params wakeuphostint_gpio = { 1, 0, false };
+static const struct acpi_gpio_params resetfw_gpio = { 2, 0, false };
+static const struct acpi_gpio_params wakeupfw = { 3, 0, false };
+static const struct acpi_gpio_mapping mei_vsc_acpi_gpios[] = {
+ { "wakeuphost-gpios", &wakeuphost_gpio, 1 },
+ { "wakeuphostint-gpios", &wakeuphostint_gpio, 1 },
+ { "resetfw-gpios", &resetfw_gpio, 1 },
+ { "wakeupfw-gpios", &wakeupfw, 1 },
+ {}
+};
+
+static struct acpi_device *find_cvfd_child_adev(struct acpi_device *parent)
+{
+ struct acpi_device *adev;
+
+ if (!parent)
+ return NULL;
+
+	list_for_each_entry(adev, &parent->children, node) {
+ if (!strcmp(CVFD_ACPI_ID_TGL, acpi_device_hid(adev)) ||
+ !strcmp(CVFD_ACPI_ID_ADL, acpi_device_hid(adev)))
+ return adev;
+ }
+
+ return NULL;
+}
+
+static int get_sensor_name(struct mei_device *dev)
+{
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ struct spi_device *spi = hw->spi;
+ struct acpi_device *adev;
+ union acpi_object obj = { .type = ACPI_TYPE_INTEGER };
+ union acpi_object *ret_obj;
+ struct acpi_object_list arg_list = {
+ .count = 1,
+ .pointer = &obj,
+ };
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ char *c;
+
+ adev = find_cvfd_child_adev(ACPI_COMPANION(&spi->dev));
+ if (!adev) {
+		dev_err(&spi->dev, "CVFD child device not found in ACPI\n");
+ return -ENODEV;
+ }
+
+ obj.integer.value = LINK_NUMBER;
+ status = acpi_evaluate_object(adev->handle, METHOD_NAME_SID, &arg_list,
+ &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&spi->dev, "can't evaluate SID method: %d\n", status);
+ return -ENODEV;
+ }
+
+	ret_obj = buffer.pointer;
+	/* require a string result and leave room for the terminating NUL */
+	if (ret_obj->type != ACPI_TYPE_STRING ||
+	    ret_obj->string.length >= sizeof(hw->cam_sensor_name)) {
+		ACPI_FREE(buffer.pointer);
+		return -EINVAL;
+	}
+
+	dev_dbg(&spi->dev, "SID status %d %lld %d - %d %s %d\n", status,
+		buffer.length, ret_obj->type, ret_obj->string.length,
+		ret_obj->string.pointer,
+		acpi_has_method(adev->handle, METHOD_NAME_SID));
+
+ memcpy(hw->cam_sensor_name, ret_obj->string.pointer,
+ ret_obj->string.length);
+
+	/* camera sensor names are all lower case */
+ for (c = hw->cam_sensor_name; *c != '\0'; c++)
+ *c = tolower(*c);
+
+ ACPI_FREE(buffer.pointer);
+ return 0;
+}
+
+static int mei_vsc_probe(struct spi_device *spi)
+{
+ struct mei_vsc_hw *hw;
+ struct mei_device *dev;
+ int ret;
+
+ dev = mei_vsc_dev_init(&spi->dev);
+ if (!dev)
+ return -ENOMEM;
+
+ hw = to_vsc_hw(dev);
+ mutex_init(&hw->mutex);
+ init_waitqueue_head(&hw->xfer_wait);
+ hw->spi = spi;
+ spi_set_drvdata(spi, dev);
+
+ ret = get_sensor_name(dev);
+ if (ret)
+ return ret;
+
+ ret = devm_acpi_dev_add_driver_gpios(&spi->dev, mei_vsc_acpi_gpios);
+ if (ret) {
+ dev_err(&spi->dev, "%s: fail to add gpio\n", __func__);
+ return -EBUSY;
+ }
+
+ hw->wakeuphost = devm_gpiod_get(&spi->dev, "wakeuphost", GPIOD_IN);
+ if (IS_ERR(hw->wakeuphost)) {
+ dev_err(&spi->dev, "gpio get irq failed\n");
+ return -EINVAL;
+ }
+ hw->resetfw = devm_gpiod_get(&spi->dev, "resetfw", GPIOD_OUT_HIGH);
+ if (IS_ERR(hw->resetfw)) {
+ dev_err(&spi->dev, "gpio get resetfw failed\n");
+ return -EINVAL;
+ }
+ hw->wakeupfw = devm_gpiod_get(&spi->dev, "wakeupfw", GPIOD_OUT_HIGH);
+ if (IS_ERR(hw->wakeupfw)) {
+ dev_err(&spi->dev, "gpio get wakeupfw failed\n");
+ return -EINVAL;
+ }
+
+ ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&spi->dev),
+ "wakeuphostint-gpios", 0);
+ if (ret < 0)
+ return ret;
+
+ hw->wakeuphostint = ret;
+ irq_set_status_flags(hw->wakeuphostint, IRQ_DISABLE_UNLAZY);
+	ret = request_threaded_irq(hw->wakeuphostint, mei_vsc_irq_quick_handler,
+				   mei_vsc_irq_thread_handler,
+				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+				   KBUILD_MODNAME, dev);
+	if (ret) {
+		dev_err(&spi->dev, "request_threaded_irq failed: irq = %d\n",
+			hw->wakeuphostint);
+		return ret;
+	}
+
+	if (mei_start(dev)) {
+ dev_err(&spi->dev, "init hw failure.\n");
+ ret = -ENODEV;
+ goto release_irq;
+ }
+
+ ret = mei_register(dev, &spi->dev);
+ if (ret)
+ goto stop;
+
+ pm_runtime_enable(dev->dev);
+ dev_dbg(&spi->dev, "initialization successful.\n");
+ return 0;
+
+stop:
+ mei_stop(dev);
+release_irq:
+ mei_cancel_work(dev);
+ mei_disable_interrupts(dev);
+ free_irq(hw->wakeuphostint, dev);
+ return ret;
+}
+
+static int __maybe_unused mei_vsc_suspend(struct device *device)
+{
+	struct spi_device *spi = to_spi_device(device);
+	struct mei_device *dev = spi_get_drvdata(spi);
+	struct mei_vsc_hw *hw;
+
+	if (!dev)
+		return -ENODEV;
+
+	hw = to_vsc_hw(dev);
+
+ dev_dbg(dev->dev, "%s\n", __func__);
+
+ hw->disconnect = true;
+ mei_stop(dev);
+ mei_disable_interrupts(dev);
+ free_irq(hw->wakeuphostint, dev);
+ return 0;
+}
+
+static int __maybe_unused mei_vsc_resume(struct device *device)
+{
+ struct spi_device *spi = to_spi_device(device);
+ struct mei_device *dev = spi_get_drvdata(spi);
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+ int ret;
+
+ dev_dbg(dev->dev, "%s\n", __func__);
+ irq_set_status_flags(hw->wakeuphostint, IRQ_DISABLE_UNLAZY);
+ ret = request_threaded_irq(hw->wakeuphostint, mei_vsc_irq_quick_handler,
+ mei_vsc_irq_thread_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ KBUILD_MODNAME, dev);
+ if (ret) {
+ dev_err(device, "request_threaded_irq failed: irq = %d.\n",
+ hw->wakeuphostint);
+ return ret;
+ }
+
+ hw->disconnect = false;
+ ret = mei_restart(dev);
+ if (ret)
+ return ret;
+
+ /* Start timer if stopped in suspend */
+ schedule_delayed_work(&dev->timer_work, HZ);
+ return 0;
+}
+
+static int mei_vsc_remove(struct spi_device *spi)
+{
+ struct mei_device *dev = spi_get_drvdata(spi);
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ dev_info(&spi->dev, "%s %d", __func__, hw->wakeuphostint);
+
+ pm_runtime_disable(dev->dev);
+ hw->disconnect = true;
+ mei_stop(dev);
+ mei_disable_interrupts(dev);
+ free_irq(hw->wakeuphostint, dev);
+ mei_deregister(dev);
+ mutex_destroy(&hw->mutex);
+ return 0;
+}
+
+/**
+ * mei_vsc_shutdown - Device Removal Routine
+ *
+ * @spi: SPI device structure
+ *
+ * mei_vsc_shutdown is called from the reboot notifier
+ * it's a simplified version of remove so we go down
+ * faster.
+ */
+static void mei_vsc_shutdown(struct spi_device *spi)
+{
+ struct mei_device *dev = spi_get_drvdata(spi);
+ struct mei_vsc_hw *hw = to_vsc_hw(dev);
+
+ dev_dbg(dev->dev, "shutdown\n");
+ hw->disconnect = true;
+ mei_stop(dev);
+
+ mei_disable_interrupts(dev);
+ free_irq(hw->wakeuphostint, dev);
+}
+
+static const struct dev_pm_ops mei_vsc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mei_vsc_suspend, mei_vsc_resume)
+};
+
+static const struct acpi_device_id mei_vsc_acpi_ids[] = {
+ { "INTC1058", 1 },
+ { "INTC1094", 1 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, mei_vsc_acpi_ids);
+
+static struct spi_driver mei_vsc_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.acpi_match_table = ACPI_PTR(mei_vsc_acpi_ids),
+		.pm = &mei_vsc_pm_ops,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+	.probe = mei_vsc_probe,
+	.remove = mei_vsc_remove,
+	.shutdown = mei_vsc_shutdown,
+};
+module_spi_driver(mei_vsc_driver);
+
+MODULE_AUTHOR("Ye Xiang <xiang.ye@intel.com>");
+MODULE_DESCRIPTION("Intel MEI VSC driver");
+MODULE_LICENSE("GPL v2");
@@ -413,6 +413,16 @@ config SPI_JCORE
This enables support for the SPI master controller in the J-Core
synthesizable, open source SoC.
+config SPI_LJCA
+	tristate "Intel La Jolla Cove Adapter SPI support"
+	depends on MFD_LJCA
+	help
+	  Select this option to enable the SPI driver for the Intel
+	  La Jolla Cove Adapter (LJCA) board.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi-ljca.
+
config SPI_LM70_LLP
tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
depends on PARPORT
@@ -61,6 +61,7 @@ obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
obj-$(CONFIG_SPI_IMX) += spi-imx.o
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
+obj-$(CONFIG_SPI_LJCA) += spi-ljca.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o
obj-$(CONFIG_SPI_MESON_SPICC) += spi-meson-spicc.o
new file mode 100644
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel La Jolla Cove Adapter USB-SPI driver
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/mfd/ljca.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+/* SPI commands */
+enum ljca_spi_cmd {
+ LJCA_SPI_INIT = 1,
+ LJCA_SPI_READ,
+ LJCA_SPI_WRITE,
+ LJCA_SPI_WRITEREAD,
+ LJCA_SPI_DEINIT,
+};
+
+#define LJCA_SPI_BUS_MAX_HZ 48000000
+enum {
+ LJCA_SPI_BUS_SPEED_24M,
+ LJCA_SPI_BUS_SPEED_12M,
+ LJCA_SPI_BUS_SPEED_8M,
+ LJCA_SPI_BUS_SPEED_6M,
+	LJCA_SPI_BUS_SPEED_4_8M, /* 4.8 MHz */
+ LJCA_SPI_BUS_SPEED_MIN = LJCA_SPI_BUS_SPEED_4_8M,
+};
+
+enum {
+ LJCA_SPI_CLOCK_LOW_POLARITY,
+ LJCA_SPI_CLOCK_HIGH_POLARITY,
+};
+
+enum {
+ LJCA_SPI_CLOCK_FIRST_PHASE,
+ LJCA_SPI_CLOCK_SECOND_PHASE,
+};
+
+#define LJCA_SPI_BUF_SIZE 60
+#define LJCA_SPI_MAX_XFER_SIZE \
+ (LJCA_SPI_BUF_SIZE - sizeof(struct spi_xfer_packet))
+union spi_clock_mode {
+ struct {
+ u8 polarity : 1;
+		u8 phase : 1;
+ u8 reserved : 6;
+ } u;
+
+ u8 mode;
+} __packed;
+
+struct spi_init_packet {
+ u8 index;
+ u8 speed;
+ union spi_clock_mode mode;
+} __packed;
+
+struct spi_xfer_indicator {
+ u8 id : 6;
+ u8 cmpl : 1;
+ u8 index : 1;
+};
+
+struct spi_xfer_packet {
+ struct spi_xfer_indicator indicator;
+ s8 len;
+ u8 data[];
+} __packed;
+
+struct ljca_spi_dev {
+ struct platform_device *pdev;
+ struct ljca_spi_info *ctr_info;
+ struct spi_master *master;
+ u8 speed;
+ u8 mode;
+
+ u8 obuf[LJCA_SPI_BUF_SIZE];
+ u8 ibuf[LJCA_SPI_BUF_SIZE];
+};
+
+static int ljca_spi_read_write(struct ljca_spi_dev *ljca_spi, const u8 *w_data,
+ u8 *r_data, int len, int id, int complete,
+ int cmd)
+{
+ struct spi_xfer_packet *w_packet =
+ (struct spi_xfer_packet *)ljca_spi->obuf;
+ struct spi_xfer_packet *r_packet =
+ (struct spi_xfer_packet *)ljca_spi->ibuf;
+ int ret;
+ int ibuf_len;
+
+ w_packet->indicator.index = ljca_spi->ctr_info->id;
+ w_packet->indicator.id = id;
+ w_packet->indicator.cmpl = complete;
+
+ if (cmd == LJCA_SPI_READ) {
+ w_packet->len = sizeof(u16);
+ *(u16 *)&w_packet->data[0] = len;
+ } else {
+ w_packet->len = len;
+ memcpy(w_packet->data, w_data, len);
+ }
+
+ ret = ljca_transfer(ljca_spi->pdev, cmd, w_packet,
+ sizeof(*w_packet) + w_packet->len, r_packet,
+ &ibuf_len);
+ if (ret)
+ return ret;
+
+ if (ibuf_len < sizeof(*r_packet) || r_packet->len <= 0) {
+		dev_err(&ljca_spi->pdev->dev, "received packet error, len %d\n",
+			r_packet->len);
+ return -EIO;
+ }
+
+ if (r_data)
+ memcpy(r_data, r_packet->data, r_packet->len);
+
+ return 0;
+}
+
+static int ljca_spi_init(struct ljca_spi_dev *ljca_spi, int div, int mode)
+{
+ struct spi_init_packet w_packet = { 0 };
+ int ret;
+
+ if (ljca_spi->mode == mode && ljca_spi->speed == div)
+ return 0;
+
+ if (mode & SPI_CPOL)
+ w_packet.mode.u.polarity = LJCA_SPI_CLOCK_HIGH_POLARITY;
+ else
+ w_packet.mode.u.polarity = LJCA_SPI_CLOCK_LOW_POLARITY;
+
+	if (mode & SPI_CPHA)
+		w_packet.mode.u.phase = LJCA_SPI_CLOCK_SECOND_PHASE;
+	else
+		w_packet.mode.u.phase = LJCA_SPI_CLOCK_FIRST_PHASE;
+
+ w_packet.index = ljca_spi->ctr_info->id;
+ w_packet.speed = div;
+ ret = ljca_transfer(ljca_spi->pdev, LJCA_SPI_INIT, &w_packet,
+ sizeof(w_packet), NULL, NULL);
+ if (ret)
+ return ret;
+
+ ljca_spi->mode = mode;
+ ljca_spi->speed = div;
+ return 0;
+}
+
+static int ljca_spi_deinit(struct ljca_spi_dev *ljca_spi)
+{
+ struct spi_init_packet w_packet = { 0 };
+
+ w_packet.index = ljca_spi->ctr_info->id;
+ return ljca_transfer(ljca_spi->pdev, LJCA_SPI_DEINIT, &w_packet,
+ sizeof(w_packet), NULL, NULL);
+}
+
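+/*
+ * Transfers longer than LJCA_SPI_MAX_XFER_SIZE are split into fragments;
+ * each fragment carries its sequence id and a completion flag in the
+ * spi_xfer_indicator so the adapter knows when the transfer ends.
+ */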
+static int ljca_spi_transfer(struct ljca_spi_dev *ljca_spi, const u8 *tx_data,
+ u8 *rx_data, u16 len)
+{
+	int ret = 0;
+ int remaining = len;
+ int offset = 0;
+ int cur_len;
+ int complete = 0;
+ int i;
+
+ for (i = 0; remaining > 0;
+ offset += cur_len, remaining -= cur_len, i++) {
+ dev_dbg(&ljca_spi->pdev->dev,
+ "fragment %d offset %d remaining %d ret %d\n", i,
+ offset, remaining, ret);
+
+ if (remaining > LJCA_SPI_MAX_XFER_SIZE) {
+ cur_len = LJCA_SPI_MAX_XFER_SIZE;
+ } else {
+ cur_len = remaining;
+ complete = 1;
+ }
+
+ if (tx_data && rx_data)
+ ret = ljca_spi_read_write(ljca_spi, tx_data + offset,
+ rx_data + offset, cur_len, i,
+ complete, LJCA_SPI_WRITEREAD);
+ else if (tx_data)
+ ret = ljca_spi_read_write(ljca_spi, tx_data + offset,
+ NULL, cur_len, i, complete,
+ LJCA_SPI_WRITE);
+ else if (rx_data)
+ ret = ljca_spi_read_write(ljca_spi, NULL,
+ rx_data + offset, cur_len, i,
+ complete, LJCA_SPI_READ);
+ else
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ljca_spi_prepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct ljca_spi_dev *ljca_spi = spi_master_get_devdata(master);
+ struct spi_device *spi = message->spi;
+
+ dev_dbg(&ljca_spi->pdev->dev, "cs %d\n", spi->chip_select);
+ return 0;
+}
+
+static int ljca_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct ljca_spi_dev *ljca_spi = spi_master_get_devdata(master);
+ int ret;
+ int div;
+
+	div = DIV_ROUND_UP(master->max_speed_hz, xfer->speed_hz) / 2 - 1;
+	if (div < LJCA_SPI_BUS_SPEED_24M)
+		div = LJCA_SPI_BUS_SPEED_24M;
+	if (div > LJCA_SPI_BUS_SPEED_MIN)
+		div = LJCA_SPI_BUS_SPEED_MIN;
+
+ ret = ljca_spi_init(ljca_spi, div, spi->mode);
+ if (ret < 0) {
+ dev_err(&ljca_spi->pdev->dev,
+ "cannot initialize transfer ret %d\n", ret);
+ return ret;
+ }
+
+ ret = ljca_spi_transfer(ljca_spi, xfer->tx_buf, xfer->rx_buf,
+ xfer->len);
+ if (ret < 0)
+ dev_err(&ljca_spi->pdev->dev, "ljca spi transfer failed!\n");
+
+ return ret;
+}
+
+static int ljca_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct ljca_spi_dev *ljca_spi;
+ struct ljca_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*ljca_spi));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+ ljca_spi = spi_master_get_devdata(master);
+
+ ljca_spi->ctr_info = &pdata->spi_info;
+ ljca_spi->master = master;
+ ljca_spi->master->dev.of_node = pdev->dev.of_node;
+ ljca_spi->pdev = pdev;
+
+ ACPI_COMPANION_SET(&ljca_spi->master->dev, ACPI_COMPANION(&pdev->dev));
+
+ master->bus_num = -1;
+ master->mode_bits = SPI_CPHA | SPI_CPOL;
+ master->prepare_message = ljca_spi_prepare_message;
+ master->transfer_one = ljca_spi_transfer_one;
+ master->auto_runtime_pm = false;
+ master->max_speed_hz = LJCA_SPI_BUS_MAX_HZ;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register master\n");
+ goto exit_free_master;
+ }
+
+ return ret;
+
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int ljca_spi_dev_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+ struct ljca_spi_dev *ljca_spi = spi_master_get_devdata(master);
+
+ ljca_spi_deinit(ljca_spi);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ljca_spi_dev_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int ljca_spi_dev_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops ljca_spi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(ljca_spi_dev_suspend, ljca_spi_dev_resume)
+};
+
+static struct platform_driver spi_ljca_driver = {
+ .driver = {
+ .name = "ljca-spi",
+ .pm = &ljca_spi_pm,
+ },
+ .probe = ljca_spi_probe,
+ .remove = ljca_spi_dev_remove,
+};
+
+module_platform_driver(spi_ljca_driver);
+
+MODULE_AUTHOR("Ye Xiang <xiang.ye@intel.com>");
+MODULE_DESCRIPTION("Intel La Jolla Cove Adapter USB-SPI driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ljca-spi");
@@ -174,4 +174,6 @@ source "drivers/usb/typec/Kconfig"
source "drivers/usb/roles/Kconfig"
+source "drivers/usb/intel_ulpss/Kconfig"
+
endif # USB_SUPPORT
@@ -66,3 +66,5 @@ obj-$(CONFIG_USBIP_CORE) += usbip/
obj-$(CONFIG_TYPEC) += typec/
obj-$(CONFIG_USB_ROLE_SWITCH) += roles/
+
+obj-$(CONFIG_INTEL_LPSS_USB) += intel_ulpss/
new file mode 100644
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config INTEL_LPSS_USB
+ tristate "Intel Low Power Subsystem support as USB devices"
+ depends on USB
+ help
+	  This driver supports USB-interfaced Intel Low Power Subsystem
+	  (LPSS) devices such as I2C and GPIO controllers.
+	  Say Y or M here if you have LPSS USB devices.
+	  To compile this driver as a module, choose M here: the
+	  module will be called intel_lpss_usb.
new file mode 100644
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INTEL_LPSS_USB) += intel_lpss_usb.o
+intel_lpss_usb-y := ulpss_bridge.o usb_stub.o mng_stub.o i2c_stub.o gpio_stub.o diag_stub.o
new file mode 100644
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel LPSS USB driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "diag_stub.h"
+
+struct diag_stub_priv {
+ struct usb_stub *stub;
+};
+
+int diag_get_state(struct usb_stub *stub)
+{
+ if (!stub)
+ return -EINVAL;
+
+ return usb_stub_write(stub, DIAG_GET_STATE, NULL, 0, true,
+ USB_WRITE_ACK_TIMEOUT);
+}
+
+int diag_get_fw_log(struct usb_stub *stub, u8 *buf, ssize_t *len)
+{
+	int ret;
+
+	if (!stub)
+		return -EINVAL;
+
+	mutex_lock(&stub->stub_mutex);
+ ret = usb_stub_write(stub, DIAG_GET_FW_LOG, NULL, 0, true,
+ USB_WRITE_ACK_TIMEOUT);
+
+ *len = stub->len;
+ if (!ret && stub->len > 0)
+ memcpy(buf, stub->buf, stub->len);
+
+ mutex_unlock(&stub->stub_mutex);
+ return ret;
+}
+
+int diag_get_coredump(struct usb_stub *stub, u8 *buf, ssize_t *len)
+{
+	int ret;
+
+	if (!stub)
+		return -EINVAL;
+
+	mutex_lock(&stub->stub_mutex);
+	ret = usb_stub_write(stub, DIAG_GET_FW_COREDUMP, NULL, 0, true,
+			     USB_WRITE_ACK_TIMEOUT);
+
+	*len = stub->len;
+	/* only copy the dump out when the request succeeded and data arrived */
+	if (!ret && stub->len > 0)
+		memcpy(buf, stub->buf, stub->len);
+
+ mutex_unlock(&stub->stub_mutex);
+
+ return ret;
+}
+
+int diag_get_statistic_info(struct usb_stub *stub)
+{
+ int ret;
+
+	if (!stub)
+ return -EINVAL;
+
+ mutex_lock(&stub->stub_mutex);
+ ret = usb_stub_write(stub, DIAG_GET_STATISTIC, NULL, 0, true,
+ USB_WRITE_ACK_TIMEOUT);
+ mutex_unlock(&stub->stub_mutex);
+
+ return ret;
+}
+
+int diag_set_trace_level(struct usb_stub *stub, u8 level)
+{
+ int ret;
+
+	if (!stub)
+ return -EINVAL;
+
+ mutex_lock(&stub->stub_mutex);
+ ret = usb_stub_write(stub, DIAG_SET_TRACE_LEVEL, &level, sizeof(level),
+ true, USB_WRITE_ACK_TIMEOUT);
+ mutex_unlock(&stub->stub_mutex);
+
+ return ret;
+}
+
+static void diag_stub_cleanup(struct usb_stub *stub)
+{
+	kfree(stub->priv);
+}
+
+int diag_stub_init(struct usb_interface *intf, void *cookie, u8 len)
+{
+	struct usb_stub *stub;
+	struct diag_stub_priv *priv;
+
+	if (!intf)
+		return -EINVAL;
+
+	stub = usb_stub_alloc(intf);
+	if (!stub)
+		return -EINVAL;
+
+ priv = kzalloc(sizeof(struct diag_stub_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->stub = stub;
+
+ stub->priv = priv;
+ stub->type = DIAG_CMD_TYPE;
+ stub->intf = intf;
+ stub->cleanup = diag_stub_cleanup;
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel LPSS USB driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#ifndef __DIAG_STUB_H__
+#define __DIAG_STUB_H__
+
+#include "usb_stub.h"
+
+int diag_set_trace_level(struct usb_stub *stub, u8 level);
+int diag_get_statistic_info(struct usb_stub *stub);
+int diag_stub_init(struct usb_interface *intf, void *cookie, u8 len);
+int diag_get_state(struct usb_stub *stub);
+int diag_get_fw_log(struct usb_stub *stub, u8 *buf, ssize_t *len);
+int diag_get_coredump(struct usb_stub *stub, u8 *buf, ssize_t *len);
+#endif
new file mode 100644
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel LPSS USB driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/gpio/driver.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include "gpio_stub.h"
+#include "protocol_intel_ulpss.h"
+#include "usb_stub.h"
+
+#define USB_BRIDGE_GPIO_HID "INTC1074"
+struct pin_info {
+ u8 bank_id;
+ bool valid;
+ u8 connect_mode;
+};
+
+struct gpio_stub_priv {
+ u32 id;
+ struct usb_stub *stub;
+
+	/* gpio descriptor */
+ struct gpio_descriptor descriptor;
+ struct pin_info *pin_info_table;
+
+ struct device dev;
+ u32 valid_gpio_num;
+ u32 total_gpio_num;
+ struct gpio_chip gpio_chip;
+
+ bool ready;
+};
+
+/* stub functions */
+static int gpio_stub_parse(struct usb_stub *stub, u8 cmd, u8 flags, u8 *data,
+ u32 len)
+{
+ if (!stub)
+ return -EINVAL;
+
+	if (cmd == GPIO_INTR_NOTIFY && stub->notify)
+		stub->notify(stub, GPIO_INTR_EVENT, NULL);
+
+ return 0;
+}
+
+static void gpio_stub_cleanup(struct usb_stub *stub)
+{
+	struct gpio_stub_priv *priv;
+
+	if (!stub || !stub->priv)
+		return;
+
+	priv = stub->priv;
+	dev_dbg(&stub->intf->dev, "%s unregister gpio dev\n", __func__);
+
+	if (priv->ready) {
+		gpiochip_remove(&priv->gpio_chip);
+		device_unregister(&priv->dev);
+	}
+
+	kfree(priv->pin_info_table);
+	priv->pin_info_table = NULL;
+	kfree(priv);
+}
+
+static int gpio_stub_update_descriptor(struct usb_stub *stub,
+ struct gpio_descriptor *descriptor,
+ u8 len)
+{
+ struct gpio_stub_priv *priv = stub->priv;
+ u32 i, j;
+ int pin_id;
+
+ if (!priv || !descriptor ||
+ len != offsetof(struct gpio_descriptor, bank_table) +
+ sizeof(struct bank_descriptor) *
+ descriptor->banks ||
+ len > sizeof(priv->descriptor))
+ return -EINVAL;
+
+ if ((descriptor->pins_per_bank <= 0) || (descriptor->banks <= 0)) {
+		dev_err(&stub->intf->dev, "%s pins_per_bank:%d banks:%d\n",
+ __func__, descriptor->pins_per_bank, descriptor->banks);
+ return -EINVAL;
+ }
+
+ priv->total_gpio_num = descriptor->pins_per_bank * descriptor->banks;
+ memcpy(&priv->descriptor, descriptor, len);
+
+	priv->pin_info_table =
+		kcalloc(descriptor->pins_per_bank * descriptor->banks,
+			sizeof(struct pin_info), GFP_KERNEL);
+ if (!priv->pin_info_table)
+ return -ENOMEM;
+
+ for (i = 0; i < descriptor->banks; i++) {
+ for (j = 0; j < descriptor->pins_per_bank; j++) {
+ pin_id = descriptor->pins_per_bank * i + j;
+ if ((descriptor->bank_table[i].bitmap & (1 << j))) {
+ priv->pin_info_table[pin_id].valid = true;
+ priv->valid_gpio_num++;
+ dev_dbg(&stub->intf->dev,
+ "%s found one valid pin (%d %d %d %d %d)\n",
+ __func__, i, j,
+ descriptor->bank_table[i].pin_num,
+ descriptor->pins_per_bank,
+ priv->valid_gpio_num);
+ } else {
+ priv->pin_info_table[pin_id].valid = false;
+ }
+ priv->pin_info_table[pin_id].bank_id = i;
+ }
+ }
+
+ dev_dbg(&stub->intf->dev, "%s valid_gpio_num:%d total_gpio_num:%d\n",
+ __func__, priv->valid_gpio_num, priv->total_gpio_num);
+ return 0;
+}
+
+static struct pin_info *gpio_stub_get_pin_info(struct usb_stub *stub, u8 pin_id)
+{
+ struct pin_info *pin_info = NULL;
+ struct gpio_stub_priv *priv = stub->priv;
+
+ BUG_ON(!priv);
+
+	if (pin_id >=
+	    priv->descriptor.banks * priv->descriptor.pins_per_bank) {
+		dev_err(&stub->intf->dev,
+			"pin_id:%d banks:%d, pins_per_bank:%d\n", pin_id,
+			priv->descriptor.banks, priv->descriptor.pins_per_bank);
+		return NULL;
+	}
+
+	pin_info = &priv->pin_info_table[pin_id];
+	if (!pin_info->valid) {
+		dev_err(&stub->intf->dev,
+			"%s pin_id:%d banks:%d, pins_per_bank:%d valid:%d\n",
+			__func__, pin_id, priv->descriptor.banks,
+			priv->descriptor.pins_per_bank, pin_info->valid);
+		return NULL;
+	}
+
+ return pin_info;
+}
+
+static int gpio_stub_ready(struct usb_stub *stub, void *cookie, u8 len);
+int gpio_stub_init(struct usb_interface *intf, void *cookie, u8 len)
+{
+ struct usb_stub *stub = usb_stub_alloc(intf);
+ struct gpio_stub_priv *priv;
+
+ if (!intf || !stub)
+ return -EINVAL;
+
+ priv = kzalloc(sizeof(struct gpio_stub_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->stub = stub;
+ priv->id = DEFAULT_GPIO_CONTROLLER_ID;
+
+ stub->priv = priv;
+ stub->type = GPIO_CMD_TYPE;
+ stub->intf = intf;
+ stub->parse = gpio_stub_parse;
+ stub->cleanup = gpio_stub_cleanup;
+
+ return gpio_stub_ready(stub, cookie, len);
+}
+
+/* gpio functions */
+static u8 gpio_get_payload_len(u8 pin_num)
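+/*
+ * A GPIO command payload is a gpio_packet header carrying the operation
+ * count, followed by packet->num gpio_op (index, value) entries;
+ * gpio_get_payload_len() returns the total size of such a payload.
+ */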
+{
+ return sizeof(struct gpio_packet) + pin_num * sizeof(struct gpio_op);
+}
+
+static int gpio_config(struct usb_stub *stub, u8 pin_id, u8 config)
+{
+ struct gpio_packet *packet;
+ struct pin_info *pin_info;
+ u8 buf[MAX_PAYLOAD_SIZE] = { 0 };
+
+ if (!stub)
+ return -EINVAL;
+
+ dev_dbg(&stub->intf->dev, "%s pin_id:%u\n", __func__, pin_id);
+
+ packet = (struct gpio_packet *)buf;
+
+ pin_info = gpio_stub_get_pin_info(stub, pin_id);
+ if (!pin_info) {
+ dev_err(&stub->intf->dev, "invalid gpio pin pin_id:%d\n",
+ pin_id);
+ return -EINVAL;
+ }
+
+ packet->item[0].index = pin_id;
+ packet->item[0].value = config | pin_info->connect_mode;
+ packet->num = 1;
+
+ return usb_stub_write(stub, GPIO_CONFIG, (u8 *)packet,
+ (u8)gpio_get_payload_len(packet->num), true,
+ USB_WRITE_ACK_TIMEOUT);
+}
+
+static int intel_ulpss_gpio_read(struct usb_stub *stub, u8 pin_id, int *data)
+{
+ struct gpio_packet *packet;
+ struct pin_info *pin_info;
+ struct gpio_packet *ack_packet;
+ u8 buf[MAX_PAYLOAD_SIZE] = { 0 };
+ int ret;
+
+ if (!stub)
+ return -EINVAL;
+
+ packet = (struct gpio_packet *)buf;
+ packet->num = 1;
+
+ pin_info = gpio_stub_get_pin_info(stub, pin_id);
+ if (!pin_info) {
+ dev_err(&stub->intf->dev, "invalid gpio pin_id:[%u]", pin_id);
+ return -EINVAL;
+ }
+
+ packet->item[0].index = pin_id;
+ ret = usb_stub_write(stub, GPIO_READ, (u8 *)packet,
+ (u8)gpio_get_payload_len(packet->num), true,
+ USB_WRITE_ACK_TIMEOUT);
+
+ ack_packet = (struct gpio_packet *)stub->buf;
+
+ BUG_ON(!ack_packet);
+ if (ret || !stub->len || ack_packet->num != packet->num) {
+ dev_err(&stub->intf->dev,
+ "%s usb_stub_write failed pin_id:%d ret %d", __func__,
+ pin_id, ret);
+ return -EIO;
+ }
+
+ *data = (ack_packet->item[0].value > 0) ? 1 : 0;
+
+ return ret;
+}
+
+static int intel_ulpss_gpio_write(struct usb_stub *stub, u8 pin_id, int value)
+{
+ struct gpio_packet *packet;
+ u8 buf[MAX_PAYLOAD_SIZE] = { 0 };
+
+ BUG_ON(!stub);
+
+ packet = (struct gpio_packet *)buf;
+ packet->num = 1;
+
+ packet->item[0].index = pin_id;
+ packet->item[0].value = (value & 1);
+
+ return usb_stub_write(stub, GPIO_WRITE, buf,
+ gpio_get_payload_len(packet->num), true,
+ USB_WRITE_ACK_TIMEOUT);
+}
+
+/* gpio chip */
+static int intel_ulpss_gpio_get_value(struct gpio_chip *chip, unsigned int off)
+{
+ struct gpio_stub_priv *priv = gpiochip_get_data(chip);
+ int value = 0;
+ int ret;
+
+ dev_dbg(chip->parent, "%s off:%u\n", __func__, off);
+ ret = intel_ulpss_gpio_read(priv->stub, off, &value);
+ if (ret) {
+		dev_err(chip->parent, "%s off:%d get value failed %d\n",
+ __func__, off, ret);
+ }
+ return value;
+}
+
+static void intel_ulpss_gpio_set_value(struct gpio_chip *chip,
+				       unsigned int off, int val)
+{
+ struct gpio_stub_priv *priv = gpiochip_get_data(chip);
+ int ret;
+
+ dev_dbg(chip->parent, "%s off:%u val:%d\n", __func__, off, val);
+ ret = intel_ulpss_gpio_write(priv->stub, off, val);
+ if (ret) {
+		dev_err(chip->parent, "%s off:%d val:%d set value failed %d\n",
+ __func__, off, val, ret);
+ }
+}
+
+static int intel_ulpss_gpio_direction_input(struct gpio_chip *chip,
+					    unsigned int off)
+{
+ struct gpio_stub_priv *priv = gpiochip_get_data(chip);
+ u8 config = GPIO_CONF_INPUT | GPIO_CONF_CLR;
+
+ dev_dbg(chip->parent, "%s off:%u\n", __func__, off);
+ return gpio_config(priv->stub, off, config);
+}
+
+static int intel_ulpss_gpio_direction_output(struct gpio_chip *chip,
+					     unsigned int off, int val)
+{
+ struct gpio_stub_priv *priv = gpiochip_get_data(chip);
+ u8 config = GPIO_CONF_OUTPUT | GPIO_CONF_CLR;
+ int ret;
+
+ dev_dbg(chip->parent, "%s off:%u\n", __func__, off);
+ ret = gpio_config(priv->stub, off, config);
+ if (ret)
+ return ret;
+
+ intel_ulpss_gpio_set_value(chip, off, val);
+ return ret;
+}
+
+static int intel_ulpss_gpio_set_config(struct gpio_chip *chip, unsigned int off,
+ unsigned long config)
+{
+ struct gpio_stub_priv *priv = gpiochip_get_data(chip);
+ struct pin_info *pin_info;
+
+ dev_dbg(chip->parent, "%s off:%d\n", __func__, off);
+
+ pin_info = gpio_stub_get_pin_info(priv->stub, off);
+ if (!pin_info) {
+		dev_err(chip->parent, "invalid gpio pin off:%d\n", off);
+
+ return -EINVAL;
+ }
+
+ dev_dbg(chip->parent, " %s off:%d config:%d\n", __func__, off,
+ pinconf_to_config_param(config));
+
+ pin_info->connect_mode = 0;
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ pin_info->connect_mode |= GPIO_CONF_PULLUP;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ pin_info->connect_mode |= GPIO_CONF_PULLDOWN;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ dev_dbg(chip->parent, " %s off:%d connect_mode:%d\n", __func__, off,
+ pin_info->connect_mode);
+ return 0;
+}
+
+static void gpio_dev_release(struct device *dev)
+{
+ dev_dbg(dev, "%s\n", __func__);
+}
+
+static int intel_ulpss_gpio_chip_setup(struct usb_interface *intf,
+ struct usb_stub *stub)
+{
+ struct gpio_stub_priv *priv;
+ struct gpio_chip *gc;
+ struct acpi_device *adev;
+ int ret;
+
+ priv = stub->priv;
+ priv->dev.parent = &intf->dev;
+ priv->dev.init_name = "intel-ulpss-gpio";
+ priv->dev.release = gpio_dev_release;
+ adev = find_adev_by_hid(
+ ACPI_COMPANION(&(interface_to_usbdev(intf)->dev)),
+ USB_BRIDGE_GPIO_HID);
+ if (adev) {
+ ACPI_COMPANION_SET(&priv->dev, adev);
+ dev_info(&intf->dev, "found: %s -> %s\n", dev_name(&intf->dev),
+ acpi_device_hid(adev));
+ } else {
+ dev_err(&intf->dev, "not found: %s\n", USB_BRIDGE_GPIO_HID);
+ }
+
+	ret = device_register(&priv->dev);
+	if (ret) {
+		dev_err(&intf->dev, "device register failed\n");
+		put_device(&priv->dev);
+
+		return ret;
+	}
+
+ gc = &priv->gpio_chip;
+ gc->direction_input = intel_ulpss_gpio_direction_input;
+ gc->direction_output = intel_ulpss_gpio_direction_output;
+ gc->get = intel_ulpss_gpio_get_value;
+ gc->set = intel_ulpss_gpio_set_value;
+ gc->set_config = intel_ulpss_gpio_set_config;
+ gc->can_sleep = true;
+ gc->parent = &priv->dev;
+
+ gc->base = -1;
+ gc->ngpio = priv->total_gpio_num;
+ gc->label = "intel_ulpss gpiochip";
+ gc->owner = THIS_MODULE;
+
+ ret = gpiochip_add_data(gc, priv);
+ if (ret) {
+ dev_err(&intf->dev, "%s gpiochip add failed ret:%d\n", __func__,
+ ret);
+ } else {
+ priv->ready = true;
+ dev_info(&intf->dev,
+ "%s gpiochip add success, base:%d ngpio:%d\n",
+ __func__, gc->base, gc->ngpio);
+ }
+
+ return ret;
+}
+
+static int gpio_stub_ready(struct usb_stub *stub, void *cookie, u8 len)
+{
+ struct gpio_descriptor *descriptor = cookie;
+ int ret;
+
+ if (!descriptor || (descriptor->pins_per_bank <= 0) ||
+ (descriptor->banks <= 0)) {
+ dev_err(&stub->intf->dev,
+ "%s gpio stub descriptor not correct\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = gpio_stub_update_descriptor(stub, descriptor, len);
+ if (ret) {
+ dev_err(&stub->intf->dev,
+ "%s gpio stub update descriptor failed\n", __func__);
+ return ret;
+ }
+
+ ret = intel_ulpss_gpio_chip_setup(stub->intf, stub);
+
+ return ret;
+}
new file mode 100644
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel LPSS USB driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#ifndef __GPIO_STUB_H__
+#define __GPIO_STUB_H__
+
+int gpio_stub_init(struct usb_interface *intf, void *cookie, u8 len);
+
+#endif
new file mode 100644
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel LPSS USB driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bug.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include "i2c_stub.h"
+#include "protocol_intel_ulpss.h"
+#include "usb_stub.h"
+
+#define USB_BRIDGE_I2C_HID_0 "INTC1075"
+#define USB_BRIDGE_I2C_HID_1 "INTC1076"
+
+static char *usb_bridge_i2c_hids[] = { USB_BRIDGE_I2C_HID_0,
+ USB_BRIDGE_I2C_HID_1 };
+
+struct i2c_stub_priv {
+ struct usb_stub *stub;
+ struct i2c_descriptor descriptor;
+ struct i2c_adapter *adap;
+
+ bool ready;
+};
+
+static u8 i2c_format_slave_addr(u8 slave_addr, enum i2c_address_mode mode)
+{
+ if (mode == I2C_ADDRESS_MODE_7Bit)
+ return slave_addr << 1;
+
+ return 0xFF;
+}
+
+static void i2c_stub_cleanup(struct usb_stub *stub)
+{
+ struct i2c_stub_priv *priv = stub->priv;
+ int i;
+
+ if (!priv)
+ return;
+
+	if (priv->ready) {
+		for (i = 0; i < priv->descriptor.num; i++)
+			if (priv->adap[i].nr != -1)
+				i2c_del_adapter(&priv->adap[i]);
+	}
+
+	kfree(priv->adap);
+
+ kfree(priv);
+}
+
+static int i2c_stub_update_descriptor(struct usb_stub *stub,
+ struct i2c_descriptor *descriptor, u8 len)
+{
+ struct i2c_stub_priv *priv = stub->priv;
+
+ if (!priv || !descriptor ||
+ len != offsetof(struct i2c_descriptor, info) +
+ descriptor->num *
+ sizeof(struct i2c_controller_info) ||
+ len > sizeof(priv->descriptor))
+ return -EINVAL;
+
+ memcpy(&priv->descriptor, descriptor, len);
+
+ return 0;
+}
+
+static int i2c_stub_ready(struct usb_stub *stub, void *cookie, u8 len);
+int i2c_stub_init(struct usb_interface *intf, void *cookie, u8 len)
+{
+	struct usb_stub *stub;
+	struct i2c_stub_priv *priv;
+
+	if (!intf)
+		return -EINVAL;
+
+	stub = usb_stub_alloc(intf);
+	if (!stub)
+		return -ENOMEM;
+
+ priv = kzalloc(sizeof(struct i2c_stub_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ stub->priv = priv;
+ stub->type = I2C_CMD_TYPE;
+ stub->intf = intf;
+ stub->cleanup = i2c_stub_cleanup;
+
+ priv->stub = stub;
+
+ return i2c_stub_ready(stub, cookie, len);
+}
+
+/* i2c interface */
+static int intel_ulpss_i2c_init(struct usb_stub *stub, u8 id)
+{
+ struct i2c_raw_io *i2c_config;
+ u8 buf[MAX_PAYLOAD_SIZE] = { 0 };
+ int ret;
+
+ i2c_config = (struct i2c_raw_io *)buf;
+
+ i2c_config->id = id;
+ i2c_config->len = 1;
+ i2c_config->data[0] = I2C_FLAG_FREQ_400K;
+
+ ret = usb_stub_write(stub, I2C_INIT, (u8 *)i2c_config,
+ sizeof(struct i2c_raw_io) + i2c_config->len, true,
+ USB_WRITE_ACK_TIMEOUT);
+
+ return ret;
+}
+
+static int intel_ulpss_i2c_start(struct usb_stub *stub, u8 id, u8 slave_addr,
+ enum xfer_type type)
+{
+ struct i2c_raw_io *raw_io;
+ u8 buf[MAX_PAYLOAD_SIZE] = { 0 };
+ int ret;
+
+ BUG_ON(!stub);
+
+ raw_io = (struct i2c_raw_io *)buf;
+ raw_io->id = id;
+ raw_io->len = 1;
+ raw_io->data[0] =
+ i2c_format_slave_addr(slave_addr, I2C_ADDRESS_MODE_7Bit);
+ raw_io->data[0] |= (type == READ_XFER_TYPE) ? I2C_SLAVE_TRANSFER_READ :
+ I2C_SLAVE_TRANSFER_WRITE;
+
+	ret = usb_stub_write(stub, I2C_START, (u8 *)raw_io,
+			     sizeof(struct i2c_raw_io) + raw_io->len, true,
+			     USB_WRITE_ACK_TIMEOUT);
+	if (ret)
+		return ret;
+
+ if (stub->len < sizeof(struct i2c_raw_io))
+ return -EIO;
+
+ raw_io = (struct i2c_raw_io *)stub->buf;
+ if (raw_io->len < 0 || raw_io->id != id) {
+ dev_err(&stub->intf->dev,
+ "%s i2c start failed len:%d id:%d %d\n", __func__,
+ raw_io->len, raw_io->id, id);
+ return -EIO;
+ }
+
+ return ret;
+}
+
+static int intel_ulpss_i2c_stop(struct usb_stub *stub, u8 id, u8 slave_addr)
+{
+ struct i2c_raw_io *raw_io;
+ u8 buf[MAX_PAYLOAD_SIZE] = { 0 };
+ int ret;
+
+ BUG_ON(!stub);
+
+ raw_io = (struct i2c_raw_io *)buf;
+ raw_io->id = id;
+ raw_io->len = 1;
+ raw_io->data[0] = 0;
+
+	ret = usb_stub_write(stub, I2C_STOP, (u8 *)raw_io,
+			     sizeof(struct i2c_raw_io) + 1, true,
+			     USB_WRITE_ACK_TIMEOUT);
+	if (ret)
+		return ret;
+
+ if (stub->len < sizeof(struct i2c_raw_io))
+ return -EIO;
+
+ raw_io = (struct i2c_raw_io *)stub->buf;
+ if (raw_io->len < 0 || raw_io->id != id) {
+ dev_err(&stub->intf->dev,
+ "%s i2c stop failed len:%d id:%d %d\n", __func__,
+ raw_io->len, raw_io->id, id);
+ return -EIO;
+ }
+
+ return ret;
+}
+
+static int intel_ulpss_i2c_pure_read(struct usb_stub *stub, u8 id, u8 *data,
+ u8 len)
+{
+ struct i2c_raw_io *raw_io;
+ u8 buf[MAX_PAYLOAD_SIZE] = { 0 };
+ int ret;
+
+ raw_io = (struct i2c_raw_io *)buf;
+
+	BUG_ON(!stub);
+	if (len > MAX_PAYLOAD_SIZE - sizeof(struct i2c_raw_io))
+		return -EINVAL;
+
+ raw_io->id = id;
+ raw_io->len = len;
+ raw_io->data[0] = 0;
+ ret = usb_stub_write(stub, I2C_READ, (u8 *)raw_io,
+ sizeof(struct i2c_raw_io) + 1, true,
+ USB_WRITE_ACK_TIMEOUT);
+ if (ret) {
+ dev_err(&stub->intf->dev, "%s ret:%d\n", __func__, ret);
+ return ret;
+ }
+
+ if (stub->len < sizeof(struct i2c_raw_io))
+ return -EIO;
+
+ raw_io = (struct i2c_raw_io *)stub->buf;
+ if (raw_io->len < 0 || raw_io->id != id) {
+ dev_err(&stub->intf->dev,
+			"%s i2c raw read failed len:%d id:%d %d\n", __func__,
+ raw_io->len, raw_io->id, id);
+ return -EIO;
+ }
+
+	if (raw_io->len != len)
+		return -EIO;
+
+ memcpy(data, raw_io->data, raw_io->len);
+
+ return 0;
+}
+
+static int intel_ulpss_i2c_read(struct usb_stub *stub, u8 id, u8 slave_addr,
+ u8 *data, u8 len)
+{
+ int ret;
+
+ BUG_ON(!stub);
+ ret = intel_ulpss_i2c_start(stub, id, slave_addr, READ_XFER_TYPE);
+ if (ret) {
+ dev_err(&stub->intf->dev, "%s i2c start failed ret:%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = intel_ulpss_i2c_pure_read(stub, id, data, len);
+ if (ret) {
+ dev_err(&stub->intf->dev, "%s i2c raw read failed ret:%d\n",
+ __func__, ret);
+
+ return ret;
+ }
+
+ ret = intel_ulpss_i2c_stop(stub, id, slave_addr);
+ if (ret) {
+ dev_err(&stub->intf->dev, "%s i2c stop failed ret:%d\n",
+ __func__, ret);
+
+ return ret;
+ }
+
+ return ret;
+}
+
+static int intel_ulpss_i2c_pure_write(struct usb_stub *stub, u8 id, u8 *data,
+ u8 len)
+{
+ struct i2c_raw_io *raw_io;
+ u8 buf[MAX_PAYLOAD_SIZE] = { 0 };
+ int ret;
+
+ if (len > MAX_PAYLOAD_SIZE - sizeof(struct i2c_raw_io)) {
+		dev_err(&stub->intf->dev, "%s data too long, len: %d\n",
+ __func__, len);
+ return -EINVAL;
+ }
+
+ raw_io = (struct i2c_raw_io *)buf;
+ raw_io->id = id;
+ raw_io->len = len;
+ memcpy(raw_io->data, data, len);
+
+	ret = usb_stub_write(stub, I2C_WRITE, (u8 *)raw_io,
+			     sizeof(struct i2c_raw_io) + raw_io->len, true,
+			     USB_WRITE_ACK_TIMEOUT);
+	if (ret)
+		return ret;
+
+ if (stub->len < sizeof(struct i2c_raw_io))
+ return -EIO;
+
+ raw_io = (struct i2c_raw_io *)stub->buf;
+ if (raw_io->len < 0 || raw_io->id != id) {
+ dev_err(&stub->intf->dev,
+ "%s i2c write failed len:%d id:%d %d\n", __func__,
+ raw_io->len, raw_io->id, id);
+ return -EIO;
+ }
+ return ret;
+}
+
+static int intel_ulpss_i2c_write(struct usb_stub *stub, u8 id, u8 slave_addr,
+ u8 *data, u8 len)
+{
+	int ret;
+
+	BUG_ON(!stub);
+
+ ret = intel_ulpss_i2c_start(stub, id, slave_addr, WRITE_XFER_TYPE);
+ if (ret)
+ return ret;
+
+ ret = intel_ulpss_i2c_pure_write(stub, id, data, len);
+ if (ret)
+ return ret;
+
+ ret = intel_ulpss_i2c_stop(stub, id, slave_addr);
+
+ return ret;
+}
+
+static int intel_ulpss_i2c_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msg, int num)
+{
+ struct i2c_stub_priv *priv;
+ struct i2c_msg *cur_msg;
+ struct usb_stub *stub;
+ int id = -1;
+ int i, ret;
+
+	priv = i2c_get_adapdata(adapter);
+	if (!priv || !priv->stub) {
+		dev_err(&adapter->dev, "%s num:%d adapter data not ready\n",
+			__func__, num);
+		return -EINVAL;
+	}
+	stub = priv->stub;
+
+ for (i = 0; i < priv->descriptor.num; i++)
+ if (&priv->adap[i] == adapter)
+ id = i;
+
+ mutex_lock(&stub->stub_mutex);
+ ret = intel_ulpss_i2c_init(stub, id);
+	if (ret) {
+		dev_err(&adapter->dev, "%s i2c init failed id:%d\n", __func__,
+			adapter->nr);
+		mutex_unlock(&stub->stub_mutex);
+		return -EIO;
+	}
+
+ for (i = 0; !ret && i < num && id >= 0; i++) {
+ cur_msg = &msg[i];
+ dev_dbg(&adapter->dev, "%s i:%d id:%d msg:(%d %d)\n", __func__,
+ i, id, cur_msg->flags, cur_msg->len);
+ if (cur_msg->flags & I2C_M_RD)
+ ret = intel_ulpss_i2c_read(priv->stub, id,
+ cur_msg->addr, cur_msg->buf,
+ cur_msg->len);
+
+ else
+ ret = intel_ulpss_i2c_write(priv->stub, id,
+ cur_msg->addr, cur_msg->buf,
+ cur_msg->len);
+ }
+
+ mutex_unlock(&stub->stub_mutex);
+ dev_dbg(&adapter->dev, "%s id:%d ret:%d\n", __func__, id, ret);
+
+ /* return the number of messages processed, or the error code */
+ if (ret)
+ return ret;
+ return num;
+}
+
+static u32 intel_ulpss_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm intel_ulpss_i2c_algo = {
+ .master_xfer = intel_ulpss_i2c_xfer,
+ .functionality = intel_ulpss_i2c_func,
+};
+
+static int intel_ulpss_i2c_adapter_register(struct usb_interface *intf,
+ struct i2c_adapter *adap, int index)
+{
+ struct acpi_device *adev;
+
+	if (!adap || index < 0 || index >= ARRAY_SIZE(usb_bridge_i2c_hids))
+ return -EINVAL;
+
+ adev = find_adev_by_hid(
+ ACPI_COMPANION(&(interface_to_usbdev(intf)->dev)),
+ usb_bridge_i2c_hids[index]);
+ if (adev) {
+ dev_info(&intf->dev, "Found: %s -> %s\n", dev_name(&intf->dev),
+ acpi_device_hid(adev));
+ ACPI_COMPANION_SET(&adap->dev, adev);
+ } else {
+ dev_err(&intf->dev, "Not Found: %s\n",
+ usb_bridge_i2c_hids[index]);
+ }
+
+ return i2c_add_adapter(adap);
+}
+
+static int intel_ulpss_i2c_adapter_setup(struct usb_interface *intf,
+ struct usb_stub *stub)
+{
+ struct i2c_stub_priv *priv;
+ int ret;
+ int i;
+
+ if (!intf || !stub)
+ return -EINVAL;
+
+ priv = stub->priv;
+ if (!priv)
+ return -EINVAL;
+
+	priv->adap = kcalloc(priv->descriptor.num, sizeof(struct i2c_adapter),
+			     GFP_KERNEL);
+	if (!priv->adap)
+		return -ENOMEM;
+
+ for (i = 0; i < priv->descriptor.num; i++) {
+ priv->adap[i].owner = THIS_MODULE;
+ snprintf(priv->adap[i].name, sizeof(priv->adap[i].name),
+ "intel-ulpss-i2c-%d", i);
+ priv->adap[i].algo = &intel_ulpss_i2c_algo;
+ priv->adap[i].dev.parent = &intf->dev;
+
+ i2c_set_adapdata(&priv->adap[i], priv);
+
+ ret = intel_ulpss_i2c_adapter_register(intf, &priv->adap[i], i);
+ if (ret)
+ return ret;
+ }
+
+	priv->ready = true;
+	return 0;
+}
+
+static int i2c_stub_ready(struct usb_stub *stub, void *cookie, u8 len)
+{
+ struct i2c_descriptor *descriptor = cookie;
+ int ret;
+
+ if (!descriptor || descriptor->num <= 0) {
+ dev_err(&stub->intf->dev,
+ "%s i2c stub descriptor not correct\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = i2c_stub_update_descriptor(stub, descriptor, len);
+ if (ret) {
+ dev_err(&stub->intf->dev,
+ "%s i2c stub update descriptor failed\n", __func__);
+ return ret;
+ }
+
+ ret = intel_ulpss_i2c_adapter_setup(stub->intf, stub);
+ return ret;
+}
new file mode 100644
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel LPSS USB driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#ifndef __I2C_STUB_H__
+#define __I2C_STUB_H__
+
+#include <linux/types.h>
+
+enum i2c_address_mode { I2C_ADDRESS_MODE_7Bit, I2C_ADDRESS_MODE_10Bit };
+enum xfer_type {
+ READ_XFER_TYPE,
+ WRITE_XFER_TYPE,
+};
+
+int i2c_stub_init(struct usb_interface *intf, void *cookie, u8 len);
+
+#endif
new file mode 100644
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel LPSS USB driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#include "ulpss_bridge.h"
+#include "mng_stub.h"
+#include "protocol_intel_ulpss.h"
+#include "usb_stub.h"
+#include "i2c_stub.h"
+#include "gpio_stub.h"
+
+struct mng_stub_priv {
+ long reset_id;
+ struct usb_stub *stub;
+ bool synced;
+};
+
+static void mng_cleanup(struct usb_stub *stub)
+{
+ struct mng_stub_priv *priv = stub->priv;
+
+	kfree(priv);
+}
+
+static int mng_reset_ack(struct usb_stub *stub, u32 reset_id)
+{
+ return usb_stub_write(stub, MNG_RESET_NOTIFY, (u8 *)&reset_id,
+ (u8)sizeof(reset_id), false,
+ USB_WRITE_ACK_TIMEOUT);
+}
+
+static int mng_stub_parse(struct usb_stub *stub, u8 cmd, u8 ack, u8 *data,
+ u32 len)
+{
+	struct mng_stub_priv *priv;
+	int ret = 0;
+
+	if (!stub || !stub->intf)
+		return -EINVAL;
+
+	priv = stub->priv;
+
+ switch (cmd) {
+ case MNG_RESET_NOTIFY:
+ if (data && (len >= sizeof(u32))) {
+ u32 reset_id = *(u32 *)data;
+
+ if (!ack)
+ ret = mng_reset_ack(stub, reset_id);
+
+			priv->synced = !ret;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int mng_stub_init(struct usb_interface *intf, void *cookie, u8 len)
+{
+	struct usb_stub *stub;
+	struct mng_stub_priv *priv;
+
+	if (!intf)
+		return -EINVAL;
+
+	stub = usb_stub_alloc(intf);
+	if (!stub)
+		return -ENOMEM;
+
+ priv = kzalloc(sizeof(struct mng_stub_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->reset_id = 0;
+ priv->stub = stub;
+
+ stub->priv = priv;
+ stub->type = MNG_CMD_TYPE;
+ stub->intf = intf;
+ stub->parse = mng_stub_parse;
+ stub->cleanup = mng_cleanup;
+
+ return 0;
+}
+
+static int mng_reset_handshake(struct usb_stub *stub)
+{
+	struct mng_stub_priv *priv;
+	long reset_id;
+	int ret;
+
+	if (!stub)
+		return -EINVAL;
+
+	priv = stub->priv;
+	reset_id = priv->reset_id++;
+ ret = usb_stub_write(stub, MNG_RESET_NOTIFY, (u8 *)&reset_id,
+ (u8)sizeof(reset_id), true, USB_WRITE_ACK_TIMEOUT);
+
+ if (ret || !priv->synced) {
+ dev_err(&stub->intf->dev, "%s priv->synced:%d ret:%d\n",
+ __func__, priv->synced, ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int mng_reset(struct usb_stub *stub)
+{
+ if (!stub)
+ return -EINVAL;
+
+ return usb_stub_write(stub, MNG_RESET, NULL, 0, true,
+ USB_WRITE_ACK_TIMEOUT);
+}
+
+static int mng_enum_gpio(struct usb_stub *stub)
+{
+ int ret;
+
+ if (!stub)
+ return -EINVAL;
+
+ ret = usb_stub_write(stub, MNG_ENUM_GPIO, NULL, 0, true,
+ USB_WRITE_ACK_TIMEOUT);
+ if (ret || stub->len <= 0) {
+ dev_err(&stub->intf->dev, "%s enum gpio failed ret:%d len:%d\n",
+ __func__, ret, stub->len);
+ return ret;
+ }
+
+ return gpio_stub_init(stub->intf, stub->buf, stub->len);
+}
+
+static int mng_enum_i2c(struct usb_stub *stub)
+{
+ int ret;
+
+ if (!stub)
+ return -EINVAL;
+
+ ret = usb_stub_write(stub, MNG_ENUM_I2C, NULL, 0, true,
+ USB_WRITE_ACK_TIMEOUT);
+ if (ret || stub->len <= 0) {
+		dev_err(&stub->intf->dev, "%s enum i2c failed ret:%d len:%d\n",
+ __func__, ret, stub->len);
+ ret = -EIO;
+ return ret;
+ }
+
+ ret = i2c_stub_init(stub->intf, stub->buf, stub->len);
+ return ret;
+}
+
+int mng_get_version(struct usb_stub *stub, struct fw_version *version)
+{
+ int ret;
+
+ if (!stub || !version)
+ return -EINVAL;
+
+ mutex_lock(&stub->stub_mutex);
+ ret = usb_stub_write(stub, MNG_GET_VERSION, NULL, 0, true,
+ USB_WRITE_ACK_TIMEOUT);
+ if (ret || stub->len < sizeof(struct fw_version)) {
+ mutex_unlock(&stub->stub_mutex);
+ dev_err(&stub->intf->dev,
+ "%s MNG_GET_VERSION failed ret:%d len:%d\n", __func__,
+ ret, stub->len);
+ ret = -EIO;
+ return ret;
+ }
+
+ memcpy(version, stub->buf, sizeof(struct fw_version));
+ mutex_unlock(&stub->stub_mutex);
+
+ return 0;
+}
+
+int mng_get_version_string(struct usb_stub *stub, char *buf)
+{
+	struct fw_version version;
+	int ret;
+
+	if (!buf)
+ return -EINVAL;
+
+ ret = mng_get_version(stub, &version);
+ if (ret) {
+ dev_err(&stub->intf->dev, "%s mng get fw version failed ret:%d",
+ __func__, ret);
+
+ ret = sprintf(buf, "%d.%d.%d.%d\n", 1, 1, 1, 1);
+ return ret;
+ }
+
+ ret = sprintf(buf, "%d.%d.%d.%d\n", version.major, version.minor,
+ version.patch, version.build);
+
+ return ret;
+}
+
+int mng_set_dfu_mode(struct usb_stub *stub)
+{
+	int ret;
+
+	if (!stub)
+		return -EINVAL;
+
+ mutex_lock(&stub->stub_mutex);
+ ret = usb_stub_write(stub, MNG_SET_DFU_MODE, NULL, 0, false,
+ USB_WRITE_ACK_TIMEOUT);
+ mutex_unlock(&stub->stub_mutex);
+
+ return ret;
+}
+
+int mng_stub_link(struct usb_interface *intf, struct usb_stub *mng_stub)
+{
+ int ret;
+ struct usb_bridge *intel_ulpss_dev = usb_get_intfdata(intf);
+
+ BUG_ON(!intel_ulpss_dev || !mng_stub);
+
+ ret = mng_reset_handshake(mng_stub);
+ if (ret)
+ return ret;
+ intel_ulpss_dev->state = USB_BRIDGE_RESET_SYNCED;
+
+ ret = mng_enum_gpio(mng_stub);
+ if (ret)
+ return ret;
+ intel_ulpss_dev->state = USB_BRIDGE_ENUM_GPIO_COMPLETE;
+
+ ret = mng_enum_i2c(mng_stub);
+ if (ret)
+ return ret;
+ intel_ulpss_dev->state = USB_BRIDGE_ENUM_I2C_COMPLETE;
+ intel_ulpss_dev->state = USB_BRIDGE_STARTED;
+
+ return ret;
+}
new file mode 100644
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel LPSS USB driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#ifndef __MNG_STUB_H__
+#define __MNG_STUB_H__
+#include "usb_stub.h"
+
+int mng_stub_init(struct usb_interface *intf, void *cookie, u8 len);
+int mng_get_version_string(struct usb_stub *stub, char *buf);
+int mng_set_dfu_mode(struct usb_stub *stub);
+int mng_stub_link(struct usb_interface *intf, struct usb_stub *mng_stub);
+int mng_reset(struct usb_stub *stub);
+#endif
new file mode 100644
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel LPSS USB driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#ifndef __PROTOCOL_INTEL_ULPSS_H__
+#define __PROTOCOL_INTEL_ULPSS_H__
+
+#include <linux/types.h>
+
+/*
+ * Define FW Communication protocol
+ */
+#define MAX_GPIO_NUM 20
+#define MAX_GPIO_BANK_NUM 5
+
+#define MAX_I2C_CONTROLLER_NUM 2
+
+/* command types */
+#define MNG_CMD_TYPE 1
+#define DIAG_CMD_TYPE 2
+#define GPIO_CMD_TYPE 3
+#define I2C_CMD_TYPE 4
+#define SPI_CMD_TYPE 5
+
+/* command Flags */
+#define ACK_FLAG BIT(0)
+#define RESP_FLAG BIT(1)
+#define CMPL_FLAG BIT(2)
+
+/* MNG commands */
+#define MNG_GET_VERSION 1
+#define MNG_RESET_NOTIFY 2
+#define MNG_RESET 3
+#define MNG_ENUM_GPIO 4
+#define MNG_ENUM_I2C 5
+#define MNG_POWER_STATE_CHANGE 6
+#define MNG_SET_DFU_MODE 7
+
+/* DIAG commands */
+#define DIAG_GET_STATE 0x01
+#define DIAG_GET_STATISTIC 0x02
+#define DIAG_SET_TRACE_LEVEL 0x03
+#define DIAG_SET_ECHO_MODE 0x04
+#define DIAG_GET_FW_LOG 0x05
+#define DIAG_GET_FW_COREDUMP 0x06
+#define DIAG_TRIGGER_WDT 0x07
+#define DIAG_TRIGGER_FAULT 0x08
+#define DIAG_FEED_WDT 0x09
+#define DIAG_GET_SECURE_STATE 0x0A
+
+/* GPIO commands */
+#define GPIO_CONFIG 1
+#define GPIO_READ 2
+#define GPIO_WRITE 3
+#define GPIO_INTR_NOTIFY 4
+
+/* I2C commands */
+#define I2C_INIT 1
+#define I2C_XFER 2
+#define I2C_START 3
+#define I2C_STOP 4
+#define I2C_READ 5
+#define I2C_WRITE 6
+
+#define GPIO_CONF_DISABLE BIT(0)
+#define GPIO_CONF_INPUT BIT(1)
+#define GPIO_CONF_OUTPUT BIT(2)
+#define GPIO_CONF_PULLUP BIT(3)
+#define GPIO_CONF_PULLDOWN BIT(4)
+
+/* Intentional overlap with PULLUP / PULLDOWN */
+#define GPIO_CONF_SET BIT(3)
+#define GPIO_CONF_CLR BIT(4)
+
+struct cmd_header {
+ u8 type;
+ u8 cmd;
+ u8 flags;
+ u8 len;
+ u8 data[];
+} __attribute__((packed));
+
+struct fw_version {
+ u8 major;
+ u8 minor;
+ u16 patch;
+ u16 build;
+} __attribute__((packed));
+
+struct bank_descriptor {
+ u8 bank_id;
+ u8 pin_num;
+
+ /* 1 bit for each gpio, 1 means valid */
+ u32 bitmap;
+} __attribute__((packed));
+
+struct gpio_descriptor {
+ u8 pins_per_bank;
+ u8 banks;
+ struct bank_descriptor bank_table[MAX_GPIO_BANK_NUM];
+} __attribute__((packed));
+
+struct i2c_controller_info {
+ u8 id;
+ u8 capacity;
+ u8 intr_pin;
+} __attribute__((packed));
+
+struct i2c_descriptor {
+ u8 num;
+ struct i2c_controller_info info[MAX_I2C_CONTROLLER_NUM];
+} __attribute__((packed));
+
+struct gpio_op {
+ u8 index;
+ u8 value;
+} __attribute__((packed));
+
+struct gpio_packet {
+ u8 num;
+	struct gpio_op item[];
+} __attribute__((packed));
+
+/* I2C Transfer */
+struct i2c_xfer {
+ u8 id;
+ u8 slave;
+ u16 flag; /* speed, 8/16bit addr, addr increase, etc */
+ u16 addr;
+ u16 len;
+	u8 data[];
+} __attribute__((packed));
+
+/* I2C raw commands: Init/Start/Read/Write/Stop */
+struct i2c_raw_io {
+ u8 id;
+ s16 len;
+	u8 data[];
+} __attribute__((packed));
+
+#define MAX_PACKET_SIZE 64
+#define MAX_PAYLOAD_SIZE (MAX_PACKET_SIZE - sizeof(struct cmd_header))
+
+#define USB_WRITE_TIMEOUT 20
+#define USB_WRITE_ACK_TIMEOUT 100
+
+#define DEFAULT_GPIO_CONTROLLER_ID 1
+#define DEFAULT_GPIO_PIN_COUNT_PER_BANK 32
+
+#define DEFAULT_I2C_CONTROLLER_ID 1
+#define DEFAULT_I2C_CAPACITY 0
+#define DEFAULT_I2C_INTR_PIN 0
+
+/* I2C r/w Flags */
+#define I2C_SLAVE_TRANSFER_WRITE (0)
+#define I2C_SLAVE_TRANSFER_READ (1)
+
+/* i2c init flags */
+#define I2C_INIT_FLAG_MODE_MASK (1 << 0)
+#define I2C_INIT_FLAG_MODE_POLLING (0 << 0)
+#define I2C_INIT_FLAG_MODE_INTERRUPT (1 << 0)
+
+#define I2C_FLAG_ADDR_16BIT (1 << 0)
+#define I2C_INIT_FLAG_FREQ_MASK (3 << 1)
+#define I2C_FLAG_FREQ_100K (0 << 1)
+#define I2C_FLAG_FREQ_400K (1 << 1)
+#define I2C_FLAG_FREQ_1M (2 << 1)
+
+#endif
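
For orientation, the following minimal user-space sketch shows how a GPIO_WRITE request is laid out on the wire by the structures above. The struct declarations are repeated locally for illustration, and the pin number is an arbitrary example; only the type/cmd/flag values come from this header.

    #include <stdint.h>
    #include <stdio.h>

    #pragma pack(push, 1)
    struct cmd_header  { uint8_t type, cmd, flags, len; };
    struct gpio_packet { uint8_t num; };
    struct gpio_op     { uint8_t index, value; };
    #pragma pack(pop)

    int main(void)
    {
            uint8_t buf[64] = { 0 };        /* MAX_PACKET_SIZE */
            struct cmd_header *hdr = (struct cmd_header *)buf;
            struct gpio_packet *pkt = (struct gpio_packet *)(hdr + 1);
            struct gpio_op *op = (struct gpio_op *)(pkt + 1);
            int total, i;

            hdr->type = 3;                  /* GPIO_CMD_TYPE */
            hdr->cmd = 3;                   /* GPIO_WRITE */
            hdr->flags = 0x01 | 0x04;       /* ACK_FLAG | CMPL_FLAG */
            pkt->num = 1;                   /* one gpio_op item follows */
            op->index = 5;                  /* pin id, example value only */
            op->value = 1;                  /* drive the pin high */
            hdr->len = sizeof(*pkt) + pkt->num * sizeof(*op);

            total = sizeof(*hdr) + hdr->len;
            for (i = 0; i < total; i++)
                    printf("%02x ", buf[i]);
            printf("\n");
            return 0;
    }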
new file mode 100644
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel LPSS USB driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+
+#include "diag_stub.h"
+#include "i2c_stub.h"
+#include "mng_stub.h"
+#include "ulpss_bridge.h"
+
+/* Define these values to match your devices */
+#define USB_BRIDGE_VENDOR_ID 0x8086
+#define USB_BRIDGE_PRODUCT_ID 0x0b63
+
+/* table of devices that work with this driver */
+static const struct usb_device_id intel_ulpss_bridge_table[] = {
+ { USB_DEVICE(USB_BRIDGE_VENDOR_ID, USB_BRIDGE_PRODUCT_ID) },
+ {} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, intel_ulpss_bridge_table);
+
+static void intel_ulpss_bridge_read_cb(struct urb *urb)
+{
+ struct usb_bridge *intel_ulpss_dev;
+ struct bridge_msg msg;
+ unsigned long flags;
+ bool need_sched;
+ int ret;
+
+ intel_ulpss_dev = urb->context;
+ BUG_ON(!intel_ulpss_dev);
+ dev_dbg(&intel_ulpss_dev->intf->dev,
+ "%s bulk read urb got message from fw, status:%d data_len:%d\n",
+ __func__, urb->status, urb->actual_length);
+
+ if (urb->status || intel_ulpss_dev->errors) {
+ /* sync/async unlink faults aren't errors */
+ if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN) {
+ dev_dbg(&intel_ulpss_dev->intf->dev,
+ "%s read bulk urb unlink: %d %d\n", __func__,
+ urb->status, intel_ulpss_dev->errors);
+ return;
+ } else {
+ dev_err(&intel_ulpss_dev->intf->dev,
+ "%s read bulk urb transfer failed: %d %d\n",
+ __func__, urb->status, intel_ulpss_dev->errors);
+			goto resubmit;
+ }
+ }
+
+ msg.len = urb->actual_length;
+ memcpy(msg.buf, intel_ulpss_dev->bulk_in_buffer, urb->actual_length);
+
+ spin_lock_irqsave(&intel_ulpss_dev->msg_fifo_spinlock, flags);
+ need_sched = kfifo_is_empty(&intel_ulpss_dev->msg_fifo);
+
+ if (kfifo_put(&intel_ulpss_dev->msg_fifo, msg)) {
+ if (need_sched)
+ schedule_work(&intel_ulpss_dev->event_work);
+ } else {
+		dev_err(&intel_ulpss_dev->intf->dev,
+			"%s put msg failed, fifo full:%d\n", __func__,
+ kfifo_is_full(&intel_ulpss_dev->msg_fifo));
+ }
+
+ spin_unlock_irqrestore(&intel_ulpss_dev->msg_fifo_spinlock, flags);
+
+resubmit:
+	/* resubmit urb to receive fw messages */
+ ret = usb_submit_urb(intel_ulpss_dev->bulk_in_urb, GFP_KERNEL);
+ if (ret) {
+ dev_err(&intel_ulpss_dev->intf->dev,
+ "%s failed submitting read urb, error %d\n", __func__,
+ ret);
+ }
+}
+
+static int intel_ulpss_bridge_read_start(struct usb_bridge *intel_ulpss_dev)
+{
+ int ret;
+
+ /* prepare a read */
+ usb_fill_bulk_urb(
+ intel_ulpss_dev->bulk_in_urb, intel_ulpss_dev->udev,
+ usb_rcvbulkpipe(intel_ulpss_dev->udev,
+ intel_ulpss_dev->bulk_in_endpointAddr),
+ intel_ulpss_dev->bulk_in_buffer, intel_ulpss_dev->bulk_in_size,
+ intel_ulpss_bridge_read_cb, intel_ulpss_dev);
+
+ /* submit read urb */
+ ret = usb_submit_urb(intel_ulpss_dev->bulk_in_urb, GFP_KERNEL);
+ if (ret) {
+ dev_err(&intel_ulpss_dev->intf->dev,
+ "%s - failed submitting read urb, error %d\n", __func__,
+ ret);
+ }
+ return ret;
+}
+
+static void intel_ulpss_bridge_write_cb(struct urb *urb)
+{
+ struct usb_bridge *intel_ulpss_dev;
+
+ intel_ulpss_dev = urb->context;
+
+ if (!intel_ulpss_dev)
+ return;
+
+ if (urb->status) {
+ /* sync/async unlink faults aren't errors */
+ if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN) {
+ dev_warn(&intel_ulpss_dev->intf->dev,
+ "%s write bulk urb unlink: %d\n", __func__,
+ urb->status);
+ } else {
+ dev_err(&intel_ulpss_dev->intf->dev,
+ "%s write bulk urb transfer failed: %d\n",
+ __func__, urb->status);
+
+ intel_ulpss_dev->errors = urb->status;
+ }
+ }
+
+ /* free up our allocated buffer */
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
+
+ dev_dbg(&intel_ulpss_dev->intf->dev, "%s write callback out\n",
+ __func__);
+}
+
+ssize_t intel_ulpss_bridge_write(struct usb_interface *intf, void *data,
+ size_t len, unsigned int timeout)
+{
+ struct urb *urb = NULL;
+ struct usb_bridge *intel_ulpss_dev = usb_get_intfdata(intf);
+ char *buf = NULL;
+ int time;
+ int ret;
+
+ if (!len || len > MAX_PACKET_SIZE) {
+		dev_err(&intf->dev, "%s write len not correct len:%zu\n",
+ __func__, len);
+ return -EINVAL;
+ }
+
+ mutex_lock(&intel_ulpss_dev->write_mutex);
+ usb_autopm_get_interface(intf);
+
+ if (intel_ulpss_dev->errors) {
+ dev_err(&intf->dev, "%s dev error %d\n", __func__,
+ intel_ulpss_dev->errors);
+ intel_ulpss_dev->errors = 0;
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* create a urb, and a buffer for it, and copy the data to the urb */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ buf = usb_alloc_coherent(intel_ulpss_dev->udev, len, GFP_KERNEL,
+ &urb->transfer_dma);
+
+ if (!buf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ memcpy(buf, data, len);
+
+ if (intel_ulpss_dev->disconnected) { /* disconnect() was called */
+ ret = -ENODEV;
+ goto error;
+ }
+
+ /* initialize the urb properly */
+ usb_fill_bulk_urb(
+ urb, intel_ulpss_dev->udev,
+ usb_sndbulkpipe(intel_ulpss_dev->udev,
+ intel_ulpss_dev->bulk_out_endpointAddr),
+ buf, len, intel_ulpss_bridge_write_cb, intel_ulpss_dev);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ usb_anchor_urb(urb, &intel_ulpss_dev->write_submitted);
+ /* send the data out the bulk port */
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ dev_err(&intel_ulpss_dev->intf->dev,
+ "%s - failed submitting write urb, error %d\n",
+ __func__, ret);
+ goto error_unanchor;
+ }
+
+ /*
+ * release our reference to this urb, the USB core will eventually free
+ * it entirely
+ */
+ usb_free_urb(urb);
+
+ time = usb_wait_anchor_empty_timeout(&intel_ulpss_dev->write_submitted,
+ timeout);
+ if (!time) {
+ usb_kill_anchored_urbs(&intel_ulpss_dev->write_submitted);
+ dev_err(&intel_ulpss_dev->intf->dev,
+			"%s timed out waiting for write urb, timeout:%u\n",
+			__func__, timeout);
+ }
+
+ usb_autopm_put_interface(intf);
+ mutex_unlock(&intel_ulpss_dev->write_mutex);
+ return len;
+
+error_unanchor:
+ usb_unanchor_urb(urb);
+error:
+ if (urb) {
+ /* free up our allocated buffer */
+ usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma);
+ usb_free_urb(urb);
+ }
+
+ usb_autopm_put_interface(intf);
+ mutex_unlock(&intel_ulpss_dev->write_mutex);
+ return ret;
+}
+
+static void intel_ulpss_bridge_delete(struct usb_bridge *intel_ulpss_dev)
+{
+ usb_free_urb(intel_ulpss_dev->bulk_in_urb);
+ usb_put_intf(intel_ulpss_dev->intf);
+ usb_put_dev(intel_ulpss_dev->udev);
+ kfree(intel_ulpss_dev->bulk_in_buffer);
+ kfree(intel_ulpss_dev);
+}
+
+static int intel_ulpss_bridge_init(struct usb_bridge *intel_ulpss_dev)
+{
+ mutex_init(&intel_ulpss_dev->write_mutex);
+ init_usb_anchor(&intel_ulpss_dev->write_submitted);
+ init_waitqueue_head(&intel_ulpss_dev->bulk_out_ack);
+ INIT_LIST_HEAD(&intel_ulpss_dev->stubs_list);
+ INIT_KFIFO(intel_ulpss_dev->msg_fifo);
+ spin_lock_init(&intel_ulpss_dev->msg_fifo_spinlock);
+
+ intel_ulpss_dev->state = USB_BRIDGE_INITED;
+
+ return 0;
+}
+
+static ssize_t cmd_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_stub *mng_stub = usb_stub_find(intf, MNG_CMD_TYPE);
+ struct usb_stub *diag_stub = usb_stub_find(intf, DIAG_CMD_TYPE);
+	int ret = -EINVAL;
+
+	dev_dbg(dev, "%s:%u %s\n", __func__, __LINE__, buf);
+	if (sysfs_streq(buf, "dfu"))
+		ret = mng_set_dfu_mode(mng_stub);
+	else if (sysfs_streq(buf, "reset"))
+		ret = mng_reset(mng_stub);
+	else if (sysfs_streq(buf, "debug"))
+		ret = diag_set_trace_level(diag_stub, 3);
+
+	return ret < 0 ? ret : count;
+}
+
+static ssize_t cmd_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+	dev_dbg(dev, "%s:%u\n", __func__, __LINE__);
+
+ return sprintf(buf, "%s\n", "supported cmd: [dfu, reset, debug]");
+}
+static DEVICE_ATTR_RW(cmd);
+
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_stub *mng_stub = usb_stub_find(intf, MNG_CMD_TYPE);
+
+ dev_dbg(dev, "%s:%u\n", __func__, __LINE__);
+ return mng_get_version_string(mng_stub, buf);
+}
+static DEVICE_ATTR_RO(version);
+
+static ssize_t log_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ ssize_t len;
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_stub *diag_stub = usb_stub_find(intf, DIAG_CMD_TYPE);
+
+ ret = diag_get_fw_log(diag_stub, buf, &len);
+ dev_dbg(dev, "%s:%u len %ld\n", __func__, __LINE__, len);
+
+ if (ret)
+ return ret;
+ else
+ return len;
+}
+static DEVICE_ATTR_RO(log);
+
+static ssize_t coredump_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ ssize_t len;
+ struct usb_interface *intf = to_usb_interface(dev);
+ struct usb_stub *diag_stub = usb_stub_find(intf, DIAG_CMD_TYPE);
+
+ ret = diag_get_coredump(diag_stub, buf, &len);
+ dev_dbg(dev, "%s:%u len %ld\n", __func__, __LINE__, len);
+
+ if (ret)
+ return ret;
+ else
+ return len;
+}
+static DEVICE_ATTR_RO(coredump);
+
+static struct attribute *intel_ulpss_bridge_attrs[] = {
+ &dev_attr_version.attr,
+ &dev_attr_cmd.attr,
+ &dev_attr_log.attr,
+ &dev_attr_coredump.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(intel_ulpss_bridge);
+
+static int intel_ulpss_bridge_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_bridge *intel_ulpss_dev;
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+ struct usb_stub *stub;
+ int ret;
+
+ /* allocate memory for our device state and initialize it */
+ intel_ulpss_dev = kzalloc(sizeof(*intel_ulpss_dev), GFP_KERNEL);
+ if (!intel_ulpss_dev)
+ return -ENOMEM;
+
+ intel_ulpss_bridge_init(intel_ulpss_dev);
+ intel_ulpss_dev->udev = usb_get_dev(interface_to_usbdev(intf));
+ intel_ulpss_dev->intf = usb_get_intf(intf);
+
+ /* set up the endpoint information */
+ /* use only the first bulk-in and bulk-out endpoints */
+ ret = usb_find_common_endpoints(intf->cur_altsetting, &bulk_in,
+ &bulk_out, NULL, NULL);
+ if (ret) {
+ dev_err(&intf->dev,
+ "Could not find both bulk-in and bulk-out endpoints\n");
+ goto error;
+ }
+
+ intel_ulpss_dev->bulk_in_size = usb_endpoint_maxp(bulk_in);
+ intel_ulpss_dev->bulk_in_endpointAddr = bulk_in->bEndpointAddress;
+ intel_ulpss_dev->bulk_in_buffer =
+ kzalloc(intel_ulpss_dev->bulk_in_size, GFP_KERNEL);
+ if (!intel_ulpss_dev->bulk_in_buffer) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ intel_ulpss_dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!intel_ulpss_dev->bulk_in_urb) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ intel_ulpss_dev->bulk_out_endpointAddr = bulk_out->bEndpointAddress;
+
+	dev_dbg(&intf->dev, "bulk_in size:%zu addr:%d bulk_out addr:%d\n",
+ intel_ulpss_dev->bulk_in_size,
+ intel_ulpss_dev->bulk_in_endpointAddr,
+ intel_ulpss_dev->bulk_out_endpointAddr);
+
+ /* save our data pointer in this intf device */
+ usb_set_intfdata(intf, intel_ulpss_dev);
+
+ ret = intel_ulpss_bridge_read_start(intel_ulpss_dev);
+ if (ret) {
+ dev_err(&intf->dev, "%s bridge read start failed ret %d\n",
+ __func__, ret);
+ goto error;
+ }
+
+ ret = usb_stub_init(intf);
+ if (ret) {
+ dev_err(&intf->dev, "%s usb stub init failed ret %d\n",
+ __func__, ret);
+ usb_set_intfdata(intf, NULL);
+ goto error;
+ }
+
+ ret = mng_stub_init(intf, NULL, 0);
+ if (ret) {
+ dev_err(&intf->dev, "%s register mng stub failed ret %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = diag_stub_init(intf, NULL, 0);
+ if (ret) {
+ dev_err(&intf->dev, "%s register diag stub failed ret %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ stub = usb_stub_find(intf, MNG_CMD_TYPE);
+ if (!stub) {
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = mng_stub_link(intf, stub);
+	if (intel_ulpss_dev->state != USB_BRIDGE_STARTED) {
+		dev_err(&intf->dev, "%s mng stub link failed ret:%d state:%d\n",
+			__func__, ret, intel_ulpss_dev->state);
+		return ret ? ret : -EIO;
+ }
+
+ usb_enable_autosuspend(intel_ulpss_dev->udev);
+ dev_info(&intf->dev, "intel_ulpss USB bridge device init success\n");
+ return 0;
+
+error:
+ /* this frees allocated memory */
+ intel_ulpss_bridge_delete(intel_ulpss_dev);
+
+ return ret;
+}
+
+static void intel_ulpss_bridge_disconnect(struct usb_interface *intf)
+{
+ struct usb_bridge *intel_ulpss_dev;
+
+ intel_ulpss_dev = usb_get_intfdata(intf);
+ intel_ulpss_dev->disconnected = 1;
+
+ usb_kill_urb(intel_ulpss_dev->bulk_in_urb);
+ usb_kill_anchored_urbs(&intel_ulpss_dev->write_submitted);
+
+ usb_stub_cleanup(intf);
+ intel_ulpss_dev->state = USB_BRIDGE_STOPPED;
+
+ cancel_work_sync(&intel_ulpss_dev->event_work);
+
+ usb_set_intfdata(intf, NULL);
+	/* decrement our usage count */
+ intel_ulpss_bridge_delete(intel_ulpss_dev);
+
+ dev_dbg(&intf->dev, "USB bridge now disconnected\n");
+}
+
+static void intel_ulpss_bridge_draw_down(struct usb_bridge *intel_ulpss_dev)
+{
+ int time;
+
+ time = usb_wait_anchor_empty_timeout(&intel_ulpss_dev->write_submitted,
+ 1000);
+ if (!time)
+ usb_kill_anchored_urbs(&intel_ulpss_dev->write_submitted);
+ usb_kill_urb(intel_ulpss_dev->bulk_in_urb);
+}
+
+static int intel_ulpss_bridge_suspend(struct usb_interface *intf,
+ pm_message_t message)
+{
+ struct usb_bridge *intel_ulpss_dev = usb_get_intfdata(intf);
+ dev_dbg(&intf->dev, "USB bridge now suspend\n");
+
+ intel_ulpss_bridge_draw_down(intel_ulpss_dev);
+ return 0;
+}
+
+static int intel_ulpss_bridge_resume(struct usb_interface *intf)
+{
+ int ret;
+ struct usb_bridge *intel_ulpss_dev = usb_get_intfdata(intf);
+ dev_dbg(&intf->dev, "USB bridge now resume\n");
+
+ ret = intel_ulpss_bridge_read_start(intel_ulpss_dev);
+ if (ret) {
+ dev_err(&intf->dev, "%s bridge read start failed ret %d\n",
+ __func__, ret);
+ }
+ return ret;
+}
+
+static struct usb_driver bridge_driver = {
+ .name = "intel_ulpss",
+ .probe = intel_ulpss_bridge_probe,
+ .disconnect = intel_ulpss_bridge_disconnect,
+ .suspend = intel_ulpss_bridge_suspend,
+ .resume = intel_ulpss_bridge_resume,
+ .id_table = intel_ulpss_bridge_table,
+ .dev_groups = intel_ulpss_bridge_groups,
+ .supports_autosuspend = 1,
+};
+
+module_usb_driver(bridge_driver);
+
+MODULE_AUTHOR("Ye Xiang <xiang.ye@intel.com>");
+MODULE_AUTHOR("Zhang Lixu <lixu.zhang@intel.com>");
+MODULE_DESCRIPTION("Intel LPSS USB driver");
+MODULE_LICENSE("GPL v2");
new file mode 100644
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel LPSS USB driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#ifndef __ULPSS_BRIDGE_H__
+#define __ULPSS_BRIDGE_H__
+
+#include <linux/kfifo.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+#include "usb_stub.h"
+
+struct bridge_msg {
+ size_t len;
+ u8 buf[MAX_PACKET_SIZE];
+ unsigned long read_time;
+};
+
+enum USB_BRIDGE_STATE {
+ USB_BRIDGE_STOPPED = 0,
+ USB_BRIDGE_INITED,
+ USB_BRIDGE_START_SYNCING,
+ USB_BRIDGE_START_DISPATCH_STARTED,
+ USB_BRIDGE_START_READ_STARTED,
+ USB_BRIDGE_RESET_HANDSHAKE,
+ USB_BRIDGE_RESET_SYNCED,
+ USB_BRIDGE_ENUM_GPIO_PENDING,
+ USB_BRIDGE_ENUM_GPIO_COMPLETE,
+ USB_BRIDGE_ENUM_I2C_PENDING,
+ USB_BRIDGE_ENUM_I2C_COMPLETE,
+ USB_BRIDGE_STARTED,
+ USB_BRIDGE_FAILED,
+};
+
+struct usb_bridge {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */
+ u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */
+ /* in case we need to retract our submissions */
+ struct usb_anchor write_submitted;
+
+ /* the urb to read data with */
+ struct urb *bulk_in_urb;
+ /* the buffer to receive data */
+ unsigned char *bulk_in_buffer;
+ size_t bulk_in_size;
+
+ /* bridge status */
+ int errors; /* the last request tanked */
+ unsigned int disconnected;
+ int state;
+
+ struct mutex write_mutex;
+
+ /* stub */
+ size_t stub_count;
+ struct list_head stubs_list;
+
+ /* buffers to store bridge msg temporary */
+ DECLARE_KFIFO(msg_fifo, struct bridge_msg, 16);
+ spinlock_t msg_fifo_spinlock;
+
+ /* dispatch message from fw */
+ struct work_struct event_work;
+
+ /* to wait for an ongoing write ack */
+ wait_queue_head_t bulk_out_ack;
+};
+
+ssize_t intel_ulpss_bridge_write(struct usb_interface *intf, void *data, size_t len,
+ unsigned int timeout);
+
+#endif /* __ULPSS_BRIDGE_H__ */
new file mode 100644
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel LPSS USB driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "protocol_intel_ulpss.h"
+#include "ulpss_bridge.h"
+
+#define MAKE_CMD_ID(type, cmd) ((type << 8) | cmd)
+static bool usb_stub_validate(u8 *data, u32 data_len)
+{
+ bool is_valid = true;
+ struct cmd_header *header = (struct cmd_header *)data;
+
+	/* verify the response flag */
+ if (header->cmd != GPIO_INTR_NOTIFY &&
+ ((header->flags & RESP_FLAG) == 0))
+ is_valid = false;
+
+ /* verify the payload len */
+ is_valid = is_valid && (header->len + sizeof(*header) == data_len);
+
+ return is_valid;
+}
+
+static int usb_stub_parse(struct usb_stub *stub, struct cmd_header *header)
+{
+ int ret = 0;
+
+	if (!stub || !header)
+ return -EINVAL;
+
+ stub->len = header->len;
+
+ if (header->len == 0)
+ return 0;
+
+ memcpy(stub->buf, header->data, header->len);
+ if (stub->parse)
+ ret = stub->parse(stub, header->cmd, header->flags,
+ header->data, header->len);
+
+ return ret;
+}
+
+/*
+ * Bottom half processing work function (instead of thread handler)
+ * for processing fw messages
+ */
+static void event_work_cb(struct work_struct *work)
+{
+ struct usb_bridge *intel_ulpss_dev;
+ struct bridge_msg msg_in_proc = { 0 };
+ struct usb_stub *stub;
+ struct cmd_header *header;
+ int rcv_cmd_id;
+ int ret;
+
+ intel_ulpss_dev = container_of(work, struct usb_bridge, event_work);
+ BUG_ON(!intel_ulpss_dev);
+
+ while (kfifo_get(&intel_ulpss_dev->msg_fifo, &msg_in_proc)) {
+ if (!msg_in_proc.len)
+ continue;
+
+ header = (struct cmd_header *)msg_in_proc.buf;
+
+ dev_dbg(&intel_ulpss_dev->intf->dev,
+ "receive: type:%d cmd:%d flags:%d len:%d\n",
+ header->type, header->cmd, header->flags, header->len);
+
+ /* verify the data */
+ if (!usb_stub_validate(msg_in_proc.buf, msg_in_proc.len)) {
+			dev_err(&intel_ulpss_dev->intf->dev,
+				"%s header->len:%d payload_len:%zu\n",
+				__func__, header->len, msg_in_proc.len);
+ continue;
+ }
+
+ stub = usb_stub_find(intel_ulpss_dev->intf, header->type);
+ ret = usb_stub_parse(stub, header);
+ if (ret) {
+ dev_err(&intel_ulpss_dev->intf->dev,
+				"%s failed to parse data: ret:%d type:%d len:%d\n",
+ __func__, ret, header->type, header->len);
+ continue;
+ }
+
+ rcv_cmd_id = MAKE_CMD_ID(stub->type, header->cmd);
+ if (rcv_cmd_id == stub->cmd_id) {
+ stub->acked = true;
+ wake_up_interruptible(&intel_ulpss_dev->bulk_out_ack);
+
+ } else {
+ dev_warn(&intel_ulpss_dev->intf->dev,
+ "%s rcv_cmd_id:%x != stub->cmd_id:%x",
+ __func__, rcv_cmd_id, stub->cmd_id);
+ }
+ }
+}
+
+struct usb_stub *usb_stub_alloc(struct usb_interface *intf)
+{
+ struct usb_bridge *intel_ulpss_dev = usb_get_intfdata(intf);
+ struct usb_stub *cur_stub;
+
+ cur_stub = kzalloc(sizeof(struct usb_stub), GFP_KERNEL);
+ if (!cur_stub) {
+ dev_err(&intf->dev, "%s no memory for new stub", __func__);
+ return NULL;
+ }
+
+ mutex_init(&cur_stub->stub_mutex);
+ INIT_LIST_HEAD(&cur_stub->list);
+
+ list_add_tail(&cur_stub->list, &intel_ulpss_dev->stubs_list);
+ intel_ulpss_dev->stub_count++;
+	dev_dbg(&intf->dev, "%s stub allocated, stub_count:%zu\n", __func__,
+		intel_ulpss_dev->stub_count);
+
+ return cur_stub;
+}
+
+int usb_stub_init(struct usb_interface *intf)
+{
+ struct usb_bridge *intel_ulpss_dev = usb_get_intfdata(intf);
+
+ if (!intel_ulpss_dev)
+ return -EINVAL;
+
+ INIT_WORK(&intel_ulpss_dev->event_work, event_work_cb);
+
+ return 0;
+}
+
+struct usb_stub *usb_stub_find(struct usb_interface *intf, u8 type)
+{
+ struct usb_bridge *intel_ulpss_dev = usb_get_intfdata(intf);
+ struct usb_stub *cur_stub;
+
+ if (!intel_ulpss_dev)
+ return NULL;
+
+ list_for_each_entry (cur_stub, &intel_ulpss_dev->stubs_list, list) {
+ if (cur_stub->type == type)
+ return cur_stub;
+ }
+
+	dev_err(&intf->dev, "%s usb stub not found, type:%d\n", __func__, type);
+ return NULL;
+}
+
+int usb_stub_write(struct usb_stub *stub, u8 cmd, u8 *data, u8 len,
+ bool wait_ack, long timeout)
+{
+	int ret;
+	u8 flags = 0;
+	u8 buff[MAX_PACKET_SIZE] = { 0 };
+	struct cmd_header *header;
+	struct usb_bridge *intel_ulpss_dev;
+
+	if (!stub)
+		return -EINVAL;
+
+	intel_ulpss_dev = usb_get_intfdata(stub->intf);
+	if (!intel_ulpss_dev)
+		return -EINVAL;
+
+	header = (struct cmd_header *)buff;
+	if (len >= MAX_PAYLOAD_SIZE)
+		return -EINVAL;
+
+ if (wait_ack)
+ flags |= ACK_FLAG;
+
+ flags |= CMPL_FLAG;
+
+ header->type = stub->type;
+ header->cmd = cmd;
+ header->flags = flags;
+ header->len = len;
+
+ memcpy(header->data, data, len);
+ dev_dbg(&intel_ulpss_dev->intf->dev,
+ "send: type:%d cmd:%d flags:%d len:%d\n", header->type,
+ header->cmd, header->flags, header->len);
+
+ stub->cmd_id = MAKE_CMD_ID(stub->type, cmd);
+
+ ret = intel_ulpss_bridge_write(
+ stub->intf, header, sizeof(struct cmd_header) + len, timeout);
+
+ if (ret != sizeof(struct cmd_header) + len) {
+		dev_err(&intel_ulpss_dev->intf->dev,
+			"%s bridge write failed ret:%d total_len:%zu\n",
+			__func__, ret, sizeof(struct cmd_header) + len);
+ return -EIO;
+ }
+
+ if (flags & ACK_FLAG) {
+ ret = wait_event_interruptible_timeout(
+ intel_ulpss_dev->bulk_out_ack, (stub->acked), timeout);
+ stub->acked = false;
+
+ if (ret < 0) {
+ dev_err(&intel_ulpss_dev->intf->dev,
+ "acked wait interrupted ret:%d timeout:%ld ack:%d\n",
+ ret, timeout, stub->acked);
+ return ret;
+
+ } else if (ret == 0) {
+ dev_err(&intel_ulpss_dev->intf->dev,
+ "acked sem wait timed out ret:%d timeout:%ld ack:%d\n",
+ ret, timeout, stub->acked);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+void usb_stub_broadcast(struct usb_interface *intf, long event,
+			void *event_data)
+{
+	struct usb_stub *cur_stub;
+	struct usb_bridge *intel_ulpss_dev = usb_get_intfdata(intf);
+
+	if (!intel_ulpss_dev)
+		return;
+
+	list_for_each_entry (cur_stub, &intel_ulpss_dev->stubs_list, list) {
+		if (cur_stub->notify)
+			cur_stub->notify(cur_stub, event, event_data);
+	}
+}
+
+void usb_stub_cleanup(struct usb_interface *intf)
+{
+	struct usb_stub *cur_stub;
+	struct usb_stub *next;
+	struct usb_bridge *intel_ulpss_dev = usb_get_intfdata(intf);
+
+	if (!intel_ulpss_dev)
+		return;
+
+	list_for_each_entry_safe (cur_stub, next, &intel_ulpss_dev->stubs_list,
+				  list) {
+		list_del_init(&cur_stub->list);
+		dev_dbg(&intf->dev, "%s type:%d\n", __func__, cur_stub->type);
+ if (cur_stub->cleanup)
+ cur_stub->cleanup(cur_stub);
+
+ mutex_destroy(&cur_stub->stub_mutex);
+ kfree(cur_stub);
+
+ intel_ulpss_dev->stub_count--;
+ }
+}
+
+struct acpi_device *find_adev_by_hid(struct acpi_device *parent,
+ const char *hid)
+{
+ struct acpi_device *adev;
+
+ if (!parent)
+ return NULL;
+
+ list_for_each_entry (adev, &parent->children, node) {
+ if (!strcmp(hid, acpi_device_hid(adev)))
+ return adev;
+ }
+
+ return NULL;
+}
new file mode 100644
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel LPSS USB driver
+ *
+ * Copyright (c) 2020, Intel Corporation.
+ */
+
+#ifndef __USB_STUB_H__
+#define __USB_STUB_H__
+
+#include <linux/acpi.h>
+#include <linux/types.h>
+
+#include "protocol_intel_ulpss.h"
+
+#define GPIO_INTR_EVENT 2
+
+struct usb_stub {
+ struct list_head list;
+ u8 type;
+ struct usb_interface *intf;
+
+ struct mutex stub_mutex;
+ u8 buf[MAX_PAYLOAD_SIZE];
+ u32 len;
+
+	bool acked;
+	/* used to match an ack to the command that requested it */
+	int cmd_id;
+
+ int (*parse)(struct usb_stub *stub, u8 cmd, u8 flags, u8 *data,
+ u32 len);
+ int (*notify)(struct usb_stub *stub, long event, void *evt_data);
+ void (*cleanup)(struct usb_stub *stub);
+ void *priv;
+};
+
+int usb_stub_init(struct usb_interface *intf);
+struct usb_stub *usb_stub_find(struct usb_interface *intf, u8 type);
+int usb_stub_write(struct usb_stub *stub, u8 cmd, u8 *data, u8 len,
+ bool wait_ack, long timeout);
+void usb_stub_broadcast(struct usb_interface *intf, long event,
+ void *event_data);
+void usb_stub_cleanup(struct usb_interface *intf);
+struct usb_stub *usb_stub_alloc(struct usb_interface *intf);
+
+struct acpi_device *find_adev_by_hid(struct acpi_device *parent,
+ const char *hid);
+#endif
new file mode 100644
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_USB_LJCA_H
+#define __LINUX_USB_LJCA_H
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define MAX_GPIO_NUM 64
+
+struct ljca_gpio_info {
+ int num;
+ DECLARE_BITMAP(valid_pin_map, MAX_GPIO_NUM);
+};
+
+struct ljca_i2c_info {
+ u8 id;
+ u8 capacity;
+ u8 intr_pin;
+};
+
+struct ljca_spi_info {
+ u8 id;
+ u8 capacity;
+};
+
+struct ljca_platform_data {
+ int type;
+ union {
+ struct ljca_gpio_info gpio_info;
+ struct ljca_i2c_info i2c_info;
+ struct ljca_spi_info spi_info;
+ };
+};
+
+typedef void (*ljca_event_cb_t)(struct platform_device *pdev, u8 cmd,
+ const void *evt_data, int len);
+
+int ljca_register_event_cb(struct platform_device *pdev,
+ ljca_event_cb_t event_cb);
+void ljca_unregister_event_cb(struct platform_device *pdev);
+int ljca_transfer(struct platform_device *pdev, u8 cmd, const void *obuf,
+ int obuf_len, void *ibuf, int *ibuf_len);
+int ljca_transfer_noack(struct platform_device *pdev, u8 cmd, const void *obuf,
+ int obuf_len);
+
+#endif
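
A hypothetical consumer of this API might look like the sketch below. The probe function name, the command id 0x01, and the <linux/usb/ljca.h> include path are illustrative assumptions; only ljca_transfer() and struct ljca_platform_data come from the header above.

    #include <linux/platform_device.h>
    #include <linux/usb/ljca.h>   /* assumed install path of this header */

    static int example_cell_probe(struct platform_device *pdev)
    {
            struct ljca_platform_data *pdata = dev_get_platdata(&pdev->dev);
            u8 resp[8];
            int resp_len = sizeof(resp);
            int ret;

            /* 0x01 is a placeholder command id, not defined by this patch */
            ret = ljca_transfer(pdev, 0x01, NULL, 0, resp, &resp_len);
            if (ret)
                    return ret;

            dev_info(&pdev->dev, "cell type %d answered %d bytes\n",
                     pdata->type, resp_len);
            return 0;
    }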
new file mode 100644
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_VSC_H_
+#define _LINUX_VSC_H_
+
+#include <linux/types.h>
+
+/**
+ * @brief VSC camera ownership definition
+ */
+enum vsc_camera_owner {
+ VSC_CAMERA_NONE = 0,
+ VSC_CAMERA_CVF,
+ VSC_CAMERA_IPU,
+};
+
+/**
+ * @brief VSC privacy status definition
+ */
+enum vsc_privacy_status {
+ VSC_PRIVACY_ON = 0,
+ VSC_PRIVACY_OFF,
+};
+
+/**
+ * @brief VSC MIPI configuration definition
+ */
+struct vsc_mipi_config {
+ uint32_t freq;
+ uint32_t lane_num;
+};
+
+/**
+ * @brief VSC camera status definition
+ */
+struct vsc_camera_status {
+ enum vsc_camera_owner owner;
+ enum vsc_privacy_status status;
+ uint32_t exposure_level;
+};
+
+/**
+ * @brief VSC privacy callback type definition
+ *
+ * @param context Privacy callback handle
+ * @param status Current privacy status
+ */
+typedef void (*vsc_privacy_callback_t)(void *handle,
+ enum vsc_privacy_status status);
+
+/**
+ * @brief Acquire camera sensor ownership to IPU
+ *
+ * @param config[IN] The pointer of MIPI configuration going to set
+ * @param callback[IN] The pointer of privacy callback function
+ * @param handle[IN] Privacy callback function runtime handle from IPU driver
+ * @param status[OUT] The pointer of camera status after the acquire
+ *
+ * @retval 0 If success
+ * @retval -EIO IO error
+ * @retval -EINVAL Invalid argument
+ * @retval -EAGAIN VSC device not ready
+ * @retval negative values for other errors
+ */
+int vsc_acquire_camera_sensor(struct vsc_mipi_config *config,
+ vsc_privacy_callback_t callback,
+ void *handle,
+ struct vsc_camera_status *status);
+
+/**
+ * @brief Release camera sensor ownership
+ *
+ * @param status[OUT] Camera status after the release
+ *
+ * @retval 0 If success
+ * @retval -EIO IO error
+ * @retval -EINVAL Invalid argument
+ * @retval -EAGAIN VSC device not ready
+ * @retval negative values for other errors
+ */
+int vsc_release_camera_sensor(struct vsc_camera_status *status);
+
+#endif
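
As a sketch of the intended call sequence, an IPU-side consumer could acquire the sensor, stream, and release it roughly as follows. The <linux/vsc.h> include path, the MIPI frequency and lane count, and the callback body are assumptions for illustration.

    #include <linux/vsc.h>   /* assumed install path of this header */

    static void example_privacy_cb(void *handle,
                                   enum vsc_privacy_status status)
    {
            /* react to privacy switch changes, e.g. blank the stream */
    }

    static int example_start_stream(void)
    {
            struct vsc_mipi_config conf = {
                    .freq = 400000000,   /* example values only */
                    .lane_num = 2,
            };
            struct vsc_camera_status status;
            int ret;

            ret = vsc_acquire_camera_sensor(&conf, example_privacy_cb,
                                            NULL, &status);
            if (ret)
                    return ret;   /* -EAGAIN means VSC is not ready yet */

            /* ... stream frames while status.owner == VSC_CAMERA_IPU ... */

            return vsc_release_camera_sensor(&status);
    }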
new file mode 100644
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2014 - 2020 Intel Corporation */
+
+#ifndef MEDIA_IPU_H
+#define MEDIA_IPU_H
+
+#include <linux/i2c.h>
+#include <linux/clkdev.h>
+#include <media/v4l2-async.h>
+
+#define IPU_ISYS_MAX_CSI2_LANES 4
+
+struct ipu_isys_csi2_config {
+ unsigned int nlanes;
+ unsigned int port;
+};
+
+struct ipu_isys_subdev_i2c_info {
+ struct i2c_board_info board_info;
+ int i2c_adapter_id;
+ char i2c_adapter_bdf[32];
+};
+
+struct ipu_isys_subdev_info {
+ struct ipu_isys_csi2_config *csi2;
+ struct ipu_isys_subdev_i2c_info i2c;
+};
+
+struct ipu_isys_clk_mapping {
+ struct clk_lookup clkdev_data;
+ char *platform_clock_name;
+};
+
+struct ipu_isys_subdev_pdata {
+ struct ipu_isys_subdev_info **subdevs;
+ struct ipu_isys_clk_mapping *clk_map;
+};
+
+struct sensor_async_subdev {
+ struct v4l2_async_subdev asd;
+ struct ipu_isys_csi2_config csi2;
+};
+
+#endif /* MEDIA_IPU_H */
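
As an illustrative sketch, platform code could describe a single CSI-2 sensor to the ISYS with these types as follows. The sensor name, I2C address, adapter id, port, and lane count are example values, not taken from this patch.

    static struct ipu_isys_csi2_config example_csi2 = {
            .nlanes = 2,   /* up to IPU_ISYS_MAX_CSI2_LANES */
            .port = 1,
    };

    static struct ipu_isys_subdev_info example_sensor = {
            .csi2 = &example_csi2,
            .i2c = {
                    .board_info = { I2C_BOARD_INFO("hm11b1", 0x33) },
                    .i2c_adapter_id = 2,
            },
    };

    static struct ipu_isys_subdev_info *example_subdevs[] = {
            &example_sensor,
            NULL,          /* the array is NULL-terminated */
    };

    static struct ipu_isys_subdev_pdata example_pdata = {
            .subdevs = example_subdevs,
    };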
new file mode 100644
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2016 - 2020 Intel Corporation */
+
+#ifndef UAPI_LINUX_IPU_ISYS_H
+#define UAPI_LINUX_IPU_ISYS_H
+
+#define V4L2_CID_IPU_BASE (V4L2_CID_USER_BASE + 0x1080)
+
+#define V4L2_CID_IPU_STORE_CSI2_HEADER (V4L2_CID_IPU_BASE + 2)
+#define V4L2_CID_IPU_ISYS_COMPRESSION (V4L2_CID_IPU_BASE + 3)
+
+#define VIDIOC_IPU_GET_DRIVER_VERSION \
+ _IOWR('v', BASE_VIDIOC_PRIVATE + 3, uint32_t)
+
+#endif /* UAPI_LINUX_IPU_ISYS_H */
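
A minimal user-space sketch of the private ioctl above, assuming the ISYS exposes a standard V4L2 video node (the /dev/video0 path is a guess):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>   /* BASE_VIDIOC_PRIVATE, V4L2_CID_USER_BASE */
    #include "ipu-isys.h"          /* the header above, as installed */

    int main(void)
    {
            uint32_t ver = 0;
            int fd = open("/dev/video0", O_RDWR);   /* assumed node */

            if (fd < 0)
                    return 1;
            if (ioctl(fd, VIDIOC_IPU_GET_DRIVER_VERSION, &ver) == 0)
                    printf("IPU ISYS driver version: %u\n", ver);
            close(fd);
            return 0;
    }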
new file mode 100644
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2013 - 2020 Intel Corporation */
+
+#ifndef _UAPI_IPU_PSYS_H
+#define _UAPI_IPU_PSYS_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+struct ipu_psys_capability {
+ uint32_t version;
+ uint8_t driver[20];
+ uint32_t pg_count;
+ uint8_t dev_model[32];
+ uint32_t reserved[17];
+} __attribute__ ((packed));
+
+struct ipu_psys_event {
+ uint32_t type; /* IPU_PSYS_EVENT_TYPE_ */
+ uint64_t user_token;
+ uint64_t issue_id;
+ uint32_t buffer_idx;
+ uint32_t error;
+ int32_t reserved[2];
+} __attribute__ ((packed));
+
+#define IPU_PSYS_EVENT_TYPE_CMD_COMPLETE 1
+#define IPU_PSYS_EVENT_TYPE_BUFFER_COMPLETE 2
+
+/**
+ * struct ipu_psys_buffer - for input/output terminals
+ * @len: total allocated size @ base address
+ * @userptr: user pointer
+ * @fd: DMA-BUF handle
+ * @data_offset:offset to valid data
+ * @bytes_used: amount of valid data including offset
+ * @flags: flags
+ */
+struct ipu_psys_buffer {
+ uint64_t len;
+ union {
+ int fd;
+ void __user *userptr;
+ uint64_t reserved;
+ } base;
+ uint32_t data_offset;
+ uint32_t bytes_used;
+ uint32_t flags;
+ uint32_t reserved[2];
+} __attribute__ ((packed));
+
+#define IPU_BUFFER_FLAG_INPUT (1 << 0)
+#define IPU_BUFFER_FLAG_OUTPUT (1 << 1)
+#define IPU_BUFFER_FLAG_MAPPED (1 << 2)
+#define IPU_BUFFER_FLAG_NO_FLUSH (1 << 3)
+#define IPU_BUFFER_FLAG_DMA_HANDLE (1 << 4)
+#define IPU_BUFFER_FLAG_USERPTR (1 << 5)
+
+#define IPU_PSYS_CMD_PRIORITY_HIGH 0
+#define IPU_PSYS_CMD_PRIORITY_MED 1
+#define IPU_PSYS_CMD_PRIORITY_LOW 2
+#define IPU_PSYS_CMD_PRIORITY_NUM 3
+
+/**
+ * struct ipu_psys_command - processing command
+ * @issue_id: unique id for the command set by user
+ * @user_token: token of the command
+ * @priority: priority of the command
+ * @pg_manifest: userspace pointer to program group manifest
+ * @buffers: userspace pointers to array of psys dma buf structs
+ * @pg: process group DMA-BUF handle
+ * @pg_manifest_size: size of program group manifest
+ * @bufcount: number of buffers in buffers array
+ * @min_psys_freq: minimum psys frequency in MHz used for this cmd
+ * @frame_counter: counter of current frame synced between isys and psys
+ * @kernel_enable_bitmap: enable bits for each individual kernel
+ * @terminal_enable_bitmap: enable bits for each individual terminals
+ * @routing_enable_bitmap: enable bits for each individual routing
+ * @rbm: enable bits for routing
+ *
+ * Specifies a processing command with input and output buffers.
+ */
+struct ipu_psys_command {
+ uint64_t issue_id;
+ uint64_t user_token;
+ uint32_t priority;
+ void __user *pg_manifest;
+ struct ipu_psys_buffer __user *buffers;
+ int pg;
+ uint32_t pg_manifest_size;
+ uint32_t bufcount;
+ uint32_t min_psys_freq;
+ uint32_t frame_counter;
+ uint32_t kernel_enable_bitmap[4];
+ uint32_t terminal_enable_bitmap[4];
+ uint32_t routing_enable_bitmap[4];
+ uint32_t rbm[5];
+ uint32_t reserved[2];
+} __attribute__ ((packed));
+
+struct ipu_psys_manifest {
+ uint32_t index;
+ uint32_t size;
+ void __user *manifest;
+ uint32_t reserved[5];
+} __attribute__ ((packed));
+
+#define IPU_IOC_QUERYCAP _IOR('A', 1, struct ipu_psys_capability)
+#define IPU_IOC_MAPBUF _IOWR('A', 2, int)
+#define IPU_IOC_UNMAPBUF _IOWR('A', 3, int)
+#define IPU_IOC_GETBUF _IOWR('A', 4, struct ipu_psys_buffer)
+#define IPU_IOC_PUTBUF _IOWR('A', 5, struct ipu_psys_buffer)
+#define IPU_IOC_QCMD _IOWR('A', 6, struct ipu_psys_command)
+#define IPU_IOC_DQEVENT _IOWR('A', 7, struct ipu_psys_event)
+#define IPU_IOC_CMD_CANCEL _IOWR('A', 8, struct ipu_psys_command)
+#define IPU_IOC_GET_MANIFEST _IOWR('A', 9, struct ipu_psys_manifest)
+
+#endif /* _UAPI_IPU_PSYS_H */
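
And a matching user-space sketch for the PSYS node, reading the capability block with IPU_IOC_QUERYCAP; the /dev/ipu-psys0 node name is an assumption:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "ipu-psys.h"   /* the header above, as installed */

    int main(void)
    {
            struct ipu_psys_capability cap;
            int fd = open("/dev/ipu-psys0", O_RDWR);   /* assumed node */

            if (fd < 0)
                    return 1;
            memset(&cap, 0, sizeof(cap));
            if (ioctl(fd, IPU_IOC_QUERYCAP, &cap) == 0)
                    printf("driver %.20s, %u program groups\n",
                           (const char *)cap.driver, cap.pg_count);
            close(fd);
            return 0;
    }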