Commit 7817ffd2 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'platform-drivers-x86-v5.2-1' of git://git.infradead.org/linux-platform-drivers-x86

Pull x86 platform driver updates from Andy Shevchenko:
 "Gathered pile of patches for Platform Drivers x86. No surprises and no
  merge conflicts. Business as usual.

  Summary:

   - New power button driver for the Basin Cove PMIC.

   - The ASUS WMI driver gained Fn-lock mode switch support.

   - Resolve the never-ending story of non-working Wi-Fi on newer Lenovo
     IdeaPad computers: the blacklist is now replaced with a whitelist.

   - New facility to debug S0ix failures on Intel Atom platforms. The
     Intel PMC and accompanying drivers are cleaned up.

   - Mellanox got a new TmFifo driver. In addition, the tachometer
     sensor and watchdog are enabled on Mellanox platforms.

   - Embedded controller information is now recognized on newer
     ThinkPads. The Bluetooth driver is blacklisted for some ThinkPad
     models.

   - The touchscreen DMI driver is extended to support the 'jumper
     ezpad 6 pro b' and the Myria MY8307 2-in-1.

   - Additionally, a few small fixes here and there for WMI and ACPI
     laptop drivers.

   - The following is an automated git shortlog grouped by driver:

   - alienware-wmi:
      - printing the wrong error code
      - fix kfree on potentially uninitialized pointer

   - asus-wmi:
      - Add fn-lock mode switch support

   - dell-laptop:
      - fix rfkill functionality

   - dell-rbtn:
      - Add missing #include

   - ideapad-laptop:
      - Remove no_hw_rfkill_list

   - intel_pmc_core:
      - Allow to dump debug registers on S0ix failure
      - Convert to a platform_driver
      - Mark local function static

   - intel_pmc_ipc:
      - Don't map non-used optional resources
      - Apply same width for offset definitions
      - Use BIT() macro
      - adding error handling

   - intel_punit_ipc:
      - Revert "Fix resource ioremap warning"

   - mlx-platform:
      - Add mlx-wdt platform driver activation
      - Add support for tachometer speed register
      - Add TmFifo driver for Mellanox BlueField Soc

   - sony-laptop:
      - Fix unintentional fall-through

   - thinkpad_acpi:
      - cleanup for Thinkpad ACPI led
      - Mark expected switch fall-throughs
      - fix spelling mistake "capabilites" -> "capabilities"
      - Read EC information on newer models
      - Disable Bluetooth for some machines

   - touchscreen_dmi:
      - Add info for 'jumper ezpad 6 pro b' touchscreen
      - Add info for Myria MY8307 2-in-1"

* tag 'platform-drivers-x86-v5.2-1' of git://git.infradead.org/linux-platform-drivers-x86: (26 commits)
  platform/x86: Add support for Basin Cove power button
  platform/x86: asus-wmi: Add fn-lock mode switch support
  platform/x86: ideapad-laptop: Remove no_hw_rfkill_list
  platform/x86: touchscreen_dmi: Add info for 'jumper ezpad 6 pro b' touchscreen
  platform/x86: thinkpad_acpi: cleanup for Thinkpad ACPI led
  platform/x86: thinkpad_acpi: Mark expected switch fall-throughs
  platform/x86: sony-laptop: Fix unintentional fall-through
  platform/x86: alienware-wmi: printing the wrong error code
  platform/x86: intel_pmc_core: Allow to dump debug registers on S0ix failure
  platform/x86: intel_pmc_core: Convert to a platform_driver
  platform/x86: mlx-platform: Add mlx-wdt platform driver activation
  platform/x86: mlx-platform: Add support for tachometer speed register
  platform/mellanox: Add TmFifo driver for Mellanox BlueField Soc
  platform/x86: thinkpad_acpi: fix spelling mistake "capabilites" -> "capabilities"
  platform/x86: intel_punit_ipc: Revert "Fix resource ioremap warning"
  platform/x86: intel_pmc_ipc: Don't map non-used optional resources
  platform/x86: intel_pmc_ipc: Apply same width for offset definitions
  platform/x86: intel_pmc_ipc: Use BIT() macro
  platform/x86: alienware-wmi: fix kfree on potentially uninitialized pointer
  platform/x86: dell-laptop: fix rfkill functionality
  ...
parents cccd559e 6456fd73
@@ -5,7 +5,7 @@
 menuconfig MELLANOX_PLATFORM
 bool "Platform support for Mellanox hardware"
-depends on X86 || ARM || COMPILE_TEST
+depends on X86 || ARM || ARM64 || COMPILE_TEST
 ---help---
 Say Y here to get to see options for platform support for
 Mellanox systems. This option alone does not add any kernel code.
@@ -34,4 +34,14 @@ config MLXREG_IO
 to system resets operation, system reset causes monitoring and some
 kinds of mux selection.
+config MLXBF_TMFIFO
+tristate "Mellanox BlueField SoC TmFifo platform driver"
+depends on ARM64
+depends on ACPI
+depends on VIRTIO_CONSOLE && VIRTIO_NET
+help
+Say y here to enable TmFifo support. The TmFifo driver provides
+platform driver support for the TmFifo which supports console
+and networking based on the virtio framework.
 endif # MELLANOX_PLATFORM
@@ -3,5 +3,6 @@
 # Makefile for linux/drivers/platform/mellanox
 # Mellanox Platform-Specific Drivers
 #
+obj-$(CONFIG_MLXBF_TMFIFO) += mlxbf-tmfifo.o
 obj-$(CONFIG_MLXREG_HOTPLUG) += mlxreg-hotplug.o
 obj-$(CONFIG_MLXREG_IO) += mlxreg-io.o
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2019, Mellanox Technologies. All rights reserved.
*/
#ifndef __MLXBF_TMFIFO_REGS_H__
#define __MLXBF_TMFIFO_REGS_H__
#include <linux/types.h>
#include <linux/bits.h>
#define MLXBF_TMFIFO_TX_DATA 0x00
#define MLXBF_TMFIFO_TX_STS 0x08
#define MLXBF_TMFIFO_TX_STS__LENGTH 0x0001
#define MLXBF_TMFIFO_TX_STS__COUNT_SHIFT 0
#define MLXBF_TMFIFO_TX_STS__COUNT_WIDTH 9
#define MLXBF_TMFIFO_TX_STS__COUNT_RESET_VAL 0
#define MLXBF_TMFIFO_TX_STS__COUNT_RMASK GENMASK_ULL(8, 0)
#define MLXBF_TMFIFO_TX_STS__COUNT_MASK GENMASK_ULL(8, 0)
#define MLXBF_TMFIFO_TX_CTL 0x10
#define MLXBF_TMFIFO_TX_CTL__LENGTH 0x0001
#define MLXBF_TMFIFO_TX_CTL__LWM_SHIFT 0
#define MLXBF_TMFIFO_TX_CTL__LWM_WIDTH 8
#define MLXBF_TMFIFO_TX_CTL__LWM_RESET_VAL 128
#define MLXBF_TMFIFO_TX_CTL__LWM_RMASK GENMASK_ULL(7, 0)
#define MLXBF_TMFIFO_TX_CTL__LWM_MASK GENMASK_ULL(7, 0)
#define MLXBF_TMFIFO_TX_CTL__HWM_SHIFT 8
#define MLXBF_TMFIFO_TX_CTL__HWM_WIDTH 8
#define MLXBF_TMFIFO_TX_CTL__HWM_RESET_VAL 128
#define MLXBF_TMFIFO_TX_CTL__HWM_RMASK GENMASK_ULL(7, 0)
#define MLXBF_TMFIFO_TX_CTL__HWM_MASK GENMASK_ULL(15, 8)
#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_SHIFT 32
#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_WIDTH 9
#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_RESET_VAL 256
#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_RMASK GENMASK_ULL(8, 0)
#define MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK GENMASK_ULL(40, 32)
#define MLXBF_TMFIFO_RX_DATA 0x00
#define MLXBF_TMFIFO_RX_STS 0x08
#define MLXBF_TMFIFO_RX_STS__LENGTH 0x0001
#define MLXBF_TMFIFO_RX_STS__COUNT_SHIFT 0
#define MLXBF_TMFIFO_RX_STS__COUNT_WIDTH 9
#define MLXBF_TMFIFO_RX_STS__COUNT_RESET_VAL 0
#define MLXBF_TMFIFO_RX_STS__COUNT_RMASK GENMASK_ULL(8, 0)
#define MLXBF_TMFIFO_RX_STS__COUNT_MASK GENMASK_ULL(8, 0)
#define MLXBF_TMFIFO_RX_CTL 0x10
#define MLXBF_TMFIFO_RX_CTL__LENGTH 0x0001
#define MLXBF_TMFIFO_RX_CTL__LWM_SHIFT 0
#define MLXBF_TMFIFO_RX_CTL__LWM_WIDTH 8
#define MLXBF_TMFIFO_RX_CTL__LWM_RESET_VAL 128
#define MLXBF_TMFIFO_RX_CTL__LWM_RMASK GENMASK_ULL(7, 0)
#define MLXBF_TMFIFO_RX_CTL__LWM_MASK GENMASK_ULL(7, 0)
#define MLXBF_TMFIFO_RX_CTL__HWM_SHIFT 8
#define MLXBF_TMFIFO_RX_CTL__HWM_WIDTH 8
#define MLXBF_TMFIFO_RX_CTL__HWM_RESET_VAL 128
#define MLXBF_TMFIFO_RX_CTL__HWM_RMASK GENMASK_ULL(7, 0)
#define MLXBF_TMFIFO_RX_CTL__HWM_MASK GENMASK_ULL(15, 8)
#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_SHIFT 32
#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_WIDTH 9
#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_RESET_VAL 256
#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_RMASK GENMASK_ULL(8, 0)
#define MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK GENMASK_ULL(40, 32)
#endif /* !defined(__MLXBF_TMFIFO_REGS_H__) */
// SPDX-License-Identifier: GPL-2.0+
/*
* Mellanox BlueField SoC TmFifo driver
*
* Copyright (C) 2019 Mellanox Technologies
*/
#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/efi.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/virtio_config.h>
#include <linux/virtio_console.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include "mlxbf-tmfifo-regs.h"
/* Vring size. */
#define MLXBF_TMFIFO_VRING_SIZE SZ_1K
/* Console Tx buffer size. */
#define MLXBF_TMFIFO_CON_TX_BUF_SIZE SZ_32K
/* Console Tx buffer reserved space. */
#define MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE 8
/* House-keeping timer interval. */
#define MLXBF_TMFIFO_TIMER_INTERVAL (HZ / 10)
/* Virtual devices sharing the TM FIFO. */
#define MLXBF_TMFIFO_VDEV_MAX (VIRTIO_ID_CONSOLE + 1)
/*
* Reserve 1/16 of TmFifo space, so console messages are not starved by
* the networking traffic.
*/
#define MLXBF_TMFIFO_RESERVE_RATIO 16
/* Message with data needs at least two words (for header & data). */
#define MLXBF_TMFIFO_DATA_MIN_WORDS 2
struct mlxbf_tmfifo;
/**
* mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring
* @va: virtual address of the ring
* @dma: dma address of the ring
* @vq: pointer to the virtio virtqueue
* @desc: current descriptor of the pending packet
* @desc_head: head descriptor of the pending packet
* @cur_len: processed length of the current descriptor
* @rem_len: remaining length of the pending packet
* @pkt_len: total length of the pending packet
* @next_avail: next avail descriptor id
* @num: vring size (number of descriptors)
* @align: vring alignment size
* @index: vring index
* @vdev_id: vring virtio id (VIRTIO_ID_xxx)
* @fifo: pointer to the tmfifo structure
*/
struct mlxbf_tmfifo_vring {
void *va;
dma_addr_t dma;
struct virtqueue *vq;
struct vring_desc *desc;
struct vring_desc *desc_head;
int cur_len;
int rem_len;
u32 pkt_len;
u16 next_avail;
int num;
int align;
int index;
int vdev_id;
struct mlxbf_tmfifo *fifo;
};
/* Interrupt types. */
enum {
MLXBF_TM_RX_LWM_IRQ,
MLXBF_TM_RX_HWM_IRQ,
MLXBF_TM_TX_LWM_IRQ,
MLXBF_TM_TX_HWM_IRQ,
MLXBF_TM_MAX_IRQ
};
/* Ring types (Rx & Tx). */
enum {
MLXBF_TMFIFO_VRING_RX,
MLXBF_TMFIFO_VRING_TX,
MLXBF_TMFIFO_VRING_MAX
};
/**
* mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device
* @vdev: virtio device, in which the vdev.id.device field has the
* VIRTIO_ID_xxx id to distinguish the virtual device.
* @status: status of the device
* @features: supported features of the device
* @vrings: array of tmfifo vrings of this device
* @config.cons: virtual console config -
* select if vdev.id.device is VIRTIO_ID_CONSOLE
* @config.net: virtual network config -
* select if vdev.id.device is VIRTIO_ID_NET
* @tx_buf: tx buffer used to buffer data before writing into the FIFO
*/
struct mlxbf_tmfifo_vdev {
struct virtio_device vdev;
u8 status;
u64 features;
struct mlxbf_tmfifo_vring vrings[MLXBF_TMFIFO_VRING_MAX];
union {
struct virtio_console_config cons;
struct virtio_net_config net;
} config;
struct circ_buf tx_buf;
};
/**
* mlxbf_tmfifo_irq_info - Structure of the interrupt information
* @fifo: pointer to the tmfifo structure
* @irq: interrupt number
* @index: index into the interrupt array
*/
struct mlxbf_tmfifo_irq_info {
struct mlxbf_tmfifo *fifo;
int irq;
int index;
};
/**
* mlxbf_tmfifo - Structure of the TmFifo
* @vdev: array of the virtual devices running over the TmFifo
* @lock: lock to protect the TmFifo access
* @rx_base: mapped register base address for the Rx FIFO
* @tx_base: mapped register base address for the Tx FIFO
* @rx_fifo_size: number of entries of the Rx FIFO
* @tx_fifo_size: number of entries of the Tx FIFO
* @pend_events: pending bits for deferred events
* @irq_info: interrupt information
* @work: work struct for deferred process
* @timer: background timer
* @vring: Tx/Rx ring
* @spin_lock: spin lock
* @is_ready: ready flag
*/
struct mlxbf_tmfifo {
struct mlxbf_tmfifo_vdev *vdev[MLXBF_TMFIFO_VDEV_MAX];
struct mutex lock; /* TmFifo lock */
void __iomem *rx_base;
void __iomem *tx_base;
int rx_fifo_size;
int tx_fifo_size;
unsigned long pend_events;
struct mlxbf_tmfifo_irq_info irq_info[MLXBF_TM_MAX_IRQ];
struct work_struct work;
struct timer_list timer;
struct mlxbf_tmfifo_vring *vring[2];
spinlock_t spin_lock; /* spin lock */
bool is_ready;
};
/**
* mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header
* @type: message type
* @len: payload length in network byte order. Messages sent into the FIFO
* will be read by the other side as data stream in the same byte order.
* The length needs to be encoded into network order so both sides
* could understand it.
*/
struct mlxbf_tmfifo_msg_hdr {
u8 type;
__be16 len;
u8 unused[5];
} __packed __aligned(sizeof(u64));
/*
* Default MAC.
* This MAC address will be read from EFI persistent variable if configured.
* It can also be reconfigured with standard Linux tools.
*/
static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
0x00, 0x1A, 0xCA, 0xFF, 0xFF, 0x01
};
/* EFI variable name of the MAC address. */
static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
/* Maximum L2 header length. */
#define MLXBF_TMFIFO_NET_L2_OVERHEAD 36
/* Supported virtio-net features. */
#define MLXBF_TMFIFO_NET_FEATURES \
(BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_STATUS) | \
BIT_ULL(VIRTIO_NET_F_MAC))
#define mlxbf_vdev_to_tmfifo(d) container_of(d, struct mlxbf_tmfifo_vdev, vdev)
/* Free vrings of the FIFO device. */
static void mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo *fifo,
struct mlxbf_tmfifo_vdev *tm_vdev)
{
struct mlxbf_tmfifo_vring *vring;
int i, size;
for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
vring = &tm_vdev->vrings[i];
if (vring->va) {
size = vring_size(vring->num, vring->align);
dma_free_coherent(tm_vdev->vdev.dev.parent, size,
vring->va, vring->dma);
vring->va = NULL;
if (vring->vq) {
vring_del_virtqueue(vring->vq);
vring->vq = NULL;
}
}
}
}
/* Allocate vrings for the FIFO. */
static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
struct mlxbf_tmfifo_vdev *tm_vdev)
{
struct mlxbf_tmfifo_vring *vring;
struct device *dev;
dma_addr_t dma;
int i, size;
void *va;
for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
vring = &tm_vdev->vrings[i];
vring->fifo = fifo;
vring->num = MLXBF_TMFIFO_VRING_SIZE;
vring->align = SMP_CACHE_BYTES;
vring->index = i;
vring->vdev_id = tm_vdev->vdev.id.device;
dev = &tm_vdev->vdev.dev;
size = vring_size(vring->num, vring->align);
va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
if (!va) {
mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
dev_err(dev->parent, "dma_alloc_coherent failed\n");
return -ENOMEM;
}
vring->va = va;
vring->dma = dma;
}
return 0;
}
/* Disable interrupts of the FIFO device. */
static void mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo *fifo)
{
int i, irq;
for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
irq = fifo->irq_info[i].irq;
fifo->irq_info[i].irq = 0;
disable_irq(irq);
}
}
/* Interrupt handler. */
static irqreturn_t mlxbf_tmfifo_irq_handler(int irq, void *arg)
{
struct mlxbf_tmfifo_irq_info *irq_info = arg;
if (!test_and_set_bit(irq_info->index, &irq_info->fifo->pend_events))
schedule_work(&irq_info->fifo->work);
return IRQ_HANDLED;
}
/* Get the next packet descriptor from the vring. */
static struct vring_desc *
mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
{
const struct vring *vr = virtqueue_get_vring(vring->vq);
struct virtio_device *vdev = vring->vq->vdev;
unsigned int idx, head;
if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
return NULL;
idx = vring->next_avail % vr->num;
head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
if (WARN_ON(head >= vr->num))
return NULL;
vring->next_avail++;
return &vr->desc[head];
}
/* Release virtio descriptor. */
static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
struct vring_desc *desc, u32 len)
{
const struct vring *vr = virtqueue_get_vring(vring->vq);
struct virtio_device *vdev = vring->vq->vdev;
u16 idx, vr_idx;
vr_idx = virtio16_to_cpu(vdev, vr->used->idx);
idx = vr_idx % vr->num;
vr->used->ring[idx].id = cpu_to_virtio32(vdev, desc - vr->desc);
vr->used->ring[idx].len = cpu_to_virtio32(vdev, len);
/*
* Virtio could poll and check the 'idx' to decide whether the desc is
* done or not. Add a memory barrier here to make sure the update above
* completes before updating the idx.
*/
mb();
vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
}
/* Get the total length of the descriptor chain. */
static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
struct vring_desc *desc)
{
const struct vring *vr = virtqueue_get_vring(vring->vq);
struct virtio_device *vdev = vring->vq->vdev;
u32 len = 0, idx;
while (desc) {
len += virtio32_to_cpu(vdev, desc->len);
if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
break;
idx = virtio16_to_cpu(vdev, desc->next);
desc = &vr->desc[idx];
}
return len;
}
static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
{
struct vring_desc *desc_head;
u32 len = 0;
if (vring->desc_head) {
desc_head = vring->desc_head;
len = vring->pkt_len;
} else {
desc_head = mlxbf_tmfifo_get_next_desc(vring);
len = mlxbf_tmfifo_get_pkt_len(vring, desc_head);
}
if (desc_head)
mlxbf_tmfifo_release_desc(vring, desc_head, len);
vring->pkt_len = 0;
vring->desc = NULL;
vring->desc_head = NULL;
}
static void mlxbf_tmfifo_init_net_desc(struct mlxbf_tmfifo_vring *vring,
struct vring_desc *desc, bool is_rx)
{
struct virtio_device *vdev = vring->vq->vdev;
struct virtio_net_hdr *net_hdr;
net_hdr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
memset(net_hdr, 0, sizeof(*net_hdr));
}
/* Get and initialize the next packet. */
static struct vring_desc *
mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring *vring, bool is_rx)
{
struct vring_desc *desc;
desc = mlxbf_tmfifo_get_next_desc(vring);
if (desc && is_rx && vring->vdev_id == VIRTIO_ID_NET)
mlxbf_tmfifo_init_net_desc(vring, desc, is_rx);
vring->desc_head = desc;
vring->desc = desc;
return desc;
}
/* House-keeping timer. */
static void mlxbf_tmfifo_timer(struct timer_list *t)
{
struct mlxbf_tmfifo *fifo = container_of(t, struct mlxbf_tmfifo, timer);
int rx, tx;
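/*
 * Periodically kick Rx/Tx handling from the workqueue so data still moves
 * even if a FIFO interrupt was missed; the work is only scheduled when the
 * corresponding event is not already pending.
 */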
rx = !test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events);
tx = !test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
if (rx || tx)
schedule_work(&fifo->work);
mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
}
/* Copy one console packet into the output buffer. */
static void mlxbf_tmfifo_console_output_one(struct mlxbf_tmfifo_vdev *cons,
struct mlxbf_tmfifo_vring *vring,
struct vring_desc *desc)
{
const struct vring *vr = virtqueue_get_vring(vring->vq);
struct virtio_device *vdev = &cons->vdev;
u32 len, idx, seg;
void *addr;
while (desc) {
addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
len = virtio32_to_cpu(vdev, desc->len);
seg = CIRC_SPACE_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
MLXBF_TMFIFO_CON_TX_BUF_SIZE);
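/*
 * Copy into the circular Tx buffer, splitting the copy when it would wrap
 * past the end of the buffer.
 */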
if (len <= seg) {
memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, len);
} else {
memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, seg);
addr += seg;
memcpy(cons->tx_buf.buf, addr, len - seg);
}
cons->tx_buf.head = (cons->tx_buf.head + len) %
MLXBF_TMFIFO_CON_TX_BUF_SIZE;
if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
break;
idx = virtio16_to_cpu(vdev, desc->next);
desc = &vr->desc[idx];
}
}
/* Copy console data into the output buffer. */
static void mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev *cons,
struct mlxbf_tmfifo_vring *vring)
{
struct vring_desc *desc;
u32 len, avail;
desc = mlxbf_tmfifo_get_next_desc(vring);
while (desc) {
/* Release the packet if not enough space. */
len = mlxbf_tmfifo_get_pkt_len(vring, desc);
avail = CIRC_SPACE(cons->tx_buf.head, cons->tx_buf.tail,
MLXBF_TMFIFO_CON_TX_BUF_SIZE);
if (len + MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE > avail) {
mlxbf_tmfifo_release_desc(vring, desc, len);
break;
}
mlxbf_tmfifo_console_output_one(cons, vring, desc);
mlxbf_tmfifo_release_desc(vring, desc, len);
desc = mlxbf_tmfifo_get_next_desc(vring);
}
}
/* Get the number of available words in Rx FIFO for receiving. */
static int mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo *fifo)
{
u64 sts;
sts = readq(fifo->rx_base + MLXBF_TMFIFO_RX_STS);
return FIELD_GET(MLXBF_TMFIFO_RX_STS__COUNT_MASK, sts);
}
/* Get the number of available words in the TmFifo for sending. */
static int mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo *fifo, int vdev_id)
{
int tx_reserve;
u32 count;
u64 sts;
/* Reserve some room in FIFO for console messages. */
if (vdev_id == VIRTIO_ID_NET)
tx_reserve = fifo->tx_fifo_size / MLXBF_TMFIFO_RESERVE_RATIO;
else
tx_reserve = 1;
sts = readq(fifo->tx_base + MLXBF_TMFIFO_TX_STS);
count = FIELD_GET(MLXBF_TMFIFO_TX_STS__COUNT_MASK, sts);
return fifo->tx_fifo_size - tx_reserve - count;
}
/* Console Tx (move data from the output buffer into the TmFifo). */
static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
{
struct mlxbf_tmfifo_msg_hdr hdr;
struct mlxbf_tmfifo_vdev *cons;
unsigned long flags;
int size, seg;
void *addr;
u64 data;
/* Return if not enough space available. */
if (avail < MLXBF_TMFIFO_DATA_MIN_WORDS)
return;
cons = fifo->vdev[VIRTIO_ID_CONSOLE];
if (!cons || !cons->tx_buf.buf)
return;
/* Return if no data to send. */
size = CIRC_CNT(cons->tx_buf.head, cons->tx_buf.tail,
MLXBF_TMFIFO_CON_TX_BUF_SIZE);
if (size == 0)
return;
/* Adjust the size to available space. */
if (size + sizeof(hdr) > avail * sizeof(u64))
size = avail * sizeof(u64) - sizeof(hdr);
/* Write header. */
hdr.type = VIRTIO_ID_CONSOLE;
hdr.len = htons(size);
writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
/* Use spin-lock to protect the 'cons->tx_buf'. */
spin_lock_irqsave(&fifo->spin_lock, flags);
while (size > 0) {
addr = cons->tx_buf.buf + cons->tx_buf.tail;
seg = CIRC_CNT_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
MLXBF_TMFIFO_CON_TX_BUF_SIZE);
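/*
 * Assemble one 64-bit word, wrapping around to the start of the circular
 * buffer when fewer than 8 contiguous bytes remain before the end.
 */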
if (seg >= sizeof(u64)) {
memcpy(&data, addr, sizeof(u64));
} else {
memcpy(&data, addr, seg);
memcpy((u8 *)&data + seg, cons->tx_buf.buf,
sizeof(u64) - seg);
}
writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
if (size >= sizeof(u64)) {
cons->tx_buf.tail = (cons->tx_buf.tail + sizeof(u64)) %
MLXBF_TMFIFO_CON_TX_BUF_SIZE;
size -= sizeof(u64);
} else {
cons->tx_buf.tail = (cons->tx_buf.tail + size) %
MLXBF_TMFIFO_CON_TX_BUF_SIZE;
size = 0;
}
}
spin_unlock_irqrestore(&fifo->spin_lock, flags);
}
/* Rx/Tx one word in the descriptor buffer. */
static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
struct vring_desc *desc,
bool is_rx, int len)
{
struct virtio_device *vdev = vring->vq->vdev;
struct mlxbf_tmfifo *fifo = vring->fifo;
void *addr;
u64 data;
/* Get the buffer address of this desc. */
addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
/* Read a word from FIFO for Rx. */
if (is_rx)
data = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA);
if (vring->cur_len + sizeof(u64) <= len) {
/* The whole word. */
if (is_rx)
memcpy(addr + vring->cur_len, &data, sizeof(u64));
else
memcpy(&data, addr + vring->cur_len, sizeof(u64));
vring->cur_len += sizeof(u64);
} else {
/* Leftover bytes. */
if (is_rx)
memcpy(addr + vring->cur_len, &data,
len - vring->cur_len);
else
memcpy(&data, addr + vring->cur_len,
len - vring->cur_len);
vring->cur_len = len;
}
/* Write the word into FIFO for Tx. */
if (!is_rx)
writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
}
/*
* Rx/Tx packet header.
*
* In Rx case, the packet might be found to belong to a different vring since
* the TmFifo is shared by different services. In such case, the 'vring_change'
* flag is set.
*/
static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
struct vring_desc *desc,
bool is_rx, bool *vring_change)
{
struct mlxbf_tmfifo *fifo = vring->fifo;
struct virtio_net_config *config;
struct mlxbf_tmfifo_msg_hdr hdr;
int vdev_id, hdr_len;
/* Read/Write packet header. */
if (is_rx) {
/* Drain one word from the FIFO. */
*(u64 *)&hdr = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA);
/* Skip the length 0 packets (keepalive). */
if (hdr.len == 0)
return;
/* Check packet type. */
if (hdr.type == VIRTIO_ID_NET) {
vdev_id = VIRTIO_ID_NET;
hdr_len = sizeof(struct virtio_net_hdr);
config = &fifo->vdev[vdev_id]->config.net;
if (ntohs(hdr.len) > config->mtu +
MLXBF_TMFIFO_NET_L2_OVERHEAD)
return;
} else {
vdev_id = VIRTIO_ID_CONSOLE;
hdr_len = 0;
}
/*
* Check whether the new packet still belongs to this vring.
* If not, update the pkt_len of the new vring.
*/
if (vdev_id != vring->vdev_id) {
struct mlxbf_tmfifo_vdev *tm_dev2 = fifo->vdev[vdev_id];
if (!tm_dev2)
return;
vring->desc = desc;
vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
*vring_change = true;
}
vring->pkt_len = ntohs(hdr.len) + hdr_len;
} else {
/* Network virtio has an extra header. */
hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
sizeof(struct virtio_net_hdr) : 0;
vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
hdr.len = htons(vring->pkt_len - hdr_len);
writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
}
vring->cur_len = hdr_len;
vring->rem_len = vring->pkt_len;
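/* Remember which vring owns this direction until the whole packet is done. */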
fifo->vring[is_rx] = vring;
}
/*
* Rx/Tx one descriptor.
*
* Return true to indicate more data available.
*/
static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
bool is_rx, int *avail)
{
const struct vring *vr = virtqueue_get_vring(vring->vq);
struct mlxbf_tmfifo *fifo = vring->fifo;
struct virtio_device *vdev;
bool vring_change = false;
struct vring_desc *desc;
unsigned long flags;
u32 len, idx;
vdev = &fifo->vdev[vring->vdev_id]->vdev;
/* Get the descriptor of the next packet. */
if (!vring->desc) {
desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
if (!desc)
return false;
} else {
desc = vring->desc;
}
/* Beginning of a packet. Start to Rx/Tx packet header. */
if (vring->pkt_len == 0) {
mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
(*avail)--;
/* Return if new packet is for another ring. */
if (vring_change)
return false;
goto mlxbf_tmfifo_desc_done;
}
/* Get the length of this desc. */
len = virtio32_to_cpu(vdev, desc->len);
if (len > vring->rem_len)
len = vring->rem_len;
/* Rx/Tx one word (8 bytes) if not done. */
if (vring->cur_len < len) {
mlxbf_tmfifo_rxtx_word(vring, desc, is_rx, len);
(*avail)--;
}
/* Check again whether it's done. */
if (vring->cur_len == len) {
vring->cur_len = 0;
vring->rem_len -= len;
/* Get the next desc on the chain. */
if (vring->rem_len > 0 &&
(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
idx = virtio16_to_cpu(vdev, desc->next);
desc = &vr->desc[idx];
goto mlxbf_tmfifo_desc_done;
}
/* Done and release the pending packet. */
mlxbf_tmfifo_release_pending_pkt(vring);
desc = NULL;
fifo->vring[is_rx] = NULL;
/* Notify upper layer that packet is done. */
spin_lock_irqsave(&fifo->spin_lock, flags);
vring_interrupt(0, vring->vq);
spin_unlock_irqrestore(&fifo->spin_lock, flags);
}
mlxbf_tmfifo_desc_done:
/* Save the current desc. */
vring->desc = desc;
return true;
}
/* Rx & Tx processing of a queue. */
static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
{
int avail = 0, devid = vring->vdev_id;
struct mlxbf_tmfifo *fifo;
bool more;
fifo = vring->fifo;
/* Return if vdev is not ready. */
if (!fifo->vdev[devid])
return;
/* Return if another vring is running. */
if (fifo->vring[is_rx] && fifo->vring[is_rx] != vring)
return;
/* Only handle console and network for now. */
if (WARN_ON(devid != VIRTIO_ID_NET && devid != VIRTIO_ID_CONSOLE))
return;
do {
/* Get available FIFO space. */
if (avail == 0) {
if (is_rx)
avail = mlxbf_tmfifo_get_rx_avail(fifo);
else
avail = mlxbf_tmfifo_get_tx_avail(fifo, devid);
if (avail <= 0)
break;
}
/* Console output always comes from the Tx buffer. */
if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
mlxbf_tmfifo_console_tx(fifo, avail);
break;
}
/* Handle one descriptor. */
more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
} while (more);
}
/* Handle Rx or Tx queues. */
static void mlxbf_tmfifo_work_rxtx(struct mlxbf_tmfifo *fifo, int queue_id,
int irq_id, bool is_rx)
{
struct mlxbf_tmfifo_vdev *tm_vdev;
struct mlxbf_tmfifo_vring *vring;
int i;
if (!test_and_clear_bit(irq_id, &fifo->pend_events) ||
!fifo->irq_info[irq_id].irq)
return;
for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++) {
tm_vdev = fifo->vdev[i];
if (tm_vdev) {
vring = &tm_vdev->vrings[queue_id];
if (vring->vq)
mlxbf_tmfifo_rxtx(vring, is_rx);
}
}
}
/* Work handler for Rx and Tx case. */
static void mlxbf_tmfifo_work_handler(struct work_struct *work)
{
struct mlxbf_tmfifo *fifo;
fifo = container_of(work, struct mlxbf_tmfifo, work);
if (!fifo->is_ready)
return;
mutex_lock(&fifo->lock);
/* Tx (Send data to the TmFifo). */
mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_TX,
MLXBF_TM_TX_LWM_IRQ, false);
/* Rx (Receive data from the TmFifo). */
mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_RX,
MLXBF_TM_RX_HWM_IRQ, true);
mutex_unlock(&fifo->lock);
}
/* The notify function is called when new buffers are posted. */
static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
{
struct mlxbf_tmfifo_vring *vring = vq->priv;
struct mlxbf_tmfifo_vdev *tm_vdev;
struct mlxbf_tmfifo *fifo;
unsigned long flags;
fifo = vring->fifo;
/*
* Virtio maintains vrings in pairs, even number ring for Rx
* and odd number ring for Tx.
*/
if (vring->index & BIT(0)) {
/*
* Console could make blocking call with interrupts disabled.
* In such case, the vring needs to be served right away. For
* other cases, just set the TX LWM bit to start Tx in the
* worker handler.
*/
if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
spin_lock_irqsave(&fifo->spin_lock, flags);
tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
mlxbf_tmfifo_console_output(tm_vdev, vring);
spin_unlock_irqrestore(&fifo->spin_lock, flags);
} else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
&fifo->pend_events)) {
return true;
}
} else {
if (test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events))
return true;
}
schedule_work(&fifo->work);
return true;
}
/* Get the array of feature bits for this device. */
static u64 mlxbf_tmfifo_virtio_get_features(struct virtio_device *vdev)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
return tm_vdev->features;
}
/* Confirm device features to use. */
static int mlxbf_tmfifo_virtio_finalize_features(struct virtio_device *vdev)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
tm_vdev->features = vdev->features;
return 0;
}
/* Free virtqueues found by find_vqs(). */
static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
struct mlxbf_tmfifo_vring *vring;
struct virtqueue *vq;
int i;
for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
vring = &tm_vdev->vrings[i];
/* Release the pending packet. */
if (vring->desc)
mlxbf_tmfifo_release_pending_pkt(vring);
vq = vring->vq;
if (vq) {
vring->vq = NULL;
vring_del_virtqueue(vq);
}
}
}
/* Create and initialize the virtual queues. */
static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
unsigned int nvqs,
struct virtqueue *vqs[],
vq_callback_t *callbacks[],
const char * const names[],
const bool *ctx,
struct irq_affinity *desc)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
struct mlxbf_tmfifo_vring *vring;
struct virtqueue *vq;
int i, ret, size;
if (nvqs > ARRAY_SIZE(tm_vdev->vrings))
return -EINVAL;
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
ret = -EINVAL;
goto error;
}
vring = &tm_vdev->vrings[i];
/* zero vring */
size = vring_size(vring->num, vring->align);
memset(vring->va, 0, size);
vq = vring_new_virtqueue(i, vring->num, vring->align, vdev,
false, false, vring->va,
mlxbf_tmfifo_virtio_notify,
callbacks[i], names[i]);
if (!vq) {
dev_err(&vdev->dev, "vring_new_virtqueue failed\n");
ret = -ENOMEM;
goto error;
}
vqs[i] = vq;
vring->vq = vq;
vq->priv = vring;
}
return 0;
error:
mlxbf_tmfifo_virtio_del_vqs(vdev);
return ret;
}
/* Read the status byte. */
static u8 mlxbf_tmfifo_virtio_get_status(struct virtio_device *vdev)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
return tm_vdev->status;
}
/* Write the status byte. */
static void mlxbf_tmfifo_virtio_set_status(struct virtio_device *vdev,
u8 status)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
tm_vdev->status = status;
}
/* Reset the device. Not much here for now. */
static void mlxbf_tmfifo_virtio_reset(struct virtio_device *vdev)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
tm_vdev->status = 0;
}
/* Read the value of a configuration field. */
static void mlxbf_tmfifo_virtio_get(struct virtio_device *vdev,
unsigned int offset,
void *buf,
unsigned int len)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
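/* Ignore reads beyond the end of the config space. */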
if ((u64)offset + len > sizeof(tm_vdev->config))
return;
memcpy(buf, (u8 *)&tm_vdev->config + offset, len);
}
/* Write the value of a configuration field. */
static void mlxbf_tmfifo_virtio_set(struct virtio_device *vdev,
unsigned int offset,
const void *buf,
unsigned int len)
{
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
if ((u64)offset + len > sizeof(tm_vdev->config))
return;
memcpy((u8 *)&tm_vdev->config + offset, buf, len);
}
static void tmfifo_virtio_dev_release(struct device *device)
{
struct virtio_device *vdev =
container_of(device, struct virtio_device, dev);
struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
kfree(tm_vdev);
}
/* Virtio config operations. */
static const struct virtio_config_ops mlxbf_tmfifo_virtio_config_ops = {
.get_features = mlxbf_tmfifo_virtio_get_features,
.finalize_features = mlxbf_tmfifo_virtio_finalize_features,
.find_vqs = mlxbf_tmfifo_virtio_find_vqs,
.del_vqs = mlxbf_tmfifo_virtio_del_vqs,
.reset = mlxbf_tmfifo_virtio_reset,
.set_status = mlxbf_tmfifo_virtio_set_status,
.get_status = mlxbf_tmfifo_virtio_get_status,
.get = mlxbf_tmfifo_virtio_get,
.set = mlxbf_tmfifo_virtio_set,
};
/* Create vdev for the FIFO. */
static int mlxbf_tmfifo_create_vdev(struct device *dev,
struct mlxbf_tmfifo *fifo,
int vdev_id, u64 features,
void *config, u32 size)
{
struct mlxbf_tmfifo_vdev *tm_vdev, *reg_dev = NULL;
int ret;
mutex_lock(&fifo->lock);
tm_vdev = fifo->vdev[vdev_id];
if (tm_vdev) {
dev_err(dev, "vdev %d already exists\n", vdev_id);
ret = -EEXIST;
goto fail;
}
tm_vdev = kzalloc(sizeof(*tm_vdev), GFP_KERNEL);
if (!tm_vdev) {
ret = -ENOMEM;
goto fail;
}
tm_vdev->vdev.id.device = vdev_id;
tm_vdev->vdev.config = &mlxbf_tmfifo_virtio_config_ops;
tm_vdev->vdev.dev.parent = dev;
tm_vdev->vdev.dev.release = tmfifo_virtio_dev_release;
tm_vdev->features = features;
if (config)
memcpy(&tm_vdev->config, config, size);
if (mlxbf_tmfifo_alloc_vrings(fifo, tm_vdev)) {
dev_err(dev, "unable to allocate vring\n");
ret = -ENOMEM;
goto vdev_fail;
}
/* Allocate an output buffer for the console device. */
if (vdev_id == VIRTIO_ID_CONSOLE)
tm_vdev->tx_buf.buf = devm_kmalloc(dev,
MLXBF_TMFIFO_CON_TX_BUF_SIZE,
GFP_KERNEL);
fifo->vdev[vdev_id] = tm_vdev;
/* Register the virtio device. */
ret = register_virtio_device(&tm_vdev->vdev);
reg_dev = tm_vdev;
if (ret) {
dev_err(dev, "register_virtio_device failed\n");
goto vdev_fail;
}
mutex_unlock(&fifo->lock);
return 0;
vdev_fail:
mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
fifo->vdev[vdev_id] = NULL;
if (reg_dev)
put_device(&tm_vdev->vdev.dev);
else
kfree(tm_vdev);
fail:
mutex_unlock(&fifo->lock);
return ret;
}
/* Delete vdev for the FIFO. */
static int mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo *fifo, int vdev_id)
{
struct mlxbf_tmfifo_vdev *tm_vdev;
mutex_lock(&fifo->lock);
/* Unregister vdev. */
tm_vdev = fifo->vdev[vdev_id];
if (tm_vdev) {
unregister_virtio_device(&tm_vdev->vdev);
mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
fifo->vdev[vdev_id] = NULL;
}
mutex_unlock(&fifo->lock);
return 0;
}
/* Read the configured network MAC address from efi variable. */
static void mlxbf_tmfifo_get_cfg_mac(u8 *mac)
{
efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
unsigned long size = ETH_ALEN;
u8 buf[ETH_ALEN];
efi_status_t rc;
rc = efi.get_variable(mlxbf_tmfifo_efi_name, &guid, NULL, &size, buf);
if (rc == EFI_SUCCESS && size == ETH_ALEN)
ether_addr_copy(mac, buf);
else
ether_addr_copy(mac, mlxbf_tmfifo_net_default_mac);
}
/* Set the TmFifo thresholds that are used to trigger interrupts. */
static void mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo *fifo)
{
u64 ctl;
/* Get Tx FIFO size and set the low/high watermark. */
ctl = readq(fifo->tx_base + MLXBF_TMFIFO_TX_CTL);
fifo->tx_fifo_size =
FIELD_GET(MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK, ctl);
ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__LWM_MASK) |
FIELD_PREP(MLXBF_TMFIFO_TX_CTL__LWM_MASK,
fifo->tx_fifo_size / 2);
ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__HWM_MASK) |
FIELD_PREP(MLXBF_TMFIFO_TX_CTL__HWM_MASK,
fifo->tx_fifo_size - 1);
writeq(ctl, fifo->tx_base + MLXBF_TMFIFO_TX_CTL);
/* Get Rx FIFO size and set the low/high watermark. */
ctl = readq(fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
fifo->rx_fifo_size =
FIELD_GET(MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK, ctl);
ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__LWM_MASK) |
FIELD_PREP(MLXBF_TMFIFO_RX_CTL__LWM_MASK, 0);
ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__HWM_MASK) |
FIELD_PREP(MLXBF_TMFIFO_RX_CTL__HWM_MASK, 1);
writeq(ctl, fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
}
static void mlxbf_tmfifo_cleanup(struct mlxbf_tmfifo *fifo)
{
int i;
fifo->is_ready = false;
del_timer_sync(&fifo->timer);
mlxbf_tmfifo_disable_irqs(fifo);
cancel_work_sync(&fifo->work);
for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++)
mlxbf_tmfifo_delete_vdev(fifo, i);
}
/* Probe the TMFIFO. */
static int mlxbf_tmfifo_probe(struct platform_device *pdev)
{
struct virtio_net_config net_config;
struct device *dev = &pdev->dev;
struct mlxbf_tmfifo *fifo;
int i, rc;
fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
if (!fifo)
return -ENOMEM;
spin_lock_init(&fifo->spin_lock);
INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
mutex_init(&fifo->lock);
/* Get the resource of the Rx FIFO. */
fifo->rx_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fifo->rx_base))
return PTR_ERR(fifo->rx_base);
/* Get the resource of the Tx FIFO. */
fifo->tx_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(fifo->tx_base))
return PTR_ERR(fifo->tx_base);
platform_set_drvdata(pdev, fifo);
timer_setup(&fifo->timer, mlxbf_tmfifo_timer, 0);
for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
fifo->irq_info[i].index = i;
fifo->irq_info[i].fifo = fifo;
fifo->irq_info[i].irq = platform_get_irq(pdev, i);
rc = devm_request_irq(dev, fifo->irq_info[i].irq,
mlxbf_tmfifo_irq_handler, 0,
"tmfifo", &fifo->irq_info[i]);
if (rc) {
dev_err(dev, "devm_request_irq failed\n");
fifo->irq_info[i].irq = 0;
return rc;
}
}
mlxbf_tmfifo_set_threshold(fifo);
/* Create the console vdev. */
rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_CONSOLE, 0, NULL, 0);
if (rc)
goto fail;
/* Create the network vdev. */
memset(&net_config, 0, sizeof(net_config));
net_config.mtu = ETH_DATA_LEN;
net_config.status = VIRTIO_NET_S_LINK_UP;
mlxbf_tmfifo_get_cfg_mac(net_config.mac);
rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
MLXBF_TMFIFO_NET_FEATURES, &net_config,
sizeof(net_config));
if (rc)
goto fail;
mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
fifo->is_ready = true;
return 0;
fail:
mlxbf_tmfifo_cleanup(fifo);
return rc;
}
/* Device remove function. */
static int mlxbf_tmfifo_remove(struct platform_device *pdev)
{
struct mlxbf_tmfifo *fifo = platform_get_drvdata(pdev);
mlxbf_tmfifo_cleanup(fifo);
return 0;
}
static const struct acpi_device_id mlxbf_tmfifo_acpi_match[] = {
{ "MLNXBF01", 0 },
{}
};
MODULE_DEVICE_TABLE(acpi, mlxbf_tmfifo_acpi_match);
static struct platform_driver mlxbf_tmfifo_driver = {
.probe = mlxbf_tmfifo_probe,
.remove = mlxbf_tmfifo_remove,
.driver = {
.name = "bf-tmfifo",
.acpi_match_table = mlxbf_tmfifo_acpi_match,
},
};
module_platform_driver(mlxbf_tmfifo_driver);
MODULE_DESCRIPTION("Mellanox BlueField SoC TmFifo Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mellanox Technologies");
@@ -1263,6 +1263,17 @@ config INTEL_CHTDC_TI_PWRBTN
 To compile this driver as a module, choose M here: the module
 will be called intel_chtdc_ti_pwrbtn.
+config INTEL_MRFLD_PWRBTN
+tristate "Intel Merrifield Basin Cove power button driver"
+depends on INTEL_SOC_PMIC_MRFLD
+depends on INPUT
+---help---
+This option adds a power button driver for Basin Cove PMIC
+on Intel Merrifield devices.
+To compile this driver as a module, choose M here: the module
+will be called intel_mrfld_pwrbtn.
 config I2C_MULTI_INSTANTIATE
 tristate "I2C multi instantiate pseudo device driver"
 depends on I2C && ACPI
...
@@ -94,6 +94,7 @@ obj-$(CONFIG_PMC_ATOM) += pmc_atom.o
 obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o
 obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o
 obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o
+obj-$(CONFIG_INTEL_MRFLD_PWRBTN) += intel_mrfld_pwrbtn.o
 obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o
 obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o
 obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
@@ -522,23 +522,22 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
 input.length = (acpi_size) sizeof(*in_args);
 input.pointer = in_args;
-if (out_data != NULL) {
+if (out_data) {
 output.length = ACPI_ALLOCATE_BUFFER;
 output.pointer = NULL;
 status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
 command, &input, &output);
-} else
+if (ACPI_SUCCESS(status)) {
+obj = (union acpi_object *)output.pointer;
+if (obj && obj->type == ACPI_TYPE_INTEGER)
+*out_data = (u32)obj->integer.value;
+}
+kfree(output.pointer);
+} else {
 status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
 command, &input, NULL);
-if (ACPI_SUCCESS(status) && out_data != NULL) {
-obj = (union acpi_object *)output.pointer;
-if (obj && obj->type == ACPI_TYPE_INTEGER)
-*out_data = (u32) obj->integer.value;
 }
-kfree(output.pointer);
 return status;
 }
 /*
@@ -588,7 +587,7 @@ static ssize_t show_hdmi_source(struct device *dev,
 return scnprintf(buf, PAGE_SIZE,
 "input [gpu] unknown\n");
 }
-pr_err("alienware-wmi: unknown HDMI source status: %d\n", out_data);
+pr_err("alienware-wmi: unknown HDMI source status: %u\n", status);
 return scnprintf(buf, PAGE_SIZE, "input gpu [unknown]\n");
 }
...
@@ -66,10 +66,13 @@ MODULE_LICENSE("GPL");
 #define NOTIFY_BRNUP_MAX 0x1f
 #define NOTIFY_BRNDOWN_MIN 0x20
 #define NOTIFY_BRNDOWN_MAX 0x2e
+#define NOTIFY_FNLOCK_TOGGLE 0x4e
 #define NOTIFY_KBD_BRTUP 0xc4
 #define NOTIFY_KBD_BRTDWN 0xc5
 #define NOTIFY_KBD_BRTTOGGLE 0xc7
+#define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0)
 #define ASUS_FAN_DESC "cpu_fan"
 #define ASUS_FAN_MFUN 0x13
 #define ASUS_FAN_SFUN_READ 0x06
@@ -177,6 +180,8 @@ struct asus_wmi {
 struct workqueue_struct *hotplug_workqueue;
 struct work_struct hotplug_work;
+bool fnlock_locked;
 struct asus_wmi_debug debug;
 struct asus_wmi_driver *driver;
@@ -1619,6 +1624,23 @@ static int is_display_toggle(int code)
 return 0;
 }
+static bool asus_wmi_has_fnlock_key(struct asus_wmi *asus)
+{
+u32 result;
+asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FNLOCK, &result);
+return (result & ASUS_WMI_DSTS_PRESENCE_BIT) &&
+!(result & ASUS_WMI_FNLOCK_BIOS_DISABLED);
+}
+static void asus_wmi_fnlock_update(struct asus_wmi *asus)
+{
+int mode = asus->fnlock_locked;
+asus_wmi_set_devstate(ASUS_WMI_DEVID_FNLOCK, mode, NULL);
+}
 static void asus_wmi_notify(u32 value, void *context)
 {
 struct asus_wmi *asus = context;
@@ -1680,6 +1702,12 @@ static void asus_wmi_notify(u32 value, void *context)
 goto exit;
 }
+if (code == NOTIFY_FNLOCK_TOGGLE) {
+asus->fnlock_locked = !asus->fnlock_locked;
+asus_wmi_fnlock_update(asus);
+goto exit;
+}
 if (is_display_toggle(code) &&
 asus->driver->quirks->no_display_toggle)
 goto exit;
@@ -2134,6 +2162,11 @@ static int asus_wmi_add(struct platform_device *pdev)
 } else
 err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
+if (asus_wmi_has_fnlock_key(asus)) {
+asus->fnlock_locked = true;
+asus_wmi_fnlock_update(asus);
+}
 status = wmi_install_notify_handler(asus->driver->event_guid,
 asus_wmi_notify, asus);
 if (ACPI_FAILURE(status)) {
@@ -2213,6 +2246,8 @@ static int asus_hotk_resume(struct device *device)
 if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
 kbd_led_update(asus);
+if (asus_wmi_has_fnlock_key(asus))
+asus_wmi_fnlock_update(asus);
 return 0;
 }
@@ -2249,6 +2284,8 @@ static int asus_hotk_restore(struct device *device)
 if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
 kbd_led_update(asus);
+if (asus_wmi_has_fnlock_key(asus))
+asus_wmi_fnlock_update(asus);
 return 0;
 }
...
@@ -531,7 +531,7 @@ static void dell_rfkill_query(struct rfkill *rfkill, void *data)
 return;
 }
-dell_fill_request(&buffer, 0, 0x2, 0, 0);
+dell_fill_request(&buffer, 0x2, 0, 0, 0);
 ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
 hwswitch = buffer.output[1];
@@ -562,7 +562,7 @@ static int dell_debugfs_show(struct seq_file *s, void *data)
 return ret;
 status = buffer.output[1];
-dell_fill_request(&buffer, 0, 0x2, 0, 0);
+dell_fill_request(&buffer, 0x2, 0, 0, 0);
 hwswitch_ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
 if (hwswitch_ret)
 return hwswitch_ret;
@@ -647,7 +647,7 @@ static void dell_update_rfkill(struct work_struct *ignored)
 if (ret != 0)
 return;
-dell_fill_request(&buffer, 0, 0x2, 0, 0);
+dell_fill_request(&buffer, 0x2, 0, 0, 0);
 ret = dell_send_request(&buffer, CLASS_INFO, SELECT_RFKILL);
 if (ret == 0 && (status & BIT(0)))
...
@@ -18,6 +18,8 @@
 #include <linux/rfkill.h>
 #include <linux/input.h>
+#include "dell-rbtn.h"
 enum rbtn_type {
 RBTN_UNKNOWN,
 RBTN_TOGGLE,
...
@@ -980,312 +980,21 @@ static void ideapad_wmi_notify(u32 value, void *context)
 #endif
 /*
- * Some ideapads don't have a hardware rfkill switch, reading VPCCMD_R_RF
- * always results in 0 on these models, causing ideapad_laptop to wrongly
- * report all radios as hardware-blocked.
+ * Some ideapads have a hardware rfkill switch, but most do not have one.
+ * Reading VPCCMD_R_RF always results in 0 on models without a hardware rfkill,
+ * switch causing ideapad_laptop to wrongly report all radios as hw-blocked.
+ * There used to be a long list of DMI ids for models without a hw rfkill
+ * switch here, but that resulted in playing whack a mole.
+ * More importantly wrongly reporting the wifi radio as hw-blocked, results in
+ * non working wifi. Whereas not reporting it hw-blocked, when it actually is
+ * hw-blocked results in an empty SSID list, which is a much more benign
+ * failure mode.
+ * So the default now is the much safer option of assuming there is no
+ * hardware rfkill switch. This default also actually matches most hardware,
+ * since having a hw rfkill switch is quite rare on modern hardware, so this
+ * also leads to a much shorter list.
 */
-static const struct dmi_system_id no_hw_rfkill_list[] = {
{
.ident = "Lenovo RESCUER R720-15IKBN",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo R720-15IKBN"),
},
},
{
.ident = "Lenovo G40-30",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G40-30"),
},
},
{
.ident = "Lenovo G50-30",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G50-30"),
},
},
{
.ident = "Lenovo V310-14IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-14IKB"),
},
},
{
.ident = "Lenovo V310-14ISK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-14ISK"),
},
},
{
.ident = "Lenovo V310-15IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-15IKB"),
},
},
{
.ident = "Lenovo V310-15ISK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-15ISK"),
},
},
{
.ident = "Lenovo V510-15IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V510-15IKB"),
},
},
{
.ident = "Lenovo ideapad 300-15IBR",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300-15IBR"),
},
},
{
.ident = "Lenovo ideapad 300-15IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300-15IKB"),
},
},
{
.ident = "Lenovo ideapad 300S-11IBR",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300S-11BR"),
},
},
{
.ident = "Lenovo ideapad 310-15ABR",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15ABR"),
},
},
{
.ident = "Lenovo ideapad 310-15IAP",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15IAP"),
},
},
{
.ident = "Lenovo ideapad 310-15IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15IKB"),
},
},
{
.ident = "Lenovo ideapad 310-15ISK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15ISK"),
},
},
{
.ident = "Lenovo ideapad 330-15ICH",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 330-15ICH"),
},
},
{
.ident = "Lenovo ideapad 530S-14ARR",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 530S-14ARR"),
},
},
{
.ident = "Lenovo ideapad S130-14IGM",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad S130-14IGM"),
},
},
{
.ident = "Lenovo ideapad Y700-14ISK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-14ISK"),
},
},
{
.ident = "Lenovo ideapad Y700-15ACZ",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ACZ"),
},
},
{
.ident = "Lenovo ideapad Y700-15ISK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ISK"),
},
},
{
.ident = "Lenovo ideapad Y700 Touch-15ISK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700 Touch-15ISK"),
},
},
{
.ident = "Lenovo ideapad Y700-17ISK",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-17ISK"),
},
},
{
.ident = "Lenovo ideapad MIIX 720-12IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "MIIX 720-12IKB"),
},
},
{
.ident = "Lenovo Legion Y520-15IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKB"),
},
},
{
.ident = "Lenovo Y520-15IKBM",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y520-15IKBM"),
},
},
{
.ident = "Lenovo Legion Y530-15ICH",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion Y530-15ICH"),
},
},
{
.ident = "Lenovo Legion Y530-15ICH-1060",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion Y530-15ICH-1060"),
},
},
{
.ident = "Lenovo Legion Y720-15IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y720-15IKB"),
},
},
{
.ident = "Lenovo Legion Y720-15IKBN",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y720-15IKBN"),
},
},
{
.ident = "Lenovo Y720-15IKBM",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Y720-15IKBM"),
},
},
{
.ident = "Lenovo Yoga 2 11 / 13 / Pro",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2"),
},
},
{
.ident = "Lenovo Yoga 2 11 / 13 / Pro",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "Yoga2"),
},
},
{
.ident = "Lenovo Yoga 2 13",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Yoga 2 13"),
},
},
{
.ident = "Lenovo Yoga 3 1170 / 1470",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 3"),
},
},
{
.ident = "Lenovo Yoga 3 Pro 1370",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3"),
},
},
{
.ident = "Lenovo Yoga 700",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"),
},
},
{
.ident = "Lenovo Yoga 900",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"),
},
},
{
.ident = "Lenovo Yoga 900",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "VIUU4"),
},
},
{
.ident = "Lenovo YOGA 910-13IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"),
},
},
{
.ident = "Lenovo YOGA 920-13IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920-13IKB"),
},
},
{
.ident = "Lenovo YOGA C930-13IKB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA C930-13IKB"),
},
},
{
.ident = "Lenovo Zhaoyang E42-80",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ZHAOYANG E42-80"),
},
},
{}
};
@@ -1311,7 +1020,7 @@ static int ideapad_acpi_add(struct platform_device *pdev)
priv->cfg = cfg;
priv->adev = adev;
priv->platform_device = pdev;
- priv->has_hw_rfkill_switch = !dmi_check_system(no_hw_rfkill_list);
+ priv->has_hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
ret = ideapad_sysfs_init(priv);
if (ret)
...
// SPDX-License-Identifier: GPL-2.0
/*
* Power-button driver for Basin Cove PMIC
*
* Copyright (c) 2019, Intel Corporation.
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*/
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/mfd/intel_soc_pmic_mrfld.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/slab.h>
#define BCOVE_PBSTATUS 0x27
#define BCOVE_PBSTATUS_PBLVL BIT(4) /* 1 - release, 0 - press */
static irqreturn_t mrfld_pwrbtn_interrupt(int irq, void *dev_id)
{
struct input_dev *input = dev_id;
struct device *dev = input->dev.parent;
struct regmap *regmap = dev_get_drvdata(dev);
unsigned int state;
int ret;
ret = regmap_read(regmap, BCOVE_PBSTATUS, &state);
if (ret)
return IRQ_NONE;
dev_dbg(dev, "PBSTATUS=0x%x\n", state);
input_report_key(input, KEY_POWER, !(state & BCOVE_PBSTATUS_PBLVL));
input_sync(input);
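/* Clear the first-level power-button mask bit again (0 = unmasked) so further presses keep raising interrupts. */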
regmap_update_bits(regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_PWRBTN, 0);
return IRQ_HANDLED;
}
static int mrfld_pwrbtn_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
struct input_dev *input;
int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
input = devm_input_allocate_device(dev);
if (!input)
return -ENOMEM;
input->name = pdev->name;
input->phys = "power-button/input0";
input->id.bustype = BUS_HOST;
input->dev.parent = dev;
input_set_capability(input, EV_KEY, KEY_POWER);
ret = input_register_device(input);
if (ret)
return ret;
dev_set_drvdata(dev, pmic->regmap);
ret = devm_request_threaded_irq(dev, irq, NULL, mrfld_pwrbtn_interrupt,
IRQF_ONESHOT | IRQF_SHARED, pdev->name,
input);
if (ret)
return ret;
regmap_update_bits(pmic->regmap, BCOVE_MIRQLVL1, BCOVE_LVL1_PWRBTN, 0);
regmap_update_bits(pmic->regmap, BCOVE_MPBIRQ, BCOVE_PBIRQ_PBTN, 0);
device_init_wakeup(dev, true);
dev_pm_set_wake_irq(dev, irq);
return 0;
}
static int mrfld_pwrbtn_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
dev_pm_clear_wake_irq(dev);
device_init_wakeup(dev, false);
return 0;
}
static const struct platform_device_id mrfld_pwrbtn_id_table[] = {
{ .name = "mrfld_bcove_pwrbtn" },
{}
};
MODULE_DEVICE_TABLE(platform, mrfld_pwrbtn_id_table);
static struct platform_driver mrfld_pwrbtn_driver = {
.driver = {
.name = "mrfld_bcove_pwrbtn",
},
.probe = mrfld_pwrbtn_probe,
.remove = mrfld_pwrbtn_remove,
.id_table = mrfld_pwrbtn_id_table,
};
module_platform_driver(mrfld_pwrbtn_driver);
MODULE_DESCRIPTION("Power-button driver for Basin Cove PMIC");
MODULE_LICENSE("GPL v2");
@@ -19,6 +19,8 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
#include <linux/uaccess.h>
#include <asm/cpu_device_id.h>
@@ -828,7 +830,7 @@ static const struct pci_device_id pmc_pci_ids[] = {
* the platform BIOS enforces 24Mhx Crystal to shutdown
* before PMC can assert SLP_S0#.
*/
- int quirk_xtal_ignore(const struct dmi_system_id *id)
+ static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
struct pmc_dev *pmcdev = &pmc;
u32 value;
@@ -854,13 +856,17 @@ static const struct dmi_system_id pmc_core_dmi_table[] = {
{}
};
- static int __init pmc_core_probe(void)
+ static int pmc_core_probe(struct platform_device *pdev)
{
static bool device_initialized;
struct pmc_dev *pmcdev = &pmc;
const struct x86_cpu_id *cpu_id;
u64 slp_s0_addr;
int err;
if (device_initialized)
return -ENODEV;
cpu_id = x86_match_cpu(intel_pmc_core_ids);
if (!cpu_id)
return -ENODEV;
@@ -886,30 +892,178 @@ static int __init pmc_core_probe(void)
return -ENOMEM;
mutex_init(&pmcdev->lock);
platform_set_drvdata(pdev, pmcdev);
pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
dmi_check_system(pmc_core_dmi_table);
err = pmc_core_dbgfs_register(pmcdev);
if (err < 0) {
- pr_warn(" debugfs register failed.\n");
+ dev_warn(&pdev->dev, "debugfs register failed.\n");
iounmap(pmcdev->regbase);
return err;
}
- dmi_check_system(pmc_core_dmi_table);
- pr_info(" initialized\n");
+ device_initialized = true;
+ dev_info(&pdev->dev, " initialized\n");
return 0;
}
- module_init(pmc_core_probe)
- static void __exit pmc_core_remove(void)
+ static int pmc_core_remove(struct platform_device *pdev)
{
- struct pmc_dev *pmcdev = &pmc;
+ struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
pmc_core_dbgfs_unregister(pmcdev);
platform_set_drvdata(pdev, NULL);
mutex_destroy(&pmcdev->lock);
iounmap(pmcdev->regbase);
return 0;
}
- module_exit(pmc_core_remove)
#ifdef CONFIG_PM_SLEEP
static bool warn_on_s0ix_failures;
module_param(warn_on_s0ix_failures, bool, 0644);
MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
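/* With mode 0644 the flag is also runtime-tunable via /sys/module/intel_pmc_core/parameters/warn_on_s0ix_failures. */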
static int pmc_core_suspend(struct device *dev)
{
struct pmc_dev *pmcdev = dev_get_drvdata(dev);
pmcdev->check_counters = false;
/* No warnings on S0ix failures */
if (!warn_on_s0ix_failures)
return 0;
/* Check if this suspend will actually use S0ix */
if (pm_suspend_via_firmware())
return 0;
/* Save PC10 residency for checking later */
if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
return -EIO;
/* Save S0ix residency for checking later */
if (pmc_core_dev_state_get(pmcdev, &pmcdev->s0ix_counter))
return -EIO;
pmcdev->check_counters = true;
return 0;
}
static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
{
u64 pc10_counter;
if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
return false;
if (pc10_counter == pmcdev->pc10_counter)
return true;
return false;
}
static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
{
u64 s0ix_counter;
if (pmc_core_dev_state_get(pmcdev, &s0ix_counter))
return false;
if (s0ix_counter == pmcdev->s0ix_counter)
return true;
return false;
}
static int pmc_core_resume(struct device *dev)
{
struct pmc_dev *pmcdev = dev_get_drvdata(dev);
const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
int offset = pmcdev->map->slps0_dbg_offset;
const struct pmc_bit_map *map;
u32 data;
if (!pmcdev->check_counters)
return 0;
if (!pmc_core_is_s0ix_failed(pmcdev))
return 0;
if (pmc_core_is_pc10_failed(pmcdev)) {
/* S0ix failed because of PC10 entry failure */
dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
pmcdev->pc10_counter);
return 0;
}
/* The really interesting case - S0ix failed - let's ask the PMC why. */
dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
pmcdev->s0ix_counter);
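/* Each entry in slps0_dbg_maps describes one 32-bit SLP_S0 debug register, hence the 4-byte stride below. */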
while (*maps) {
map = *maps;
data = pmc_core_reg_read(pmcdev, offset);
offset += 4;
while (map->name) {
dev_dbg(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
map->name,
data & map->bit_mask ? "Yes" : "No");
map++;
}
maps++;
}
return 0;
}
#endif
static const struct dev_pm_ops pmc_core_pm_ops = {
SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
};
static struct platform_driver pmc_core_driver = {
.driver = {
.name = "intel_pmc_core",
.pm = &pmc_core_pm_ops,
},
.probe = pmc_core_probe,
.remove = pmc_core_remove,
};
static struct platform_device pmc_core_device = {
.name = "intel_pmc_core",
};
static int __init pmc_core_init(void)
{
int ret;
if (!x86_match_cpu(intel_pmc_core_ids))
return -ENODEV;
ret = platform_driver_register(&pmc_core_driver);
if (ret)
return ret;
ret = platform_device_register(&pmc_core_device);
if (ret) {
platform_driver_unregister(&pmc_core_driver);
return ret;
}
return 0;
}
static void __exit pmc_core_exit(void)
{
platform_device_unregister(&pmc_core_device);
platform_driver_unregister(&pmc_core_driver);
}
module_init(pmc_core_init)
module_exit(pmc_core_exit)
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel PMC Core Driver"); MODULE_DESCRIPTION("Intel PMC Core Driver");
@@ -241,6 +241,9 @@ struct pmc_reg_map {
* @pmc_xram_read_bit: flag to indicate whether PMC XRAM shadow registers
* used to read MPHY PG and PLL status are available
* @mutex_lock: mutex to complete one transcation
* @check_counters: On resume, check if counters are getting incremented
* @pc10_counter: PC10 residency counter
* @s0ix_counter: S0ix residency (step adjusted)
*
* pmc_dev contains info about power management controller device.
*/
@@ -253,6 +256,10 @@ struct pmc_dev {
#endif /* CONFIG_DEBUG_FS */
int pmc_xram_read_bit;
struct mutex lock; /* generic mutex lock for PMC Core */
bool check_counters; /* Check for counter increments on resume */
u64 pc10_counter;
u64 s0ix_counter;
};
#endif /* PMC_CORE_H */
@@ -40,14 +40,14 @@
* The ARC handles the interrupt and services it, writing optional data to
* the IPC1 registers, updates the IPC_STS response register with the status.
*/
- #define IPC_CMD 0x0
+ #define IPC_CMD 0x00
- #define IPC_CMD_MSI 0x100
+ #define IPC_CMD_MSI BIT(8)
#define IPC_CMD_SIZE 16
#define IPC_CMD_SUBCMD 12
#define IPC_STATUS 0x04
- #define IPC_STATUS_IRQ 0x4
+ #define IPC_STATUS_IRQ BIT(2)
- #define IPC_STATUS_ERR 0x2
+ #define IPC_STATUS_ERR BIT(1)
- #define IPC_STATUS_BUSY 0x1
+ #define IPC_STATUS_BUSY BIT(0)
#define IPC_SPTR 0x08
#define IPC_DPTR 0x0C
#define IPC_WRITE_BUFFER 0x80
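/*
 * A minimal sketch of how these bit definitions are typically consumed
 * (illustrative assumption only; the helper name, the bare busy-wait and
 * the missing timeout are not taken from intel_pmc_ipc.c, and it relies
 * on the surrounding file's <linux/io.h> and <linux/errno.h> includes):
 */
static int ipc_wait_for_completion_sketch(void __iomem *ipc_base)
{
	u32 status;

	/* Spin until the SCU/ARC firmware clears the busy flag ... */
	do {
		status = readl(ipc_base + IPC_STATUS);
	} while (status & IPC_STATUS_BUSY);

	/* ... then report success or failure from the error flag. */
	return (status & IPC_STATUS_ERR) ? -EIO : 0;
}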
@@ -101,13 +101,13 @@
#define TELEM_SSRAM_SIZE 240
#define TELEM_PMC_SSRAM_OFFSET 0x1B00
#define TELEM_PUNIT_SSRAM_OFFSET 0x1A00
- #define TCO_PMC_OFFSET 0x8
+ #define TCO_PMC_OFFSET 0x08
- #define TCO_PMC_SIZE 0x4
+ #define TCO_PMC_SIZE 0x04
/* PMC register bit definitions */
/* PMC_CFG_REG bit masks */
- #define PMC_CFG_NO_REBOOT_MASK (1 << 4)
+ #define PMC_CFG_NO_REBOOT_MASK BIT_MASK(4)
#define PMC_CFG_NO_REBOOT_EN (1 << 4)
#define PMC_CFG_NO_REBOOT_DIS (0 << 4)
@@ -131,6 +131,7 @@ static struct intel_pmc_ipc_dev {
/* punit */
struct platform_device *punit_dev;
unsigned int punit_res_count;
/* Telemetry */
resource_size_t telem_pmc_ssram_base;
@@ -682,7 +683,7 @@ static int ipc_create_punit_device(void)
.name = PUNIT_DEVICE_NAME,
.id = -1,
.res = punit_res_array,
- .num_res = ARRAY_SIZE(punit_res_array),
+ .num_res = ipcdev.punit_res_count,
};
pdev = platform_device_register_full(&pdevinfo);
@@ -771,13 +772,17 @@ static int ipc_create_pmc_devices(void)
if (ret) {
dev_err(ipcdev.dev, "Failed to add punit platform device\n");
platform_device_unregister(ipcdev.tco_dev);
return ret;
}
if (!ipcdev.telem_res_inval) {
ret = ipc_create_telemetry_device();
- if (ret)
+ if (ret) {
dev_warn(ipcdev.dev,
"Failed to add telemetry platform device\n");
platform_device_unregister(ipcdev.punit_dev);
platform_device_unregister(ipcdev.tco_dev);
}
}
return ret;
@@ -785,7 +790,7 @@ static int ipc_create_pmc_devices(void)
static int ipc_plat_get_res(struct platform_device *pdev)
{
- struct resource *res, *punit_res;
+ struct resource *res, *punit_res = punit_res_array;
void __iomem *addr;
int size;
@@ -800,7 +805,8 @@ static int ipc_plat_get_res(struct platform_device *pdev)
ipcdev.acpi_io_size = size;
dev_info(&pdev->dev, "io res: %pR\n", res);
- punit_res = punit_res_array;
+ ipcdev.punit_res_count = 0;
/* This is index 0 to cover BIOS data register */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_BIOS_DATA_INDEX);
@@ -808,7 +814,7 @@ static int ipc_plat_get_res(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to get res of punit BIOS data\n");
return -ENXIO;
}
- *punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit BIOS data res: %pR\n", res);
/* This is index 1 to cover BIOS interface register */
@@ -818,42 +824,38 @@ static int ipc_plat_get_res(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to get res of punit BIOS iface\n");
return -ENXIO;
}
- *++punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit BIOS interface res: %pR\n", res);
/* This is index 2 to cover ISP data register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_ISP_DATA_INDEX);
- ++punit_res;
if (res) {
- *punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit ISP data res: %pR\n", res);
}
/* This is index 3 to cover ISP interface register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_ISP_IFACE_INDEX);
- ++punit_res;
if (res) {
- *punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit ISP interface res: %pR\n", res);
}
/* This is index 4 to cover GTD data register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_GTD_DATA_INDEX);
- ++punit_res;
if (res) {
- *punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit GTD data res: %pR\n", res);
}
/* This is index 5 to cover GTD interface register, optional */
res = platform_get_resource(pdev, IORESOURCE_MEM,
PLAT_RESOURCE_GTD_IFACE_INDEX);
- ++punit_res;
if (res) {
- *punit_res = *res;
+ punit_res[ipcdev.punit_res_count++] = *res;
dev_info(&pdev->dev, "punit GTD interface res: %pR\n", res);
}
...
@@ -252,28 +252,28 @@ static int intel_punit_get_bars(struct platform_device *pdev)
* - GTDRIVER_IPC BASE_IFACE
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
- if (res && resource_size(res) > 1) {
+ if (res) {
addr = devm_ioremap_resource(&pdev->dev, res);
if (!IS_ERR(addr))
punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr;
...
@@ -56,6 +56,16 @@
#define MLXPLAT_CPLD_LPC_REG_FAN_OFFSET 0x88
#define MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET 0x89
#define MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET 0x8a
#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET 0xc7
#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET 0xc8
#define MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET 0xc9
#define MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET 0xcb
#define MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET 0xcd
#define MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET 0xce
#define MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET 0xcf
#define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1
#define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2
#define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3
#define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3
#define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4
#define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET 0xe5
@@ -72,6 +82,7 @@
#define MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET 0xf5
#define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6
#define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7
#define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET 0xf8
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
@@ -128,6 +139,18 @@
#define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13
#define MLXPLAT_CPLD_FAN4_DEFAULT_NR 14
/* Masks and default values for watchdogs */
#define MLXPLAT_CPLD_WD1_CLEAR_MASK GENMASK(7, 1)
#define MLXPLAT_CPLD_WD2_CLEAR_MASK (GENMASK(7, 0) & ~BIT(1))
#define MLXPLAT_CPLD_WD_TYPE1_TO_MASK GENMASK(7, 4)
#define MLXPLAT_CPLD_WD_TYPE2_TO_MASK 0
#define MLXPLAT_CPLD_WD_RESET_ACT_MASK GENMASK(7, 1)
#define MLXPLAT_CPLD_WD_FAN_ACT_MASK (GENMASK(7, 0) & ~BIT(4))
#define MLXPLAT_CPLD_WD_COUNT_ACT_MASK (GENMASK(7, 0) & ~BIT(7))
#define MLXPLAT_CPLD_WD_DFLT_TIMEOUT 30
#define MLXPLAT_CPLD_WD_MAX_DEVS 2
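/*
 * For reference, the watchdog masks above expand to the following byte
 * values (computed from the GENMASK()/BIT() expressions, not part of the
 * original patch):
 *
 *   MLXPLAT_CPLD_WD1_CLEAR_MASK    = GENMASK(7, 1)           = 0xfe
 *   MLXPLAT_CPLD_WD2_CLEAR_MASK    = GENMASK(7, 0) & ~BIT(1) = 0xfd
 *   MLXPLAT_CPLD_WD_RESET_ACT_MASK = GENMASK(7, 1)           = 0xfe
 *   MLXPLAT_CPLD_WD_FAN_ACT_MASK   = GENMASK(7, 0) & ~BIT(4) = 0xef
 *   MLXPLAT_CPLD_WD_COUNT_ACT_MASK = GENMASK(7, 0) & ~BIT(7) = 0x7f
 */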
/* mlxplat_priv - platform private data
* @pdev_i2c - i2c controller platform device
* @pdev_mux - array of mux platform devices
@@ -135,6 +158,7 @@
* @pdev_led - led platform devices
* @pdev_io_regs - register access platform devices
* @pdev_fan - FAN platform devices
* @pdev_wd - array of watchdog platform devices
*/
struct mlxplat_priv {
struct platform_device *pdev_i2c;
@@ -143,6 +167,7 @@ struct mlxplat_priv {
struct platform_device *pdev_led;
struct platform_device *pdev_io_regs;
struct platform_device *pdev_fan;
struct platform_device *pdev_wd[MLXPLAT_CPLD_WD_MAX_DEVS];
};
/* Regions for LPC I2C controller and LPC base register space */
@@ -1339,6 +1364,10 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
.bit = BIT(3),
},
{
.label = "conf",
.capability = MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET,
},
};
static struct mlxreg_core_platform_data mlxplat_default_fan_data = {
@@ -1346,6 +1375,148 @@ static struct mlxreg_core_platform_data mlxplat_default_fan_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_data),
};
/* Watchdog type1: hardware implementation version1
* (MSN2700, MSN2410, MSN2740, MSN2100 and MSN2140 systems).
*/
static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type1[] = {
{
.label = "action",
.reg = MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
.bit = 0,
},
{
.label = "timeout",
.reg = MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE1_TO_MASK,
.health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
},
{
.label = "ping",
.reg = MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET,
.mask = MLXPLAT_CPLD_WD1_CLEAR_MASK,
.bit = 0,
},
{
.label = "reset",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.bit = 6,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type1[] = {
{
.label = "action",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
.bit = 4,
},
{
.label = "timeout",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE1_TO_MASK,
.health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
},
{
.label = "ping",
.reg = MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET,
.mask = MLXPLAT_CPLD_WD1_CLEAR_MASK,
.bit = 1,
},
};
static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type1[] = {
{
.data = mlxplat_mlxcpld_wd_main_regs_type1,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type1),
.version = MLX_WDT_TYPE1,
.identity = "mlx-wdt-main",
},
{
.data = mlxplat_mlxcpld_wd_aux_regs_type1,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type1),
.version = MLX_WDT_TYPE1,
.identity = "mlx-wdt-aux",
},
};
/* Watchdog type2: hardware implementation version 2
* (all systems except MSN2700, MSN2410, MSN2740, MSN2100 and MSN2140).
*/
static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type2[] = {
{
.label = "action",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
.bit = 0,
},
{
.label = "timeout",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
.health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
},
{
.label = "timeleft",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
},
{
.label = "ping",
.reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK,
.bit = 0,
},
{
.label = "reset",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(6),
.bit = 6,
},
};
static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type2[] = {
{
.label = "action",
.reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
.bit = 4,
},
{
.label = "timeout",
.reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
.health_cntr = MLXPLAT_CPLD_WD_DFLT_TIMEOUT,
},
{
.label = "timeleft",
.reg = MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET,
.mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK,
},
{
.label = "ping",
.reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET,
.mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK,
.bit = 4,
},
};
static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type2[] = {
{
.data = mlxplat_mlxcpld_wd_main_regs_type2,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type2),
.version = MLX_WDT_TYPE2,
.identity = "mlx-wdt-main",
},
{
.data = mlxplat_mlxcpld_wd_aux_regs_type2,
.counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type2),
.version = MLX_WDT_TYPE2,
.identity = "mlx-wdt-aux",
},
};
static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -1368,6 +1539,14 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
return true;
@@ -1411,6 +1590,16 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
@@ -1428,6 +1617,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
return true;
}
return false;
@@ -1467,6 +1657,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
@@ -1484,6 +1678,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
return true;
}
return false;
@@ -1493,6 +1688,7 @@ static const struct reg_default mlxplat_mlxcpld_regmap_default[] = {
{ MLXPLAT_CPLD_LPC_REG_WP1_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_WP2_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
{ MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 },
};
struct mlxplat_mlxcpld_regmap_context {
@@ -1542,6 +1738,8 @@ static struct mlxreg_core_hotplug_platform_data *mlxplat_hotplug;
static struct mlxreg_core_platform_data *mlxplat_led;
static struct mlxreg_core_platform_data *mlxplat_regs_io;
static struct mlxreg_core_platform_data *mlxplat_fan;
static struct mlxreg_core_platform_data
*mlxplat_wd_data[MLXPLAT_CPLD_WD_MAX_DEVS];
static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
{
@@ -1557,6 +1755,7 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_led_data;
mlxplat_regs_io = &mlxplat_default_regs_io_data;
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
};
@@ -1575,6 +1774,7 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_msn21xx_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
};
@@ -1593,6 +1793,7 @@ static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_default_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
};
@@ -1611,6 +1812,7 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
mlxplat_led = &mlxplat_msn21xx_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
return 1;
};
@@ -1630,6 +1832,8 @@ static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
mlxplat_led = &mlxplat_default_ng_led_data;
mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
mlxplat_fan = &mlxplat_default_fan_data;
for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
return 1;
};
@@ -1912,15 +2116,33 @@ static int __init mlxplat_init(void)
}
}
/* Add WD drivers. */
for (j = 0; j < MLXPLAT_CPLD_WD_MAX_DEVS; j++) {
if (mlxplat_wd_data[j]) {
mlxplat_wd_data[j]->regmap = mlxplat_hotplug->regmap;
priv->pdev_wd[j] = platform_device_register_resndata(
&mlxplat_dev->dev, "mlx-wdt",
j, NULL, 0,
mlxplat_wd_data[j],
sizeof(*mlxplat_wd_data[j]));
if (IS_ERR(priv->pdev_wd[j])) {
err = PTR_ERR(priv->pdev_wd[j]);
goto fail_platform_wd_register;
}
}
}
/* Sync registers with hardware. */
regcache_mark_dirty(mlxplat_hotplug->regmap);
err = regcache_sync(mlxplat_hotplug->regmap);
if (err)
- goto fail_platform_fan_register;
+ goto fail_platform_wd_register;
return 0;
- fail_platform_fan_register:
+ fail_platform_wd_register:
while (--j >= 0)
platform_device_unregister(priv->pdev_wd[j]);
if (mlxplat_fan)
platform_device_unregister(priv->pdev_fan);
fail_platform_io_regs_register:
@@ -1946,6 +2168,8 @@ static void __exit mlxplat_exit(void)
struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
int i;
for (i = MLXPLAT_CPLD_WD_MAX_DEVS - 1; i >= 0 ; i--)
platform_device_unregister(priv->pdev_wd[i]);
if (priv->pdev_fan)
platform_device_unregister(priv->pdev_fan);
if (priv->pdev_io_regs)
...
@@ -4424,14 +4424,16 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
}
return AE_OK;
}
case ACPI_RESOURCE_TYPE_END_TAG:
return AE_OK;
default:
dprintk("Resource %d isn't an IRQ nor an IO port\n",
resource->type);
return AE_CTRL_TERMINATE;
- case ACPI_RESOURCE_TYPE_END_TAG:
- return AE_OK;
}
- return AE_CTRL_TERMINATE;
}
static int sony_pic_possible_resources(struct acpi_device *device)
...
@@ -79,7 +79,7 @@
#include <linux/jiffies.h>
#include <linux/workqueue.h>
#include <linux/acpi.h>
- #include <linux/pci_ids.h>
+ #include <linux/pci.h>
#include <linux/power_supply.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -4212,7 +4212,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
known_ev = true;
break;
}
- /* fallthrough to default */
+ /* fallthrough - to default */
default:
known_ev = false;
}
@@ -4501,6 +4501,74 @@ static void bluetooth_exit(void)
bluetooth_shutdown();
}
static const struct dmi_system_id bt_fwbug_list[] __initconst = {
{
.ident = "ThinkPad E485",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20KU"),
},
},
{
.ident = "ThinkPad E585",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20KV"),
},
},
{
.ident = "ThinkPad A285 - 20MW",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20MW"),
},
},
{
.ident = "ThinkPad A285 - 20MX",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20MX"),
},
},
{
.ident = "ThinkPad A485 - 20MU",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20MU"),
},
},
{
.ident = "ThinkPad A485 - 20MV",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_BOARD_NAME, "20MV"),
},
},
{}
};
static const struct pci_device_id fwbug_cards_ids[] __initconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x24FD) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2526) },
{}
};
static int __init have_bt_fwbug(void)
{
/*
* Some AMD-based ThinkPads have a firmware bug: calling "GBDC"
* blocks Bluetooth on Intel wireless cards.
*/
if (dmi_check_system(bt_fwbug_list) && pci_dev_present(fwbug_cards_ids)) {
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
FW_BUG "disable bluetooth subdriver for Intel cards\n");
return 1;
} else
return 0;
}
static int __init bluetooth_init(struct ibm_init_struct *iibm)
{
int res;
@@ -4513,7 +4581,7 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
/* bluetooth not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
G4x, R30, R31, R40e, R50e, T20-22, X20-21 */
- tp_features.bluetooth = hkey_handle &&
+ tp_features.bluetooth = !have_bt_fwbug() && hkey_handle &&
acpi_evalf(hkey_handle, &status, "GBDC", "qd");
vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_RFKILL,
@@ -5808,7 +5876,7 @@ static int led_set_status(const unsigned int led,
return -EPERM;
if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
(1 << led), led_sled_arg1[ledstatus]))
- rc = -EIO;
+ return -EIO;
break;
case TPACPI_LED_OLD:
/* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20 */
@@ -5832,10 +5900,10 @@ static int led_set_status(const unsigned int led,
return -EPERM;
if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
led, led_led_arg1[ledstatus]))
- rc = -EIO;
+ return -EIO;
break;
default:
- rc = -ENXIO;
+ return -ENXIO;
}
if (!rc)
@@ -6249,8 +6317,8 @@ static int thermal_get_sensor(int idx, s32 *value)
t = TP_EC_THERMAL_TMP8;
idx -= 8;
}
- /* fallthrough */
#endif
+ /* fallthrough */
case TPACPI_THERMAL_TPEC_8:
if (idx <= 7) {
if (!acpi_ec_read(t + idx, &tmp))
@@ -9890,6 +9958,37 @@ static char __init tpacpi_parse_fw_id(const char * const s,
return '\0';
}
static void find_new_ec_fwstr(const struct dmi_header *dm, void *private)
{
char *ec_fw_string = (char *) private;
const char *dmi_data = (const char *)dm;
/*
* ThinkPad Embedded Controller Program Table on newer models
*
* Offset | Name | Width | Description
* ----------------------------------------------------
* 0x00 | Type | BYTE | 0x8C
* 0x01 | Length | BYTE |
* 0x02 | Handle | WORD | Varies
* 0x04 | Signature | BYTEx6 | ASCII for "LENOVO"
* 0x0A | OEM struct offset | BYTE | 0x0B
* 0x0B | OEM struct number | BYTE | 0x07, for this structure
* 0x0C | OEM struct revision | BYTE | 0x01, for this format
* 0x0D | ECP version ID | STR ID |
* 0x0E | ECP release date | STR ID |
*/
/* Return if the data structure does not match */
if (dm->type != 140 || dm->length < 0x0F ||
memcmp(dmi_data + 4, "LENOVO", 6) != 0 ||
dmi_data[0x0A] != 0x0B || dmi_data[0x0B] != 0x07 ||
dmi_data[0x0C] != 0x01)
return;
/* fwstr is the first 8-byte string */
strncpy(ec_fw_string, dmi_data + 0x0F, 8);
}
/* returns 0 - probe ok, or < 0 - probe error.
* Probe ok doesn't mean thinkpad found.
* On error, kfree() cleanup on tp->* is not performed, caller must do it */
@@ -9897,7 +9996,7 @@ static int __must_check __init get_thinkpad_model_data(
struct thinkpad_id_data *tp)
{
const struct dmi_device *dev = NULL;
- char ec_fw_string[18];
+ char ec_fw_string[18] = {0};
char const *s;
char t;
@@ -9937,20 +10036,25 @@ static int __must_check __init get_thinkpad_model_data(
ec_fw_string) == 1) {
ec_fw_string[sizeof(ec_fw_string) - 1] = 0;
ec_fw_string[strcspn(ec_fw_string, " ]")] = 0;
+ break;
+ }
+ }
- tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL);
- if (!tp->ec_version_str)
- return -ENOMEM;
- t = tpacpi_parse_fw_id(ec_fw_string,
- &tp->ec_model, &tp->ec_release);
- if (t != 'H') {
- pr_notice("ThinkPad firmware release %s doesn't match the known patterns\n",
- ec_fw_string);
- pr_notice("please report this to %s\n",
- TPACPI_MAIL);
- }
- break;
+ /* Newer ThinkPads have different EC program info table */
+ if (!ec_fw_string[0])
+ dmi_walk(find_new_ec_fwstr, &ec_fw_string);
+ if (ec_fw_string[0]) {
+ tp->ec_version_str = kstrdup(ec_fw_string, GFP_KERNEL);
+ if (!tp->ec_version_str)
+ return -ENOMEM;
+ t = tpacpi_parse_fw_id(ec_fw_string,
+ &tp->ec_model, &tp->ec_release);
+ if (t != 'H') {
+ pr_notice("ThinkPad firmware release %s doesn't match the known patterns\n",
+ ec_fw_string);
+ pr_notice("please report this to %s\n", TPACPI_MAIL);
+ }
}
}
@@ -10165,7 +10269,7 @@ MODULE_PARM_DESC(volume_mode,
module_param_named(volume_capabilities, volume_capabilities, uint, 0444);
MODULE_PARM_DESC(volume_capabilities,
- "Selects the mixer capabilites: 0=auto, 1=volume and mute, 2=mute only");
+ "Selects the mixer capabilities: 0=auto, 1=volume and mute, 2=mute only");
module_param_named(volume_control, volume_control_allowed, bool, 0444);
MODULE_PARM_DESC(volume_control,
...
@@ -249,6 +249,21 @@ static const struct ts_dmi_data jumper_ezpad_6_pro_data = {
.properties = jumper_ezpad_6_pro_props,
};
static const struct property_entry jumper_ezpad_6_pro_b_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-jumper-ezpad-6-pro-b.fw"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data jumper_ezpad_6_pro_b_data = {
.acpi_name = "MSSL1680:00",
.properties = jumper_ezpad_6_pro_b_props,
};
static const struct property_entry jumper_ezpad_mini3_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 23),
PROPERTY_ENTRY_U32("touchscreen-min-y", 16),
@@ -265,6 +280,23 @@ static const struct ts_dmi_data jumper_ezpad_mini3_data = {
.properties = jumper_ezpad_mini3_props,
};
static const struct property_entry myria_my8307_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1720),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"),
PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-myria-my8307.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
{ }
};
static const struct ts_dmi_data myria_my8307_data = {
.acpi_name = "MSSL1680:00",
.properties = myria_my8307_props,
};
static const struct property_entry onda_obook_20_plus_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1148),
@@ -673,6 +705,17 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
DMI_MATCH(DMI_BIOS_DATE, "08/18/2017"),
},
},
{
/* Jumper EZpad 6 Pro B */
.driver_data = (void *)&jumper_ezpad_6_pro_b_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
DMI_MATCH(DMI_PRODUCT_NAME, "EZpad"),
DMI_MATCH(DMI_BIOS_VERSION, "5.12"),
/* Above matches are too generic, add bios-date match */
DMI_MATCH(DMI_BIOS_DATE, "04/24/2018"),
},
},
{
/* Jumper EZpad mini3 */
.driver_data = (void *)&jumper_ezpad_mini3_data,
@@ -690,6 +733,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
},
},
{
/* Myria MY8307 */
.driver_data = (void *)&myria_my8307_data,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Complet Electro Serv"),
DMI_MATCH(DMI_PRODUCT_NAME, "MY8307"),
},
},
{
/* Onda oBook 20 Plus */
.driver_data = (void *)&onda_obook_20_plus_data,
...
@@ -67,6 +67,7 @@
/* Input */
#define ASUS_WMI_DEVID_TOUCHPAD 0x00100011
#define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012
#define ASUS_WMI_DEVID_FNLOCK 0x00100023
/* Fan, Thermal */
#define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011
...