Commit 2bf16b7a authored by Linus Torvalds

Merge tag 'char-misc-4.15-rc1' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc updates from Greg KH:
 "Here is the big set of char/misc and other driver subsystem patches
  for 4.15-rc1.

  There are small changes all over here, hyperv driver updates, pcmcia
  driver updates, w1 driver updates, vme driver updates, nvmem driver
  updates, and lots of other little one-off driver updates as well. The
  shortlog has the full details.

  All of these have been in linux-next for quite a while with no
  reported issues"

* tag 'char-misc-4.15-rc1' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (90 commits)
  VME: Return -EBUSY when DMA list in use
  w1: keep balance of mutex locks and refcnts
  MAINTAINERS: Update VME subsystem tree.
  nvmem: sunxi-sid: add support for A64/H5's SID controller
  nvmem: imx-ocotp: Update module description
  nvmem: imx-ocotp: Enable i.MX7D OTP write support
  nvmem: imx-ocotp: Add i.MX7D timing write clock setup support
  nvmem: imx-ocotp: Move i.MX6 write clock setup to dedicated function
  nvmem: imx-ocotp: Add support for banked OTP addressing
  nvmem: imx-ocotp: Pass parameters via a struct
  nvmem: imx-ocotp: Restrict OTP write to IMX6 processors
  nvmem: uniphier: add UniPhier eFuse driver
  dt-bindings: nvmem: add description for UniPhier eFuse
  nvmem: set nvmem->owner to nvmem->dev->driver->owner if unset
  nvmem: qfprom: fix different address space warnings of sparse
  nvmem: mtk-efuse: fix different address space warnings of sparse
  nvmem: mtk-efuse: use stack for nvmem_config instead of malloc'ing it
  nvmem: imx-iim: use stack for nvmem_config instead of malloc'ing it
  thunderbolt: tb: fix use after free in tb_activate_pcie_devices
  MAINTAINERS: Add git tree for Thunderbolt development
  ...
parents b9743042 f13d1a8a
......@@ -41,3 +41,73 @@ KernelVersion: 4.5
Contact: K. Y. Srinivasan <kys@microsoft.com>
Description: The 16 bit vendor ID of the device
Users: tools/hv/lsvmbus and user level RDMA libraries
What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/cpu
Date: September 2017
KernelVersion: 4.14
Contact: Stephen Hemminger <sthemmin@microsoft.com>
Description: The VCPU to which the (sub)channel is affinitized
Users: tools/hv/lsvmbus and other debugging tools
What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/in_mask
Date: September 2017
KernelVersion: 4.14
Contact: Stephen Hemminger <sthemmin@microsoft.com>
Description: Inbound channel signaling state
Users: Debugging tools
What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/latency
Date: September 2017
KernelVersion: 4.14
Contact: Stephen Hemminger <sthemmin@microsoft.com>
Description: Channel signaling latency
Users: Debugging tools
What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/out_mask
Date: September 2017
KernelVersion: 4.14
Contact: Stephen Hemminger <sthemmin@microsoft.com>
Description: Outbound channel signaling state
Users: Debugging tools
What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/pending
Date: September 2017
KernelVersion: 4.14
Contact: Stephen Hemminger <sthemmin@microsoft.com>
Description: Channel interrupt pending state
Users: Debugging tools
What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/read_avail
Date: September 2017
KernelVersion: 4.14
Contact: Stephen Hemminger <sthemmin@microsoft.com>
Description: Bytes available to read
Users: Debugging tools
What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/write_avail
Date: September 2017
KernelVersion: 4.14
Contact: Stephen Hemminger <sthemmin@microsoft.com>
Description: Bytes available to write
Users: Debugging tools
What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/events
Date: September 2017
KernelVersion: 4.14
Contact: Stephen Hemminger <sthemmin@microsoft.com>
Description: Number of times we have signaled the host
Users: Debugging tools
What: /sys/bus/vmbus/devices/vmbus_*/channels/relid/interrupts
Date: September 2017
KernelVersion: 4.14
Contact: Stephen Hemminger <sthemmin@microsoft.com>
Description: Number of times we have taken an interrupt (incoming)
Users: Debugging tools
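A debugging tool can read these attributes with plain file I/O. A minimal sketch
in C (the device GUID and relid in the path are placeholders, not real values):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/vmbus/devices/"
		"f8615163-df3e-46c5-913f-f2d2f965ed0e/channels/14/read_avail";
	unsigned long long val;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%llu", &val) == 1)
		printf("bytes available to read: %llu\n", val);
	fclose(f);
	return 0;
}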
What: /sys/bus/w1/devices/19-<id>/speed
Date: Sep 2017
KernelVersion: 4.14
Contact: Jan Kandziora <jjj@gmx.de>
Description: When written, this file sets the I2C speed on the connected
DS28E17 chip. When read, it reads the current setting from
the DS28E17 chip.
Valid values: 100, 400, 900 [kBaud].
Default 100, can be set by w1_ds28e17.speed= module parameter.
Users: w1_ds28e17 driver
What: /sys/bus/w1/devices/19-<id>/stretch
Date: Sep 2017
KernelVersion: 4.14
Contact: Jan Kandziora <jjj@gmx.de>
Description: When written, this file sets the multiplier used to calculate
the busy timeout for I2C operations on the connected DS28E17
chip. When read, returns the current setting.
Valid values: 1 to 9.
Default 1, can be set by w1_ds28e17.stretch= module parameter.
Users: w1_ds28e17 driver
......@@ -5,6 +5,7 @@ Required properties:
"allwinner,sun4i-a10-sid"
"allwinner,sun7i-a20-sid"
"allwinner,sun8i-h3-sid"
"allwinner,sun50i-a64-sid"
- reg: Should contain registers location and length
......
= Amlogic eFuse device tree bindings =
= Amlogic Meson GX eFuse device tree bindings =
Required properties:
- compatible: should be "amlogic,meson-gxbb-efuse"
......
Amlogic Meson6/Meson8/Meson8b efuse
Required Properties:
- compatible: depending on the SoC this should be one of:
- "amlogic,meson6-efuse"
- "amlogic,meson8-efuse"
- "amlogic,meson8b-efuse"
- reg: base address and size of the efuse registers
- clocks: a reference to the efuse core gate clock
- clock-names: must be "core"
All properties and sub-nodes as well as the consumer bindings
defined in nvmem.txt in this directory are also supported.
Example:
efuse: nvmem@0 {
compatible = "amlogic,meson8-efuse";
reg = <0x0 0x2000>;
clocks = <&clkc CLKID_EFUSE>;
clock-names = "core";
};
......@@ -6,6 +6,7 @@ Required properties:
- "rockchip,rk3188-efuse" - for RK3188 SoCs.
- "rockchip,rk3228-efuse" - for RK3228 SoCs.
- "rockchip,rk3288-efuse" - for RK3288 SoCs.
- "rockchip,rk3368-efuse" - for RK3368 SoCs.
- "rockchip,rk3399-efuse" - for RK3399 SoCs.
- reg: Should contain the registers location and exact eFuse size
- clocks: Should be the clock id of eFuse
......
Device tree bindings for Low Power General Purpose Register found in i.MX6Q/D
Secure Non-Volatile Storage.
This DT node should be represented as a sub-node of a "syscon",
"simple-mfd" node.
Required properties:
- compatible: should be one of the following variants:
"fsl,imx6q-snvs-lpgpr" for Freescale i.MX6Q/D/DL/S
"fsl,imx6ul-snvs-lpgpr" for Freescale i.MX6UL
Example:
snvs: snvs@020cc000 {
compatible = "fsl,sec-v4.0-mon", "syscon", "simple-mfd";
reg = <0x020cc000 0x4000>;
snvs_lpgpr: snvs-lpgpr {
compatible = "fsl,imx6q-snvs-lpgpr";
};
};
= UniPhier eFuse device tree bindings =
The UniPhier eFuse node must be a child of the soc-glue node.
Required properties:
- compatible: should be "socionext,uniphier-efuse"
- reg: should contain the register location and length
= Data cells =
Data cells are child nodes of the efuse node; their bindings are described in
bindings/nvmem/nvmem.txt
Example:
soc-glue@5f900000 {
compatible = "socionext,uniphier-ld20-soc-glue-debug",
"simple-mfd";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x5f900000 0x2000>;
efuse@100 {
compatible = "socionext,uniphier-efuse";
reg = <0x100 0x28>;
};
efuse@200 {
compatible = "socionext,uniphier-efuse";
reg = <0x200 0x68>;
#address-cells = <1>;
#size-cells = <1>;
/* Data cells */
usb_mon: usb-mon@54 {
reg = <0x54 0xc>;
};
};
};
= Data consumers =
Data consumers are device nodes which consume nvmem data cells.
Example:
usb {
...
nvmem-cells = <&usb_mon>;
nvmem-cell-names = "usb_mon";
}
......@@ -149,11 +149,23 @@ If you want to limit idle states at boot time, you can use "nohlt" or
At the runtime you can disable idle states with below methods:
Set latency request to /dev/cpu_dma_latency to disable all CPUs specific idle
states (if latency = 0uS then disable all idle states):
# echo "what_ever_latency_you_need_in_uS" > /dev/cpu_dma_latency
Disable specific CPU's specific idle state:
It is possible to disable CPU idle states by way of the PM QoS
subsystem, more specifically by using the "/dev/cpu_dma_latency"
interface (see Documentation/power/pm_qos_interface.txt for more
details). As specified in the PM QoS documentation the requested
parameter will stay in effect until the file descriptor is released.
For example:
# exec 3<> /dev/cpu_dma_latency; echo 0 >&3
...
Do some work...
...
# exec 3>&-
The same can also be done from an application program.
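A minimal C sketch of doing this from a program, holding the PM QoS request open
for as long as the work runs (the 10 microsecond bound is an arbitrary example
value):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int32_t latency_us = 10;	/* arbitrary example bound, in microseconds */
	int fd = open("/dev/cpu_dma_latency", O_RDWR);

	if (fd < 0) {
		perror("/dev/cpu_dma_latency");
		return 1;
	}
	/* The request stays in effect until the file descriptor is closed. */
	if (write(fd, &latency_us, sizeof(latency_us)) != sizeof(latency_us)) {
		perror("write");
		close(fd);
		return 1;
	}

	/* ... do the latency-sensitive work here ... */
	sleep(10);

	close(fd);			/* releases the request */
	return 0;
}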
Disable specific CPU's specific idle state from cpuidle sysfs (see
Documentation/cpuidle/sysfs.txt):
# echo 1 > /sys/devices/system/cpu/cpu$cpu/cpuidle/state$state/disable
......
......@@ -10,3 +10,5 @@ w1_ds2438
- The Maxim/Dallas Semiconductor ds2438 smart battery monitor.
w1_ds28e04
- The Maxim/Dallas Semiconductor ds28e04 eeprom.
w1_ds28e17
- The Maxim/Dallas Semiconductor ds28e17 1-Wire-to-I2C Master Bridge.
Kernel driver w1_ds28e17
========================
Supported chips:
* Maxim DS28E17 1-Wire-to-I2C Master Bridge
supported family codes:
W1_FAMILY_DS28E17 0x19
Author: Jan Kandziora <jjj@gmx.de>
Description
-----------
The DS28E17 is a Onewire slave device which acts as an I2C bus master.
This driver creates a new I2C bus for any DS28E17 device detected. I2C buses
come and go as the DS28E17 devices come and go. I2C slave devices connected to
a DS28E17 can be accessed by the kernel or userspace tools as if they were
connected to a "native" I2C bus master.
A udev rule like the following
-------------------------------------------------------------------------------
SUBSYSTEM=="i2c-dev", KERNEL=="i2c-[0-9]*", ATTRS{name}=="w1-19-*", \
SYMLINK+="i2c-$attr{name}"
-------------------------------------------------------------------------------
may be used to create stable /dev/i2c- entries based on the unique id of the
DS28E17 chip.
Driver parameters are:
speed:
This sets up the default I2C speed a DS28E17 gets configured for as soon
as it is connected. The power-on default of the DS28E17 is 400kBaud, but
chips may come and go on the Onewire bus without being de-powered and
as soon as the "w1_ds28e17" driver notices a freshly connected, or
reconnected DS28E17 device on the Onewire bus, it will re-apply this
setting.
Valid values are 100, 400, 900 [kBaud]. Any other value leaves the
current DS28E17 setting untouched on detect. The default value is 100.
stretch:
This sets up the default stretch value used for freshly connected
DS28E17 devices. It is a multiplier used on the calculation of the busy
wait time for an I2C transfer. This is to account for I2C slave devices
which make heavy use of the I2C clock stretching feature and thus, the
needed timeout cannot be pre-calculated correctly. As the w1_ds28e17
driver checks the DS28E17's busy flag in a loop after the precalculated
wait time, it should be hardly needed to tweak this setting.
Leave it at 1 unless you get ETIMEDOUT errors and a "w1_slave_driver
19-00000002dbd8: busy timeout" in the kernel log.
Valid values are 1 to 9. The default is 1.
The driver creates sysfs files /sys/bus/w1/devices/19-<id>/speed and
/sys/bus/w1/devices/19-<id>/stretch for each device, preloaded with the default
settings from the driver parameters. They may be changed anytime. In addition a
directory /sys/bus/w1/devices/19-<id>/i2c-<nnn> for the I2C bus master sysfs
structure is created.
See https://github.com/ianka/w1_ds28e17 for even more information.
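Once the bridge's I2C adapter has shown up, slave devices behind the DS28E17 are
driven through the ordinary i2c-dev interface. A minimal sketch, assuming a
hypothetical /dev/i2c-5 adapter node and a slave at address 0x48 (use the
/dev/i2c-w1-19-<id> symlink if you installed the udev rule above):

#include <fcntl.h>
#include <linux/i2c-dev.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/i2c-5", O_RDWR);		/* placeholder adapter */
	unsigned char reg = 0x00, val;

	if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x48) < 0) {	/* placeholder address */
		perror("i2c setup");
		return 1;
	}
	/* Select a register on the slave, then read one byte back. */
	if (write(fd, &reg, 1) != 1 || read(fd, &val, 1) != 1) {
		perror("i2c transfer");
		close(fd);
		return 1;
	}
	printf("reg 0x%02x = 0x%02x\n", reg, val);
	close(fd);
	return 0;
}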
......@@ -5474,7 +5474,7 @@ K: fmc_d.*register
FPGA MANAGER FRAMEWORK
M: Alan Tull <atull@kernel.org>
R: Moritz Fischer <mdf@kernel.org>
M: Moritz Fischer <mdf@kernel.org>
L: linux-fpga@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
......@@ -13390,6 +13390,7 @@ M: Andreas Noever <andreas.noever@gmail.com>
M: Michael Jamet <michael.jamet@intel.com>
M: Mika Westerberg <mika.westerberg@linux.intel.com>
M: Yehezkel Bernat <yehezkel.bernat@intel.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
S: Maintained
F: drivers/thunderbolt/
F: include/linux/thunderbolt.h
......@@ -14485,7 +14486,7 @@ M: Manohar Vanga <manohar.vanga@gmail.com>
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: devel@driverdev.osuosl.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
F: Documentation/driver-api/vme.rst
F: drivers/staging/vme/
F: drivers/vme/
......
......@@ -210,9 +210,10 @@ void hyperv_cleanup(void)
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);
void hyperv_report_panic(struct pt_regs *regs)
void hyperv_report_panic(struct pt_regs *regs, long err)
{
static bool panic_reported;
u64 guest_id;
/*
* We prefer to report panic on 'die' chain as we have proper
......@@ -223,11 +224,13 @@ void hyperv_report_panic(struct pt_regs *regs)
return;
panic_reported = true;
wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);
rdmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
wrmsrl(HV_X64_MSR_CRASH_P0, err);
wrmsrl(HV_X64_MSR_CRASH_P1, guest_id);
wrmsrl(HV_X64_MSR_CRASH_P2, regs->ip);
wrmsrl(HV_X64_MSR_CRASH_P3, regs->ax);
wrmsrl(HV_X64_MSR_CRASH_P4, regs->sp);
/*
* Let Hyper-V know there is crash data available
......
......@@ -311,7 +311,7 @@ static inline int hv_cpu_number_to_vp_number(int cpu_number)
void hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void hyper_alloc_mmu(void);
void hyperv_report_panic(struct pt_regs *regs);
void hyperv_report_panic(struct pt_regs *regs, long err);
bool hv_is_hypercall_page_setup(void);
void hyperv_cleanup(void);
#else /* CONFIG_HYPERV */
......
......@@ -2192,7 +2192,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
off_start,
offp - off_start);
if (!parent) {
pr_err("transaction release %d bad parent offset",
pr_err("transaction release %d bad parent offset\n",
debug_id);
continue;
}
......
......@@ -186,12 +186,12 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
}
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
void *start, void *end,
struct vm_area_struct *vma)
void *start, void *end)
{
void *page_addr;
unsigned long user_page_addr;
struct binder_lru_page *page;
struct vm_area_struct *vma = NULL;
struct mm_struct *mm = NULL;
bool need_mm = false;
......@@ -215,7 +215,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
}
if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm))
if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
mm = alloc->vma_vm_mm;
if (mm) {
......@@ -437,7 +437,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
ret = binder_update_page_range(alloc, 1,
(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
if (ret)
return ERR_PTR(ret);
......@@ -478,7 +478,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
err_alloc_buf_struct_failed:
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
end_page_addr, NULL);
end_page_addr);
return ERR_PTR(-ENOMEM);
}
......@@ -562,8 +562,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
alloc->pid, buffer->data,
prev->data, next ? next->data : NULL);
binder_update_page_range(alloc, 0, buffer_start_page(buffer),
buffer_start_page(buffer) + PAGE_SIZE,
NULL);
buffer_start_page(buffer) + PAGE_SIZE);
}
list_del(&buffer->entry);
kfree(buffer);
......@@ -600,8 +599,7 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
binder_update_page_range(alloc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
NULL);
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
......@@ -984,7 +982,7 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
return ret;
}
struct shrinker binder_shrinker = {
static struct shrinker binder_shrinker = {
.count_objects = binder_shrink_count,
.scan_objects = binder_shrink_scan,
.seeks = DEFAULT_SEEKS,
......
......@@ -659,9 +659,9 @@ static void terminate_monitor(struct cm4000_dev *dev)
* is already doing that for you.
*/
static void monitor_card(unsigned long p)
static void monitor_card(struct timer_list *t)
{
struct cm4000_dev *dev = (struct cm4000_dev *) p;
struct cm4000_dev *dev = from_timer(dev, t, timer);
unsigned int iobase = dev->p_dev->resource[0]->start;
unsigned short s;
struct ptsreq ptsreq;
......@@ -1374,7 +1374,7 @@ static void start_monitor(struct cm4000_dev *dev)
DEBUGP(3, dev, "-> start_monitor\n");
if (!dev->monitor_running) {
DEBUGP(5, dev, "create, init and add timer\n");
setup_timer(&dev->timer, monitor_card, (unsigned long)dev);
timer_setup(&dev->timer, monitor_card, 0);
dev->monitor_running = 1;
mod_timer(&dev->timer, jiffies);
} else
......
......@@ -104,9 +104,9 @@ static inline unsigned char xinb(unsigned short port)
/* poll the device fifo status register. not to be confused with
* the poll syscall. */
static void cm4040_do_poll(unsigned long dummy)
static void cm4040_do_poll(struct timer_list *t)
{
struct reader_dev *dev = (struct reader_dev *) dummy;
struct reader_dev *dev = from_timer(dev, t, poll_timer);
unsigned int obs = xinb(dev->p_dev->resource[0]->start
+ REG_OFFSET_BUFFER_STATUS);
......@@ -465,7 +465,6 @@ static int cm4040_open(struct inode *inode, struct file *filp)
link->open = 1;
dev->poll_timer.data = (unsigned long) dev;
mod_timer(&dev->poll_timer, jiffies + POLL_PERIOD);
DEBUGP(2, dev, "<- cm4040_open (successfully)\n");
......@@ -585,7 +584,7 @@ static int reader_probe(struct pcmcia_device *link)
init_waitqueue_head(&dev->poll_wait);
init_waitqueue_head(&dev->read_wait);
init_waitqueue_head(&dev->write_wait);
setup_timer(&dev->poll_timer, cm4040_do_poll, 0);
timer_setup(&dev->poll_timer, cm4040_do_poll, 0);
ret = reader_config(link, i);
if (ret) {
......
......@@ -375,7 +375,7 @@ static void reset_device(MGSLPC_INFO *info);
static void hdlc_mode(MGSLPC_INFO *info);
static void async_mode(MGSLPC_INFO *info);
static void tx_timeout(unsigned long context);
static void tx_timeout(struct timer_list *t);
static int carrier_raised(struct tty_port *port);
static void dtr_rts(struct tty_port *port, int onoff);
......@@ -1289,7 +1289,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
memset(&info->icount, 0, sizeof(info->icount));
setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
timer_setup(&info->tx_timer, tx_timeout, 0);
/* Allocate and claim adapter resources */
retval = claim_resources(info);
......@@ -3846,9 +3846,9 @@ static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
/* HDLC frame time out
* update stats and do tx completion processing
*/
static void tx_timeout(unsigned long context)
static void tx_timeout(struct timer_list *t)
{
MGSLPC_INFO *info = (MGSLPC_INFO*)context;
MGSLPC_INFO *info = from_timer(info, t, tx_timer);
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
......
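The three hunks above all follow the same timer API conversion: the callback now
takes a struct timer_list pointer and recovers its container with from_timer(),
while timer_setup() replaces setup_timer() and takes flags instead of a data
cookie. A minimal sketch of the pattern with a hypothetical driver structure:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo_dev {				/* hypothetical device structure */
	struct timer_list poll_timer;
	int pending;
};

static void foo_poll(struct timer_list *t)
{
	/* from_timer() replaces the old cast of an unsigned long cookie. */
	struct foo_dev *dev = from_timer(dev, t, poll_timer);

	dev->pending = 0;
}

static void foo_start(struct foo_dev *dev)
{
	timer_setup(&dev->poll_timer, foo_poll, 0);	/* third argument is flags */
	mod_timer(&dev->poll_timer, jiffies + HZ);
}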
......@@ -79,7 +79,7 @@ static int xlnx_pr_decoupler_enable_show(struct fpga_bridge *bridge)
return !status;
}
static struct fpga_bridge_ops xlnx_pr_decoupler_br_ops = {
static const struct fpga_bridge_ops xlnx_pr_decoupler_br_ops = {
.enable_set = xlnx_pr_decoupler_enable_set,
.enable_show = xlnx_pr_decoupler_enable_show,
};
......
......@@ -185,7 +185,7 @@ static int fsi_slave_calc_addr(struct fsi_slave *slave, uint32_t *addrp,
return 0;
}
int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
static int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
{
struct fsi_master *master = slave->master;
uint32_t irq, stat;
......@@ -215,8 +215,8 @@ int fsi_slave_report_and_clear_errors(struct fsi_slave *slave)
static int fsi_slave_set_smode(struct fsi_master *master, int link, int id);
int fsi_slave_handle_error(struct fsi_slave *slave, bool write, uint32_t addr,
size_t size)
static int fsi_slave_handle_error(struct fsi_slave *slave, bool write,
uint32_t addr, size_t size)
{
struct fsi_master *master = slave->master;
int rc, link;
......
......@@ -3,7 +3,9 @@ obj-$(CONFIG_HYPERV) += hv_vmbus.o
obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
CFLAGS_hv_trace.o = -I$(src)
hv_vmbus-y := vmbus_drv.o \
hv.o connection.o channel.o \
channel_mgmt.o ring_buffer.o
channel_mgmt.o ring_buffer.o hv_trace.o
hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_fcopy.o hv_utils_transport.o
......@@ -43,6 +43,8 @@ void vmbus_setevent(struct vmbus_channel *channel)
{
struct hv_monitor_page *monitorpage;
trace_vmbus_setevent(channel);
/*
* For channels marked as in "low latency" mode
* bypass the monitor page mechanism.
......@@ -185,6 +187,8 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
ret = vmbus_post_msg(open_msg,
sizeof(struct vmbus_channel_open_channel), true);
trace_vmbus_open(open_msg, ret);
if (ret != 0) {
err = ret;
goto error_clean_msglist;
......@@ -234,13 +238,18 @@ int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
const uuid_le *shv_host_servie_id)
{
struct vmbus_channel_tl_connect_request conn_msg;
int ret;
memset(&conn_msg, 0, sizeof(conn_msg));
conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
conn_msg.guest_endpoint_id = *shv_guest_servie_id;
conn_msg.host_service_id = *shv_host_servie_id;
return vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
trace_vmbus_send_tl_connect_request(&conn_msg, ret);
return ret;
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);
......@@ -433,6 +442,9 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
sizeof(*msginfo), true);
trace_vmbus_establish_gpadl_header(gpadlmsg, ret);
if (ret != 0)
goto cleanup;
......@@ -448,6 +460,9 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
ret = vmbus_post_msg(gpadl_body,
submsginfo->msgsize - sizeof(*submsginfo),
true);
trace_vmbus_establish_gpadl_body(gpadl_body, ret);
if (ret != 0)
goto cleanup;
......@@ -511,6 +526,8 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
true);
trace_vmbus_teardown_gpadl(msg, ret);
if (ret)
goto post_msg_err;
......@@ -589,6 +606,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
true);
trace_vmbus_close_internal(msg, ret);
if (ret) {
pr_err("Close failed: close post msg return is %d\n", ret);
/*
......@@ -745,6 +764,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
desc.length8 = (u16)(packetlen_aligned >> 3);
desc.transactionid = requestid;
desc.reserved = 0;
desc.rangecount = pagecount;
for (i = 0; i < pagecount; i++) {
......@@ -788,6 +808,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
desc->length8 = (u16)(packetlen_aligned >> 3);
desc->transactionid = requestid;
desc->reserved = 0;
desc->rangecount = 1;
bufferlist[0].iov_base = desc;
......
......@@ -350,7 +350,7 @@ static void free_channel(struct vmbus_channel *channel)
{
tasklet_kill(&channel->callback_event);
kfree_rcu(channel, rcu);
kobject_put(&channel->kobj);
}
static void percpu_channel_enq(void *arg)
......@@ -373,12 +373,15 @@ static void percpu_channel_deq(void *arg)
static void vmbus_release_relid(u32 relid)
{
struct vmbus_channel_relid_released msg;
int ret;
memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
msg.child_relid = relid;
msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
true);
ret = vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
true);
trace_vmbus_release_relid(&msg, ret);
}
void hv_process_channel_removal(u32 relid)
......@@ -520,6 +523,14 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
newchannel->state = CHANNEL_OPEN_STATE;
if (!fnew) {
struct hv_device *dev
= newchannel->primary_channel->device_obj;
if (vmbus_add_channel_kobj(dev, newchannel)) {
atomic_dec(&vmbus_connection.offer_in_progress);
goto err_free_chan;
}
if (channel->sc_creation_callback != NULL)
channel->sc_creation_callback(newchannel);
newchannel->probe_done = true;
......@@ -805,6 +816,8 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
offer = (struct vmbus_channel_offer_channel *)hdr;
trace_vmbus_onoffer(offer);
/* Allocate the channel object and save this offer. */
newchannel = alloc_channel();
if (!newchannel) {
......@@ -846,6 +859,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
rescind = (struct vmbus_channel_rescind_offer *)hdr;
trace_vmbus_onoffer_rescind(rescind);
/*
* The offer msg and the corresponding rescind msg
* from the host are guaranteed to be ordered -
......@@ -974,6 +989,8 @@ static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
result = (struct vmbus_channel_open_result *)hdr;
trace_vmbus_onopen_result(result);
/*
* Find the open msg, copy the result and signal/unblock the wait event
*/
......@@ -1018,6 +1035,8 @@ static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;
trace_vmbus_ongpadl_created(gpadlcreated);
/*
* Find the establish msg, copy the result and signal/unblock the wait
* event
......@@ -1066,6 +1085,8 @@ static void vmbus_ongpadl_torndown(
gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;
trace_vmbus_ongpadl_torndown(gpadl_torndown);
/*
* Find the open msg, copy the result and signal/unblock the wait event
*/
......@@ -1109,6 +1130,9 @@ static void vmbus_onversion_response(
unsigned long flags;
version_response = (struct vmbus_channel_version_response *)hdr;
trace_vmbus_onversion_response(version_response);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
......@@ -1168,6 +1192,8 @@ void vmbus_onmessage(void *context)
hdr = (struct vmbus_channel_message_header *)msg->u.payload;
size = msg->header.payload_size;
trace_vmbus_on_message(hdr);
if (hdr->msgtype >= CHANNELMSG_COUNT) {
pr_err("Received invalid channel message type %d size %d\n",
hdr->msgtype, size);
......@@ -1201,9 +1227,11 @@ int vmbus_request_offers(void)
msg->msgtype = CHANNELMSG_REQUESTOFFERS;
ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
true);
trace_vmbus_request_offers(ret);
if (ret != 0) {
pr_err("Unable to request offers - %d\n", ret);
......
......@@ -117,6 +117,9 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
ret = vmbus_post_msg(msg,
sizeof(struct vmbus_channel_initiate_contact),
true);
trace_vmbus_negotiate_version(msg, ret);
if (ret != 0) {
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
......@@ -319,6 +322,8 @@ void vmbus_on_event(unsigned long data)
struct vmbus_channel *channel = (void *) data;
unsigned long time_limit = jiffies + 2;
trace_vmbus_on_event(channel);
do {
void (*callback_fn)(void *);
......@@ -409,6 +414,8 @@ void vmbus_set_event(struct vmbus_channel *channel)
if (!channel->is_dedicated_interrupt)
vmbus_send_interrupt(child_relid);
++channel->sig_events;
hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
}
EXPORT_SYMBOL_GPL(vmbus_set_event);
#include "hyperv_vmbus.h"
#define CREATE_TRACE_POINTS
#include "hv_trace.h"
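The trace_vmbus_*() calls added in the hunks above are defined in the new
hv_trace.h pulled in by hv_trace.c; that header is not reproduced here. Purely
as an illustration of the TRACE_EVENT pattern it relies on, a tracepoint taking
a channel message header might be sketched as follows (name and fields are
hypothetical, not the actual definitions):

TRACE_EVENT(vmbus_example_msg,
	TP_PROTO(const struct vmbus_channel_message_header *hdr),
	TP_ARGS(hdr),
	TP_STRUCT__entry(
		__field(unsigned int, msgtype)
	),
	TP_fast_assign(
		__entry->msgtype = hdr->msgtype;
	),
	TP_printk("msgtype=%u", __entry->msgtype)
);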
......@@ -31,6 +31,8 @@
#include <linux/hyperv.h>
#include <linux/interrupt.h>
#include "hv_trace.h"
/*
* Timeout for services such as KVP and fcopy.
*/
......@@ -373,6 +375,8 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
int vmbus_add_channel_kobj(struct hv_device *device_obj,
struct vmbus_channel *channel);
struct vmbus_channel *relid2channel(u32 relid);
......
......@@ -65,7 +65,7 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
regs = current_pt_regs();
hyperv_report_panic(regs);
hyperv_report_panic(regs, val);
return NOTIFY_DONE;
}
......@@ -75,7 +75,7 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
struct die_args *die = (struct die_args *)args;
struct pt_regs *regs = die->regs;
hyperv_report_panic(regs);
hyperv_report_panic(regs, val);
return NOTIFY_DONE;
}
......@@ -107,28 +107,30 @@ static void print_alias_name(struct hv_device *hv_dev, char *alias_name)
sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]);
}
static u8 channel_monitor_group(struct vmbus_channel *channel)
static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
return (u8)channel->offermsg.monitorid / 32;
}
static u8 channel_monitor_offset(struct vmbus_channel *channel)
static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
return (u8)channel->offermsg.monitorid % 32;
}
static u32 channel_pending(struct vmbus_channel *channel,
struct hv_monitor_page *monitor_page)
static u32 channel_pending(const struct vmbus_channel *channel,
const struct hv_monitor_page *monitor_page)
{
u8 monitor_group = channel_monitor_group(channel);
return monitor_page->trigger_group[monitor_group].pending;
}
static u32 channel_latency(struct vmbus_channel *channel,
struct hv_monitor_page *monitor_page)
static u32 channel_latency(const struct vmbus_channel *channel,
const struct hv_monitor_page *monitor_page)
{
u8 monitor_group = channel_monitor_group(channel);
u8 monitor_offset = channel_monitor_offset(channel);
return monitor_page->latency[monitor_group][monitor_offset];
}
......@@ -833,6 +835,8 @@ void vmbus_on_msg_dpc(unsigned long data)
hdr = (struct vmbus_channel_message_header *)msg->u.payload;
trace_vmbus_on_msg_dpc(hdr);
if (hdr->msgtype >= CHANNELMSG_COUNT) {
WARN_ONCE(1, "unknown msgtype=%d\n", hdr->msgtype);
goto msg_handled;
......@@ -942,6 +946,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
if (channel->rescind)
continue;
trace_vmbus_chan_sched(channel);
++channel->interrupts;
switch (channel->callback_mode) {
case HV_CALL_ISR:
vmbus_channel_isr(channel);
......@@ -1133,6 +1141,159 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver)
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
/*
* Called when last reference to channel is gone.
*/
static void vmbus_chan_release(struct kobject *kobj)
{
struct vmbus_channel *channel
= container_of(kobj, struct vmbus_channel, kobj);
kfree_rcu(channel, rcu);
}
struct vmbus_chan_attribute {
struct attribute attr;
ssize_t (*show)(const struct vmbus_channel *chan, char *buf);
ssize_t (*store)(struct vmbus_channel *chan,
const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
struct vmbus_chan_attribute chan_attr_##_name \
= __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
const struct vmbus_chan_attribute *attribute
= container_of(attr, struct vmbus_chan_attribute, attr);
const struct vmbus_channel *chan
= container_of(kobj, struct vmbus_channel, kobj);
if (!attribute->show)
return -EIO;
return attribute->show(chan, buf);
}
static const struct sysfs_ops vmbus_chan_sysfs_ops = {
.show = vmbus_chan_attr_show,
};
static ssize_t out_mask_show(const struct vmbus_channel *channel, char *buf)
{
const struct hv_ring_buffer_info *rbi = &channel->outbound;
return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
}
VMBUS_CHAN_ATTR_RO(out_mask);
static ssize_t in_mask_show(const struct vmbus_channel *channel, char *buf)
{
const struct hv_ring_buffer_info *rbi = &channel->inbound;
return sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
}
VMBUS_CHAN_ATTR_RO(in_mask);
static ssize_t read_avail_show(const struct vmbus_channel *channel, char *buf)
{
const struct hv_ring_buffer_info *rbi = &channel->inbound;
return sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
}
VMBUS_CHAN_ATTR_RO(read_avail);
static ssize_t write_avail_show(const struct vmbus_channel *channel, char *buf)
{
const struct hv_ring_buffer_info *rbi = &channel->outbound;
return sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
}
VMBUS_CHAN_ATTR_RO(write_avail);
static ssize_t show_target_cpu(const struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%u\n", channel->target_cpu);
}
VMBUS_CHAN_ATTR(cpu, S_IRUGO, show_target_cpu, NULL);
static ssize_t channel_pending_show(const struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%d\n",
channel_pending(channel,
vmbus_connection.monitor_pages[1]));
}
VMBUS_CHAN_ATTR(pending, S_IRUGO, channel_pending_show, NULL);
static ssize_t channel_latency_show(const struct vmbus_channel *channel,
char *buf)
{
return sprintf(buf, "%d\n",
channel_latency(channel,
vmbus_connection.monitor_pages[1]));
}
VMBUS_CHAN_ATTR(latency, S_IRUGO, channel_latency_show, NULL);
static ssize_t channel_interrupts_show(const struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%llu\n", channel->interrupts);
}
VMBUS_CHAN_ATTR(interrupts, S_IRUGO, channel_interrupts_show, NULL);
static ssize_t channel_events_show(const struct vmbus_channel *channel, char *buf)
{
return sprintf(buf, "%llu\n", channel->sig_events);
}
VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL);
static struct attribute *vmbus_chan_attrs[] = {
&chan_attr_out_mask.attr,
&chan_attr_in_mask.attr,
&chan_attr_read_avail.attr,
&chan_attr_write_avail.attr,
&chan_attr_cpu.attr,
&chan_attr_pending.attr,
&chan_attr_latency.attr,
&chan_attr_interrupts.attr,
&chan_attr_events.attr,
NULL
};
static struct kobj_type vmbus_chan_ktype = {
.sysfs_ops = &vmbus_chan_sysfs_ops,
.release = vmbus_chan_release,
.default_attrs = vmbus_chan_attrs,
};
/*
* vmbus_add_channel_kobj - setup a sub-directory under device/channels
*/
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
struct kobject *kobj = &channel->kobj;
u32 relid = channel->offermsg.child_relid;
int ret;
kobj->kset = dev->channels_kset;
ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
"%u", relid);
if (ret)
return ret;
kobject_uevent(kobj, KOBJ_ADD);
return 0;
}
/*
* vmbus_device_create - Creates and registers a new child device
* on the vmbus.
......@@ -1164,7 +1325,8 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
*/
int vmbus_device_register(struct hv_device *child_device_obj)
{
int ret = 0;
struct kobject *kobj = &child_device_obj->device.kobj;
int ret;
dev_set_name(&child_device_obj->device, "%pUl",
child_device_obj->channel->offermsg.offer.if_instance.b);
......@@ -1178,13 +1340,32 @@ int vmbus_device_register(struct hv_device *child_device_obj)
* binding...which will eventually call vmbus_match() and vmbus_probe()
*/
ret = device_register(&child_device_obj->device);
if (ret)
if (ret) {
pr_err("Unable to register child device\n");
else
pr_debug("child device %s registered\n",
dev_name(&child_device_obj->device));
return ret;
}
child_device_obj->channels_kset = kset_create_and_add("channels",
NULL, kobj);
if (!child_device_obj->channels_kset) {
ret = -ENOMEM;
goto err_dev_unregister;
}
ret = vmbus_add_channel_kobj(child_device_obj,
child_device_obj->channel);
if (ret) {
pr_err("Unable to register primary channeln");
goto err_kset_unregister;
}
return 0;
err_kset_unregister:
kset_unregister(child_device_obj->channels_kset);
err_dev_unregister:
device_unregister(&child_device_obj->device);
return ret;
}
......
......@@ -199,8 +199,8 @@ static const struct dev_pm_ops replicator_dev_pm_ops = {
static const struct amba_id replicator_ids[] = {
{
.id = 0x0003b909,
.mask = 0x0003ffff,
.id = 0x000bb909,
.mask = 0x000fffff,
},
{
/* Coresight SoC-600 */
......
......@@ -748,8 +748,8 @@ static const struct dev_pm_ops etb_dev_pm_ops = {
static const struct amba_id etb_ids[] = {
{
.id = 0x0003b907,
.mask = 0x0003ffff,
.id = 0x000bb907,
.mask = 0x000fffff,
},
{ 0, 0},
};
......
......@@ -901,33 +901,33 @@ static const struct dev_pm_ops etm_dev_pm_ops = {
static const struct amba_id etm_ids[] = {
{ /* ETM 3.3 */
.id = 0x0003b921,
.mask = 0x0003ffff,
.id = 0x000bb921,
.mask = 0x000fffff,
.data = "ETM 3.3",
},
{ /* ETM 3.5 - Cortex-A5 */
.id = 0x0003b955,
.mask = 0x0003ffff,
.id = 0x000bb955,
.mask = 0x000fffff,
.data = "ETM 3.5",
},
{ /* ETM 3.5 */
.id = 0x0003b956,
.mask = 0x0003ffff,
.id = 0x000bb956,
.mask = 0x000fffff,
.data = "ETM 3.5",
},
{ /* PTM 1.0 */
.id = 0x0003b950,
.mask = 0x0003ffff,
.id = 0x000bb950,
.mask = 0x000fffff,
.data = "PTM 1.0",
},
{ /* PTM 1.1 */
.id = 0x0003b95f,
.mask = 0x0003ffff,
.id = 0x000bb95f,
.mask = 0x000fffff,
.data = "PTM 1.1",
},
{ /* PTM 1.1 Qualcomm */
.id = 0x0003006f,
.mask = 0x0003ffff,
.id = 0x000b006f,
.mask = 0x000fffff,
.data = "PTM 1.1",
},
{ 0, 0},
......
......@@ -248,8 +248,8 @@ static const struct dev_pm_ops funnel_dev_pm_ops = {
static const struct amba_id funnel_ids[] = {
{
.id = 0x0003b908,
.mask = 0x0003ffff,
.id = 0x000bb908,
.mask = 0x000fffff,
},
{
/* Coresight SoC-600 */
......
......@@ -917,13 +917,13 @@ static const struct dev_pm_ops stm_dev_pm_ops = {
static const struct amba_id stm_ids[] = {
{
.id = 0x0003b962,
.mask = 0x0003ffff,
.id = 0x000bb962,
.mask = 0x000fffff,
.data = "STM32",
},
{
.id = 0x0003b963,
.mask = 0x0003ffff,
.id = 0x000bb963,
.mask = 0x000fffff,
.data = "STM500",
},
{ 0, 0},
......
......@@ -439,8 +439,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
static const struct amba_id tmc_ids[] = {
{
.id = 0x0003b961,
.mask = 0x0003ffff,
.id = 0x000bb961,
.mask = 0x000fffff,
},
{
/* Coresight SoC 600 TMC-ETR/ETS */
......
......@@ -194,8 +194,8 @@ static const struct dev_pm_ops tpiu_dev_pm_ops = {
static const struct amba_id tpiu_ids[] = {
{
.id = 0x0003b912,
.mask = 0x0003ffff,
.id = 0x000bb912,
.mask = 0x000fffff,
},
{
.id = 0x0004b912,
......
comment "Altera FPGA firmware download module"
comment "Altera FPGA firmware download module (requires I2C)"
depends on !I2C
config ALTERA_STAPL
tristate "Altera FPGA firmware download module"
......
......@@ -182,6 +182,7 @@ struct dma_mapping {
struct list_head card_list; /* list of usr_maps for card */
struct list_head pin_list; /* list of pinned memory for dev */
int write; /* writable map? useful in unmapping */
};
static inline void genwqe_mapping_init(struct dma_mapping *m,
......@@ -189,6 +190,7 @@ static inline void genwqe_mapping_init(struct dma_mapping *m,
{
memset(m, 0, sizeof(*m));
m->type = type;
m->write = 1; /* Assume the maps we create are R/W */
}
/**
......@@ -347,6 +349,7 @@ enum genwqe_requ_state {
* @user_size: size of user-space memory area
* @page: buffer for partial pages if needed
* @page_dma_addr: dma address partial pages
* @write: should we write it back to userspace?
*/
struct genwqe_sgl {
dma_addr_t sgl_dma_addr;
......@@ -356,6 +359,8 @@ struct genwqe_sgl {
void __user *user_addr; /* user-space base-address */
size_t user_size; /* size of memory area */
int write;
unsigned long nr_pages;
unsigned long fpage_offs;
size_t fpage_size;
......@@ -369,7 +374,7 @@ struct genwqe_sgl {
};
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
void __user *user_addr, size_t user_size);
void __user *user_addr, size_t user_size, int write);
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
dma_addr_t *dma_list);
......
......@@ -942,6 +942,10 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
genwqe_mapping_init(m,
GENWQE_MAPPING_SGL_TEMP);
if (ats_flags == ATS_TYPE_SGL_RD)
m->write = 0;
rc = genwqe_user_vmap(cd, m, (void *)u_addr,
u_size, req);
if (rc != 0)
......@@ -954,7 +958,7 @@ static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
/* create genwqe style scatter gather list */
rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
(void __user *)u_addr,
u_size);
u_size, m->write);
if (rc != 0)
goto err_out;
......
......@@ -296,7 +296,7 @@ static int genwqe_sgl_size(int num_pages)
* from user-space into the cached pages.
*/
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
void __user *user_addr, size_t user_size)
void __user *user_addr, size_t user_size, int write)
{
int rc;
struct pci_dev *pci_dev = cd->pci_dev;
......@@ -312,6 +312,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
sgl->user_addr = user_addr;
sgl->user_size = user_size;
sgl->write = write;
sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);
if (get_order(sgl->sgl_size) > MAX_ORDER) {
......@@ -476,14 +477,20 @@ int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
int rc = 0;
size_t offset;
unsigned long res;
struct pci_dev *pci_dev = cd->pci_dev;
if (sgl->fpage) {
if (copy_to_user(sgl->user_addr, sgl->fpage + sgl->fpage_offs,
sgl->fpage_size)) {
dev_err(&pci_dev->dev, "[%s] err: copying fpage!\n",
__func__);
rc = -EFAULT;
if (sgl->write) {
res = copy_to_user(sgl->user_addr,
sgl->fpage + sgl->fpage_offs, sgl->fpage_size);
if (res) {
dev_err(&pci_dev->dev,
"[%s] err: copying fpage! (res=%lu)\n",
__func__, res);
rc = -EFAULT;
}
}
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
sgl->fpage_dma_addr);
......@@ -491,12 +498,16 @@ int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
sgl->fpage_dma_addr = 0;
}
if (sgl->lpage) {
if (copy_to_user(sgl->user_addr + sgl->user_size -
sgl->lpage_size, sgl->lpage,
sgl->lpage_size)) {
dev_err(&pci_dev->dev, "[%s] err: copying lpage!\n",
__func__);
rc = -EFAULT;
if (sgl->write) {
offset = sgl->user_size - sgl->lpage_size;
res = copy_to_user(sgl->user_addr + offset, sgl->lpage,
sgl->lpage_size);
if (res) {
dev_err(&pci_dev->dev,
"[%s] err: copying lpage! (res=%lu)\n",
__func__, res);
rc = -EFAULT;
}
}
__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
sgl->lpage_dma_addr);
......@@ -599,14 +610,14 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
/* pin user pages in memory */
rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
m->nr_pages,
1, /* write by caller */
m->write, /* readable/writable */
m->page_list); /* ptrs to pages */
if (rc < 0)
goto fail_get_user_pages;
/* assumption: get_user_pages can be killed by signals. */
if (rc < m->nr_pages) {
free_user_pages(m->page_list, rc, 0);
free_user_pages(m->page_list, rc, m->write);
rc = -EFAULT;
goto fail_get_user_pages;
}
......@@ -618,7 +629,7 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
return 0;
fail_free_user_pages:
free_user_pages(m->page_list, m->nr_pages, 0);
free_user_pages(m->page_list, m->nr_pages, m->write);
fail_get_user_pages:
kfree(m->page_list);
......@@ -651,7 +662,7 @@ int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m,
genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);
if (m->page_list) {
free_user_pages(m->page_list, m->nr_pages, 1);
free_user_pages(m->page_list, m->nr_pages, m->write);
kfree(m->page_list);
m->page_list = NULL;
......
......@@ -122,7 +122,7 @@ struct crashtype {
}
/* Define the possible types of crashes that can be triggered. */
struct crashtype crashtypes[] = {
static const struct crashtype crashtypes[] = {
CRASHTYPE(PANIC),
CRASHTYPE(BUG),
CRASHTYPE(WARNING),
......@@ -188,8 +188,8 @@ struct crashtype crashtypes[] = {
/* Global kprobe entry and crashtype. */
static struct kprobe *lkdtm_kprobe;
struct crashpoint *lkdtm_crashpoint;
struct crashtype *lkdtm_crashtype;
static struct crashpoint *lkdtm_crashpoint;
static const struct crashtype *lkdtm_crashtype;
/* Module parameters */
static int recur_count = -1;
......@@ -212,7 +212,7 @@ MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
/* Return the crashtype number or NULL if the name is invalid */
static struct crashtype *find_crashtype(const char *name)
static const struct crashtype *find_crashtype(const char *name)
{
int i;
......@@ -228,7 +228,7 @@ static struct crashtype *find_crashtype(const char *name)
* This is forced noinline just so it distinctly shows up in the stackdump
* which makes validation of expected lkdtm crashes easier.
*/
static noinline void lkdtm_do_action(struct crashtype *crashtype)
static noinline void lkdtm_do_action(const struct crashtype *crashtype)
{
if (WARN_ON(!crashtype || !crashtype->func))
return;
......@@ -236,7 +236,7 @@ static noinline void lkdtm_do_action(struct crashtype *crashtype)
}
static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
struct crashtype *crashtype)
const struct crashtype *crashtype)
{
int ret;
......@@ -300,7 +300,7 @@ static ssize_t lkdtm_debugfs_entry(struct file *f,
size_t count, loff_t *off)
{
struct crashpoint *crashpoint = file_inode(f)->i_private;
struct crashtype *crashtype = NULL;
const struct crashtype *crashtype = NULL;
char *buf;
int err;
......@@ -368,7 +368,7 @@ static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
static ssize_t direct_entry(struct file *f, const char __user *user_buf,
size_t count, loff_t *off)
{
struct crashtype *crashtype;
const struct crashtype *crashtype;
char *buf;
if (count >= PAGE_SIZE)
......@@ -404,7 +404,7 @@ static struct dentry *lkdtm_debugfs_root;
static int __init lkdtm_module_init(void)
{
struct crashpoint *crashpoint = NULL;
struct crashtype *crashtype = NULL;
const struct crashtype *crashtype = NULL;
int ret = -EINVAL;
int i;
......
......@@ -23,5 +23,4 @@
EXPORT_TRACEPOINT_SYMBOL(mei_reg_read);
EXPORT_TRACEPOINT_SYMBOL(mei_reg_write);
EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_read);
EXPORT_TRACEPOINT_SYMBOL(mei_pci_cfg_write);
#endif /* __CHECKER__ */
......@@ -83,25 +83,6 @@ TRACE_EVENT(mei_pci_cfg_read,
__get_str(dev), __entry->reg, __entry->offs, __entry->val)
);
TRACE_EVENT(mei_pci_cfg_write,
TP_PROTO(const struct device *dev, const char *reg, u32 offs, u32 val),
TP_ARGS(dev, reg, offs, val),
TP_STRUCT__entry(
__string(dev, dev_name(dev))
__field(const char *, reg)
__field(u32, offs)
__field(u32, val)
),
TP_fast_assign(
__assign_str(dev, dev_name(dev))
__entry->reg = reg;
__entry->offs = offs;
__entry->val = val;
),
TP_printk("[%s] pci cfg write %s[%#x] = %#x",
__get_str(dev), __entry->reg, __entry->offs, __entry->val)
);
#endif /* _MEI_TRACE_H_ */
/* This part must be outside protection */
......
menu "Intel MIC & related support"
comment "Intel MIC Bus Driver"
config INTEL_MIC_BUS
......@@ -150,3 +152,5 @@ config VOP
if VOP
source "drivers/vhost/Kconfig.vringh"
endif
endmenu
......@@ -123,6 +123,17 @@ config NVMEM_SUNXI_SID
This driver can also be built as a module. If so, the module
will be called nvmem_sunxi_sid.
config UNIPHIER_EFUSE
tristate "UniPhier SoCs eFuse support"
depends on ARCH_UNIPHIER || COMPILE_TEST
depends on HAS_IOMEM
help
This is a simple driver to read out specified values of a UniPhier SoC
from its eFuse.
This driver can also be built as a module. If so, the module
will be called nvmem-uniphier-efuse.
config NVMEM_VF610_OCOTP
tristate "VF610 SoC OCOTP support"
depends on SOC_VF610 || COMPILE_TEST
......@@ -135,13 +146,33 @@ config NVMEM_VF610_OCOTP
be called nvmem-vf610-ocotp.
config MESON_EFUSE
tristate "Amlogic eFuse Support"
tristate "Amlogic Meson GX eFuse Support"
depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
help
This is a driver to retrieve specific values from the eFuse found on
the Amlogic Meson SoCs.
the Amlogic Meson GX SoCs.
This driver can also be built as a module. If so, the module
will be called nvmem_meson_efuse.
config MESON_MX_EFUSE
tristate "Amlogic Meson6/Meson8/Meson8b eFuse Support"
depends on ARCH_MESON || COMPILE_TEST
help
This is a driver to retrieve specific values from the eFuse found on
the Amlogic Meson6, Meson8 and Meson8b SoCs.
This driver can also be built as a module. If so, the module
will be called nvmem_meson_mx_efuse.
config NVMEM_SNVS_LPGPR
tristate "Support for Low Power General Purpose Register"
depends on SOC_IMX6 || COMPILE_TEST
help
This is a driver for the Low Power General Purpose Register (LPGPR) available
in the Secure Non-Volatile Storage (SNVS) of i.MX6 SoCs.
This driver can also be built as a module. If so, the module
will be called nvmem-snvs-lpgpr.
endif
......@@ -27,7 +27,13 @@ obj-$(CONFIG_ROCKCHIP_EFUSE) += nvmem_rockchip_efuse.o
nvmem_rockchip_efuse-y := rockchip-efuse.o
obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
nvmem_sunxi_sid-y := sunxi_sid.o
obj-$(CONFIG_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o
nvmem-uniphier-efuse-y := uniphier-efuse.o
obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
nvmem-vf610-ocotp-y := vf610-ocotp.o
obj-$(CONFIG_MESON_EFUSE) += nvmem_meson_efuse.o
nvmem_meson_efuse-y := meson-efuse.o
obj-$(CONFIG_MESON_MX_EFUSE) += nvmem_meson_mx_efuse.o
nvmem_meson_mx_efuse-y := meson-mx-efuse.o
obj-$(CONFIG_NVMEM_SNVS_LPGPR) += nvmem_snvs_lpgpr.o
nvmem_snvs_lpgpr-y := snvs_lpgpr.o
......@@ -232,7 +232,6 @@ static struct nvmem_config bcm_otpc_nvmem_config = {
.read_only = false,
.word_size = 4,
.stride = 4,
.owner = THIS_MODULE,
.reg_read = bcm_otpc_read,
.reg_write = bcm_otpc_write,
};
......
......@@ -462,6 +462,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->id = rval;
nvmem->owner = config->owner;
if (!nvmem->owner && config->dev->driver)
nvmem->owner = config->dev->driver->owner;
nvmem->stride = config->stride;
nvmem->word_size = config->word_size;
nvmem->size = config->size;
......@@ -615,7 +617,7 @@ static struct nvmem_device *nvmem_find(const char *name)
return to_nvmem_device(d);
}
#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
#if IS_ENABLED(CONFIG_OF)
/**
* of_nvmem_device_get() - Get nvmem device from a given id
*
......@@ -753,7 +755,7 @@ static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
return cell;
}
#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
#if IS_ENABLED(CONFIG_OF)
/**
* of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
*
......@@ -946,8 +948,7 @@ void nvmem_cell_put(struct nvmem_cell *cell)
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
void *buf)
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
u8 *p, *b;
int i, bit_offset = cell->bit_offset;
......@@ -1028,8 +1029,8 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
u8 *_buf, int len)
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
u8 *_buf, int len)
{
struct nvmem_device *nvmem = cell->nvmem;
int i, rc, nbits, bit_offset = cell->bit_offset;
......
......@@ -34,7 +34,6 @@ struct imx_iim_drvdata {
struct iim_priv {
void __iomem *base;
struct clk *clk;
struct nvmem_config nvmem;
};
static int imx_iim_read(void *context, unsigned int offset,
......@@ -108,7 +107,7 @@ static int imx_iim_probe(struct platform_device *pdev)
struct resource *res;
struct iim_priv *iim;
struct nvmem_device *nvmem;
struct nvmem_config *cfg;
struct nvmem_config cfg = {};
const struct imx_iim_drvdata *drvdata = NULL;
iim = devm_kzalloc(dev, sizeof(*iim), GFP_KERNEL);
......@@ -130,19 +129,16 @@ static int imx_iim_probe(struct platform_device *pdev)
if (IS_ERR(iim->clk))
return PTR_ERR(iim->clk);
cfg = &iim->nvmem;
cfg.name = "imx-iim",
cfg.read_only = true,
cfg.word_size = 1,
cfg.stride = 1,
cfg.reg_read = imx_iim_read,
cfg.dev = dev;
cfg.size = drvdata->nregs;
cfg.priv = iim;
cfg->name = "imx-iim",
cfg->read_only = true,
cfg->word_size = 1,
cfg->stride = 1,
cfg->owner = THIS_MODULE,
cfg->reg_read = imx_iim_read,
cfg->dev = dev;
cfg->size = drvdata->nregs;
cfg->priv = iim;
nvmem = nvmem_register(cfg);
nvmem = nvmem_register(&cfg);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
......
......@@ -40,14 +40,19 @@
#define IMX_OCOTP_ADDR_CTRL_SET 0x0004
#define IMX_OCOTP_ADDR_CTRL_CLR 0x0008
#define IMX_OCOTP_ADDR_TIMING 0x0010
#define IMX_OCOTP_ADDR_DATA 0x0020
#define IMX_OCOTP_ADDR_DATA0 0x0020
#define IMX_OCOTP_ADDR_DATA1 0x0030
#define IMX_OCOTP_ADDR_DATA2 0x0040
#define IMX_OCOTP_ADDR_DATA3 0x0050
#define IMX_OCOTP_BM_CTRL_ADDR 0x0000007F
#define IMX_OCOTP_BM_CTRL_BUSY 0x00000100
#define IMX_OCOTP_BM_CTRL_ERROR 0x00000200
#define IMX_OCOTP_BM_CTRL_REL_SHADOWS 0x00000400
#define DEF_RELAX 20 /* > 16.5ns */
#define DEF_RELAX 20 /* > 16.5ns */
#define DEF_FSOURCE 1001 /* > 1000 ns */
#define DEF_STROBE_PROG 10000 /* IPG clocks */
#define IMX_OCOTP_WR_UNLOCK 0x3E770000
#define IMX_OCOTP_READ_LOCKED_VAL 0xBADABADA
......@@ -57,10 +62,16 @@ struct ocotp_priv {
struct device *dev;
struct clk *clk;
void __iomem *base;
unsigned int nregs;
const struct ocotp_params *params;
struct nvmem_config *config;
};
struct ocotp_params {
unsigned int nregs;
unsigned int bank_address_words;
void (*set_timing)(struct ocotp_priv *priv);
};
static int imx_ocotp_wait_for_busy(void __iomem *base, u32 flags)
{
int count;
......@@ -121,8 +132,8 @@ static int imx_ocotp_read(void *context, unsigned int offset,
index = offset >> 2;
count = bytes >> 2;
if (count > (priv->nregs - index))
count = priv->nregs - index;
if (count > (priv->params->nregs - index))
count = priv->params->nregs - index;
mutex_lock(&ocotp_mutex);
......@@ -160,6 +171,52 @@ static int imx_ocotp_read(void *context, unsigned int offset,
return ret;
}
static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv)
{
unsigned long clk_rate = 0;
unsigned long strobe_read, relax, strobe_prog;
u32 timing = 0;
/* 47.3.1.3.1
* Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX]
* fields with timing values to match the current frequency of the
* ipg_clk. OTP writes will work at maximum bus frequencies as long
* as the HW_OCOTP_TIMING parameters are set correctly.
*/
clk_rate = clk_get_rate(priv->clk);
relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
timing = strobe_prog & 0x00000FFF;
timing |= (relax << 12) & 0x0000F000;
timing |= (strobe_read << 16) & 0x003F0000;
writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
}
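As a sanity check of the i.MX6 timing formula above, assuming a purely
illustrative 66 MHz ipg_clk, the integer arithmetic works out as follows:

/* Worked example only; clk_rate = 66 MHz is an assumed value. */
static u32 example_imx6_timing(void)
{
	unsigned long clk_rate = 66000000;
	unsigned long relax, strobe_prog, strobe_read;
	u32 timing;

	relax = clk_rate / (1000000000 / DEF_RELAX) - 1;			/* 1 - 1 = 0 */
	strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1; /* 660 + 41 = 701 */
	strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;	/* 2 + 41 = 43 */

	timing = strobe_prog & 0x00000FFF;		/* 0x2BD */
	timing |= (relax << 12) & 0x0000F000;		/* 0x0 */
	timing |= (strobe_read << 16) & 0x003F0000;	/* 0x2B0000 */

	return timing;					/* 0x002B02BD */
}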
static void imx_ocotp_set_imx7_timing(struct ocotp_priv *priv)
{
unsigned long clk_rate = 0;
u64 fsource, strobe_prog;
u32 timing = 0;
/* i.MX 7Solo Applications Processor Reference Manual, Rev. 0.1
* 6.4.3.3
*/
clk_rate = clk_get_rate(priv->clk);
fsource = DIV_ROUND_UP_ULL((u64)clk_rate * DEF_FSOURCE,
NSEC_PER_SEC) + 1;
strobe_prog = DIV_ROUND_CLOSEST_ULL((u64)clk_rate * DEF_STROBE_PROG,
NSEC_PER_SEC) + 1;
timing = strobe_prog & 0x00000FFF;
timing |= (fsource << 12) & 0x000FF000;
writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
}
static int imx_ocotp_write(void *context, unsigned int offset, void *val,
size_t bytes)
{
......@@ -167,11 +224,9 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
u32 *buf = val;
int ret;
unsigned long clk_rate = 0;
unsigned long strobe_read, relax, strobe_prog;
u32 timing = 0;
u32 ctrl;
u8 waddr;
u8 word = 0;
/* allow only writing one complete OTP word at a time */
if ((bytes != priv->config->word_size) ||
......@@ -187,23 +242,8 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
return ret;
}
/* 47.3.1.3.1
* Program HW_OCOTP_TIMING[STROBE_PROG] and HW_OCOTP_TIMING[RELAX]
* fields with timing values to match the current frequency of the
* ipg_clk. OTP writes will work at maximum bus frequencies as long
* as the HW_OCOTP_TIMING parameters are set correctly.
*/
clk_rate = clk_get_rate(priv->clk);
relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
timing = strobe_prog & 0x00000FFF;
timing |= (relax << 12) & 0x0000F000;
timing |= (strobe_read << 16) & 0x003F0000;
writel(timing, priv->base + IMX_OCOTP_ADDR_TIMING);
/* Setup the write timing values */
priv->params->set_timing(priv);
/* 47.3.1.3.2
* Check that HW_OCOTP_CTRL[BUSY] and HW_OCOTP_CTRL[ERROR] are clear.
......@@ -224,8 +264,23 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
* description. Both the unlock code and address can be written in the
* same operation.
*/
/* OTP write/read address specifies one of 128 word address locations */
waddr = offset / 4;
if (priv->params->bank_address_words != 0) {
/*
* In banked/i.MX7 mode the OTP register bank goes into waddr
* see i.MX 7Solo Applications Processor Reference Manual, Rev.
* 0.1 section 6.4.3.1
*/
offset = offset / priv->config->word_size;
waddr = offset / priv->params->bank_address_words;
word = offset & (priv->params->bank_address_words - 1);
} else {
/*
* Non-banked i.MX6 mode.
* OTP write/read address specifies one of 128 word address
* locations
*/
waddr = offset / 4;
}
ctrl = readl(priv->base + IMX_OCOTP_ADDR_CTRL);
ctrl &= ~IMX_OCOTP_BM_CTRL_ADDR;
......@@ -251,8 +306,43 @@ static int imx_ocotp_write(void *context, unsigned int offset, void *val,
* shift right (with zero fill). This shifting is required to program
* the OTP serially. During the write operation, HW_OCOTP_DATA cannot be
* modified.
* Note: on i.MX7 there are four data fields to write for banked write
* with the fuse blowing operation only taking place after data0
* has been written. This is why data0 must always be the last
* register written.
*/
writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA);
if (priv->params->bank_address_words != 0) {
/* Banked/i.MX7 mode */
switch (word) {
case 0:
writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0);
break;
case 1:
writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA1);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
break;
case 2:
writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA2);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA3);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
break;
case 3:
writel(0, priv->base + IMX_OCOTP_ADDR_DATA1);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA2);
writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA3);
writel(0, priv->base + IMX_OCOTP_ADDR_DATA0);
break;
}
} else {
/* Non-banked i.MX6 mode */
writel(*buf, priv->base + IMX_OCOTP_ADDR_DATA0);
}
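To make the banked data path concrete, here is a small worked example of how a byte offset maps onto the CTRL address and DATA registers on i.MX7; the offset value is illustrative, not from the patch:
/*
 * Hypothetical example, i.MX7 (word_size = 4, bank_address_words = 4),
 * write to byte offset 0x14:
 *   word index = 0x14 / 4 = 5
 *   waddr      = 5 / 4    = 1  -> programmed into HW_OCOTP_CTRL[ADDR]
 *   word       = 5 & 3    = 1  -> value goes to HW_OCOTP_DATA1
 * DATA2 and DATA3 are cleared, and DATA0 is written last (as zero) so
 * that the DATA0 write is what triggers the actual fuse blow.
 */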
/* 47.4.1.4.5
* Once complete, the controller will clear BUSY. A write request to a
......@@ -303,17 +393,46 @@ static struct nvmem_config imx_ocotp_nvmem_config = {
.read_only = false,
.word_size = 4,
.stride = 4,
.owner = THIS_MODULE,
.reg_read = imx_ocotp_read,
.reg_write = imx_ocotp_write,
};
static const struct ocotp_params imx6q_params = {
.nregs = 128,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
};
static const struct ocotp_params imx6sl_params = {
.nregs = 64,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
};
static const struct ocotp_params imx6sx_params = {
.nregs = 128,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
};
static const struct ocotp_params imx6ul_params = {
.nregs = 128,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
};
static const struct ocotp_params imx7d_params = {
.nregs = 64,
.bank_address_words = 4,
.set_timing = imx_ocotp_set_imx7_timing,
};
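With the per-SoC details factored into ocotp_params, supporting a further variant is one parameter block plus an entry in the match table below; a minimal sketch, where the imx6sll names are hypothetical and not part of this series:

static const struct ocotp_params imx6sll_params = {
	.nregs = 128,			/* 128 OTP words */
	.bank_address_words = 0,	/* non-banked, i.MX6-style addressing */
	.set_timing = imx_ocotp_set_imx6_timing,
};

/* plus, in imx_ocotp_dt_ids:
 *	{ .compatible = "fsl,imx6sll-ocotp", .data = &imx6sll_params },
 */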
static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", (void *)128 },
{ .compatible = "fsl,imx6sl-ocotp", (void *)64 },
{ .compatible = "fsl,imx6sx-ocotp", (void *)128 },
{ .compatible = "fsl,imx6ul-ocotp", (void *)128 },
{ .compatible = "fsl,imx7d-ocotp", (void *)64 },
{ .compatible = "fsl,imx6q-ocotp", .data = &imx6q_params },
{ .compatible = "fsl,imx6sl-ocotp", .data = &imx6sl_params },
{ .compatible = "fsl,imx6sx-ocotp", .data = &imx6sx_params },
{ .compatible = "fsl,imx6ul-ocotp", .data = &imx6ul_params },
{ .compatible = "fsl,imx7d-ocotp", .data = &imx7d_params },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
......@@ -342,8 +461,8 @@ static int imx_ocotp_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
of_id = of_match_device(imx_ocotp_dt_ids, dev);
priv->nregs = (unsigned long)of_id->data;
imx_ocotp_nvmem_config.size = 4 * priv->nregs;
priv->params = of_device_get_match_data(&pdev->dev);
imx_ocotp_nvmem_config.size = 4 * priv->params->nregs;
imx_ocotp_nvmem_config.dev = dev;
imx_ocotp_nvmem_config.priv = priv;
priv->config = &imx_ocotp_nvmem_config;
......@@ -375,5 +494,5 @@ static struct platform_driver imx_ocotp_driver = {
module_platform_driver(imx_ocotp_driver);
MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>");
MODULE_DESCRIPTION("i.MX6 OCOTP fuse box driver");
MODULE_DESCRIPTION("i.MX6/i.MX7 OCOTP fuse box driver");
MODULE_LICENSE("GPL v2");
......@@ -159,7 +159,6 @@ static struct nvmem_config lpc18xx_nvmem_config = {
.word_size = 4,
.reg_read = lpc18xx_eeprom_read,
.reg_write = lpc18xx_eeprom_gather_write,
.owner = THIS_MODULE,
};
static int lpc18xx_eeprom_probe(struct platform_device *pdev)
......
......@@ -64,7 +64,6 @@ static struct nvmem_config lpc18xx_otp_nvmem_config = {
.read_only = true,
.word_size = LPC18XX_OTP_WORD_SIZE,
.stride = LPC18XX_OTP_WORD_SIZE,
.owner = THIS_MODULE,
.reg_read = lpc18xx_otp_read,
};
......
/*
* Amlogic eFuse Driver
* Amlogic Meson GX eFuse Driver
*
* Copyright (c) 2016 Endless Computers, Inc.
* Author: Carlo Caione <carlo@endlessm.com>
......@@ -37,7 +37,6 @@ static int meson_efuse_read(void *context, unsigned int offset,
static struct nvmem_config econfig = {
.name = "meson-efuse",
.owner = THIS_MODULE,
.stride = 1,
.word_size = 1,
.read_only = true,
......@@ -89,5 +88,5 @@ static struct platform_driver meson_efuse_driver = {
module_platform_driver(meson_efuse_driver);
MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>");
MODULE_DESCRIPTION("Amlogic Meson NVMEM driver");
MODULE_DESCRIPTION("Amlogic Meson GX NVMEM driver");
MODULE_LICENSE("GPL v2");
/*
* Amlogic Meson6, Meson8 and Meson8b eFuse Driver
*
* Copyright (c) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#define MESON_MX_EFUSE_CNTL1 0x04
#define MESON_MX_EFUSE_CNTL1_PD_ENABLE BIT(27)
#define MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY BIT(26)
#define MESON_MX_EFUSE_CNTL1_AUTO_RD_START BIT(25)
#define MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE BIT(24)
#define MESON_MX_EFUSE_CNTL1_BYTE_WR_DATA GENMASK(23, 16)
#define MESON_MX_EFUSE_CNTL1_AUTO_WR_BUSY BIT(14)
#define MESON_MX_EFUSE_CNTL1_AUTO_WR_START BIT(13)
#define MESON_MX_EFUSE_CNTL1_AUTO_WR_ENABLE BIT(12)
#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET BIT(11)
#define MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK GENMASK(10, 0)
#define MESON_MX_EFUSE_CNTL2 0x08
#define MESON_MX_EFUSE_CNTL4 0x10
#define MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE BIT(10)
struct meson_mx_efuse_platform_data {
const char *name;
unsigned int word_size;
};
struct meson_mx_efuse {
void __iomem *base;
struct clk *core_clk;
struct nvmem_device *nvmem;
struct nvmem_config config;
};
static void meson_mx_efuse_mask_bits(struct meson_mx_efuse *efuse, u32 reg,
u32 mask, u32 set)
{
u32 data;
data = readl(efuse->base + reg);
data &= ~mask;
data |= (set & mask);
writel(data, efuse->base + reg);
}
static int meson_mx_efuse_hw_enable(struct meson_mx_efuse *efuse)
{
int err;
err = clk_prepare_enable(efuse->core_clk);
if (err)
return err;
/* power up the efuse */
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_PD_ENABLE, 0);
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL4,
MESON_MX_EFUSE_CNTL4_ENCRYPT_ENABLE, 0);
return 0;
}
static void meson_mx_efuse_hw_disable(struct meson_mx_efuse *efuse)
{
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_PD_ENABLE,
MESON_MX_EFUSE_CNTL1_PD_ENABLE);
clk_disable_unprepare(efuse->core_clk);
}
static int meson_mx_efuse_read_addr(struct meson_mx_efuse *efuse,
unsigned int addr, u32 *value)
{
int err;
u32 regval;
/* write the address to read */
regval = FIELD_PREP(MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, addr);
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_BYTE_ADDR_MASK, regval);
/* inform the hardware that we changed the address */
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET,
MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET);
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_BYTE_ADDR_SET, 0);
/* start the read process */
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_AUTO_RD_START,
MESON_MX_EFUSE_CNTL1_AUTO_RD_START);
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_AUTO_RD_START, 0);
/*
* perform a dummy read to ensure that the HW has the RD_BUSY bit set
* when polling for the status below.
*/
readl(efuse->base + MESON_MX_EFUSE_CNTL1);
err = readl_poll_timeout_atomic(efuse->base + MESON_MX_EFUSE_CNTL1,
regval,
(!(regval & MESON_MX_EFUSE_CNTL1_AUTO_RD_BUSY)),
1, 1000);
if (err) {
dev_err(efuse->config.dev,
"Timeout while reading efuse address %u\n", addr);
return err;
}
*value = readl(efuse->base + MESON_MX_EFUSE_CNTL2);
return 0;
}
static int meson_mx_efuse_read(void *context, unsigned int offset,
void *buf, size_t bytes)
{
struct meson_mx_efuse *efuse = context;
u32 tmp;
int err, i, addr;
err = meson_mx_efuse_hw_enable(efuse);
if (err)
return err;
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE,
MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE);
for (i = offset; i < offset + bytes; i += efuse->config.word_size) {
addr = i / efuse->config.word_size;
err = meson_mx_efuse_read_addr(efuse, addr, &tmp);
if (err)
break;
memcpy(buf + i, &tmp, efuse->config.word_size);
}
meson_mx_efuse_mask_bits(efuse, MESON_MX_EFUSE_CNTL1,
MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE, 0);
meson_mx_efuse_hw_disable(efuse);
return err;
}
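Once registered, the provider above is reached through the generic nvmem consumer API rather than called directly; a hypothetical consumer-side sketch follows (the lookup string and sizes are illustrative, error handling abbreviated; real consumers usually reference cells via the device tree):

	struct nvmem_device *nv;
	u8 fuses[16];

	nv = nvmem_device_get(dev, "meson8-efuse");
	if (IS_ERR(nv))
		return PTR_ERR(nv);
	/* the nvmem core ends up calling meson_mx_efuse_read(ctx, 0, fuses, 16) */
	nvmem_device_read(nv, 0, sizeof(fuses), fuses);
	nvmem_device_put(nv);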
static const struct meson_mx_efuse_platform_data meson6_efuse_data = {
.name = "meson6-efuse",
.word_size = 1,
};
static const struct meson_mx_efuse_platform_data meson8_efuse_data = {
.name = "meson8-efuse",
.word_size = 4,
};
static const struct meson_mx_efuse_platform_data meson8b_efuse_data = {
.name = "meson8b-efuse",
.word_size = 4,
};
static const struct of_device_id meson_mx_efuse_match[] = {
{ .compatible = "amlogic,meson6-efuse", .data = &meson6_efuse_data },
{ .compatible = "amlogic,meson8-efuse", .data = &meson8_efuse_data },
{ .compatible = "amlogic,meson8b-efuse", .data = &meson8b_efuse_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, meson_mx_efuse_match);
static int meson_mx_efuse_probe(struct platform_device *pdev)
{
const struct meson_mx_efuse_platform_data *drvdata;
struct meson_mx_efuse *efuse;
struct resource *res;
drvdata = of_device_get_match_data(&pdev->dev);
if (!drvdata)
return -EINVAL;
efuse = devm_kzalloc(&pdev->dev, sizeof(*efuse), GFP_KERNEL);
if (!efuse)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
efuse->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(efuse->base))
return PTR_ERR(efuse->base);
efuse->config.name = devm_kstrdup(&pdev->dev, drvdata->name,
GFP_KERNEL);
efuse->config.owner = THIS_MODULE;
efuse->config.dev = &pdev->dev;
efuse->config.priv = efuse;
efuse->config.stride = drvdata->word_size;
efuse->config.word_size = drvdata->word_size;
efuse->config.size = SZ_512;
efuse->config.read_only = true;
efuse->config.reg_read = meson_mx_efuse_read;
efuse->core_clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(efuse->core_clk)) {
dev_err(&pdev->dev, "Failed to get core clock\n");
return PTR_ERR(efuse->core_clk);
}
efuse->nvmem = nvmem_register(&efuse->config);
if (IS_ERR(efuse->nvmem))
return PTR_ERR(efuse->nvmem);
platform_set_drvdata(pdev, efuse);
return 0;
}
static int meson_mx_efuse_remove(struct platform_device *pdev)
{
struct meson_mx_efuse *efuse = platform_get_drvdata(pdev);
return nvmem_unregister(efuse->nvmem);
}
static struct platform_driver meson_mx_efuse_driver = {
.probe = meson_mx_efuse_probe,
.remove = meson_mx_efuse_remove,
.driver = {
.name = "meson-mx-efuse",
.of_match_table = meson_mx_efuse_match,
},
};
module_platform_driver(meson_mx_efuse_driver);
MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
MODULE_DESCRIPTION("Amlogic Meson MX eFuse NVMEM driver");
MODULE_LICENSE("GPL v2");
......@@ -18,15 +18,19 @@
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
struct mtk_efuse_priv {
void __iomem *base;
};
static int mtk_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
void __iomem *base = context;
struct mtk_efuse_priv *priv = context;
u32 *val = _val;
int i = 0, words = bytes / 4;
while (words--)
*val++ = readl(base + reg + (i++ * 4));
*val++ = readl(priv->base + reg + (i++ * 4));
return 0;
}
......@@ -34,12 +38,12 @@ static int mtk_reg_read(void *context,
static int mtk_reg_write(void *context,
unsigned int reg, void *_val, size_t bytes)
{
void __iomem *base = context;
struct mtk_efuse_priv *priv = context;
u32 *val = _val;
int i = 0, words = bytes / 4;
while (words--)
writel(*val++, base + reg + (i++ * 4));
writel(*val++, priv->base + reg + (i++ * 4));
return 0;
}
......@@ -49,27 +53,26 @@ static int mtk_efuse_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
struct nvmem_config *econfig;
void __iomem *base;
struct nvmem_config econfig = {};
struct mtk_efuse_priv *priv;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
econfig = devm_kzalloc(dev, sizeof(*econfig), GFP_KERNEL);
if (!econfig)
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
econfig->stride = 4;
econfig->word_size = 4;
econfig->reg_read = mtk_reg_read;
econfig->reg_write = mtk_reg_write;
econfig->size = resource_size(res);
econfig->priv = base;
econfig->dev = dev;
econfig->owner = THIS_MODULE;
nvmem = nvmem_register(econfig);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
econfig.stride = 4;
econfig.word_size = 4;
econfig.reg_read = mtk_reg_read;
econfig.reg_write = mtk_reg_write;
econfig.size = resource_size(res);
econfig.priv = priv;
econfig.dev = dev;
nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
......
......@@ -118,7 +118,6 @@ static struct nvmem_config ocotp_config = {
.name = "mxs-ocotp",
.stride = 16,
.word_size = 4,
.owner = THIS_MODULE,
.reg_read = mxs_ocotp_read,
};
......
......@@ -17,15 +17,19 @@
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
struct qfprom_priv {
void __iomem *base;
};
static int qfprom_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
void __iomem *base = context;
struct qfprom_priv *priv = context;
u8 *val = _val;
int i = 0, words = bytes;
while (words--)
*val++ = readb(base + reg + i++);
*val++ = readb(priv->base + reg + i++);
return 0;
}
......@@ -33,12 +37,12 @@ static int qfprom_reg_read(void *context,
static int qfprom_reg_write(void *context,
unsigned int reg, void *_val, size_t bytes)
{
void __iomem *base = context;
struct qfprom_priv *priv = context;
u8 *val = _val;
int i = 0, words = bytes;
while (words--)
writeb(*val++, base + reg + i++);
writeb(*val++, priv->base + reg + i++);
return 0;
}
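Both this change and the mtk-efuse one above share the same motivation: the callback context arrives as a plain void *, and assigning it straight to a void __iomem * mixes sparse address spaces. Keeping the annotated pointer inside a small priv structure avoids the cast; a minimal sketch of the pattern with hypothetical names:

struct example_priv {
	void __iomem *base;	/* __iomem annotation lives on the member */
};

static int example_reg_read(void *context, unsigned int reg,
			    void *val, size_t bytes)
{
	/* plain-pointer assignment, no address-space cast for sparse to flag */
	struct example_priv *priv = context;

	memcpy_fromio(val, priv->base + reg, bytes);
	return 0;
}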
......@@ -52,7 +56,6 @@ static int qfprom_remove(struct platform_device *pdev)
static struct nvmem_config econfig = {
.name = "qfprom",
.owner = THIS_MODULE,
.stride = 1,
.word_size = 1,
.reg_read = qfprom_reg_read,
......@@ -64,16 +67,20 @@ static int qfprom_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
void __iomem *base;
struct qfprom_priv *priv;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
priv->base = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
econfig.size = resource_size(res);
econfig.dev = dev;
econfig.priv = base;
econfig.priv = priv;
nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
......
......@@ -149,7 +149,6 @@ static int rockchip_rk3399_efuse_read(void *context, unsigned int offset,
static struct nvmem_config econfig = {
.name = "rockchip-efuse",
.owner = THIS_MODULE,
.stride = 1,
.word_size = 1,
.read_only = true,
......@@ -177,6 +176,10 @@ static const struct of_device_id rockchip_efuse_match[] = {
.compatible = "rockchip,rk3288-efuse",
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
.compatible = "rockchip,rk3368-efuse",
.data = (void *)&rockchip_rk3288_efuse_read,
},
{
.compatible = "rockchip,rk3399-efuse",
.data = (void *)&rockchip_rk3399_efuse_read,
......
/*
* Copyright (c) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
* Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*/
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#define IMX6Q_SNVS_HPLR 0x00
#define IMX6Q_GPR_SL BIT(5)
#define IMX6Q_SNVS_LPLR 0x34
#define IMX6Q_GPR_HL BIT(5)
#define IMX6Q_SNVS_LPGPR 0x68
struct snvs_lpgpr_cfg {
int offset;
int offset_hplr;
int offset_lplr;
};
struct snvs_lpgpr_priv {
struct device *dev;
struct regmap *regmap;
struct nvmem_config cfg;
const struct snvs_lpgpr_cfg *dcfg;
};
static const struct snvs_lpgpr_cfg snvs_lpgpr_cfg_imx6q = {
.offset = IMX6Q_SNVS_LPGPR,
.offset_hplr = IMX6Q_SNVS_HPLR,
.offset_lplr = IMX6Q_SNVS_LPLR,
};
static int snvs_lpgpr_write(void *context, unsigned int offset, void *val,
size_t bytes)
{
struct snvs_lpgpr_priv *priv = context;
const struct snvs_lpgpr_cfg *dcfg = priv->dcfg;
unsigned int lock_reg;
int ret;
ret = regmap_read(priv->regmap, dcfg->offset_hplr, &lock_reg);
if (ret < 0)
return ret;
if (lock_reg & IMX6Q_GPR_SL)
return -EPERM;
ret = regmap_read(priv->regmap, dcfg->offset_lplr, &lock_reg);
if (ret < 0)
return ret;
if (lock_reg & IMX6Q_GPR_HL)
return -EPERM;
return regmap_bulk_write(priv->regmap, dcfg->offset + offset, val,
bytes / 4);
}
static int snvs_lpgpr_read(void *context, unsigned int offset, void *val,
size_t bytes)
{
struct snvs_lpgpr_priv *priv = context;
const struct snvs_lpgpr_cfg *dcfg = priv->dcfg;
return regmap_bulk_read(priv->regmap, dcfg->offset + offset,
val, bytes / 4);
}
static int snvs_lpgpr_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct device_node *syscon_node;
struct snvs_lpgpr_priv *priv;
struct nvmem_config *cfg;
struct nvmem_device *nvmem;
const struct snvs_lpgpr_cfg *dcfg;
if (!node)
return -ENOENT;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
dcfg = of_device_get_match_data(dev);
if (!dcfg)
return -EINVAL;
syscon_node = of_get_parent(node);
if (!syscon_node)
return -ENODEV;
priv->regmap = syscon_node_to_regmap(syscon_node);
of_node_put(syscon_node);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
priv->dcfg = dcfg;
cfg = &priv->cfg;
cfg->priv = priv;
cfg->name = dev_name(dev);
cfg->dev = dev;
cfg->stride = 4;
cfg->word_size = 4;
cfg->size = 4;
cfg->owner = THIS_MODULE;
cfg->reg_read = snvs_lpgpr_read;
cfg->reg_write = snvs_lpgpr_write;
nvmem = nvmem_register(cfg);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
platform_set_drvdata(pdev, nvmem);
return 0;
}
static int snvs_lpgpr_remove(struct platform_device *pdev)
{
struct nvmem_device *nvmem = platform_get_drvdata(pdev);
return nvmem_unregister(nvmem);
}
static const struct of_device_id snvs_lpgpr_dt_ids[] = {
{ .compatible = "fsl,imx6q-snvs-lpgpr", .data = &snvs_lpgpr_cfg_imx6q },
{ .compatible = "fsl,imx6ul-snvs-lpgpr",
.data = &snvs_lpgpr_cfg_imx6q },
{ },
};
MODULE_DEVICE_TABLE(of, snvs_lpgpr_dt_ids);
static struct platform_driver snvs_lpgpr_driver = {
.probe = snvs_lpgpr_probe,
.remove = snvs_lpgpr_remove,
.driver = {
.name = "snvs_lpgpr",
.of_match_table = snvs_lpgpr_dt_ids,
},
};
module_platform_driver(snvs_lpgpr_driver);
MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
MODULE_DESCRIPTION("Low Power General Purpose Register in i.MX6 Secure Non-Volatile Storage");
MODULE_LICENSE("GPL v2");
......@@ -40,7 +40,6 @@ static struct nvmem_config econfig = {
.read_only = true,
.stride = 4,
.word_size = 1,
.owner = THIS_MODULE,
};
struct sunxi_sid_cfg {
......@@ -199,10 +198,16 @@ static const struct sunxi_sid_cfg sun8i_h3_cfg = {
.need_register_readout = true,
};
static const struct sunxi_sid_cfg sun50i_a64_cfg = {
.value_offset = 0x200,
.size = 0x100,
};
static const struct of_device_id sunxi_sid_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-sid", .data = &sun4i_a10_cfg },
{ .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg },
{ .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg },
{ .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg },
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
......
/*
* UniPhier eFuse driver
*
* Copyright (C) 2017 Socionext Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
struct uniphier_efuse_priv {
void __iomem *base;
};
static int uniphier_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
struct uniphier_efuse_priv *priv = context;
u32 *val = _val;
int offs;
for (offs = 0; offs < bytes; offs += sizeof(u32))
*val++ = readl(priv->base + reg + offs);
return 0;
}
static int uniphier_efuse_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
struct nvmem_config econfig = {};
struct uniphier_efuse_priv *priv;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
econfig.stride = 4;
econfig.word_size = 4;
econfig.read_only = true;
econfig.reg_read = uniphier_reg_read;
econfig.size = resource_size(res);
econfig.priv = priv;
econfig.dev = dev;
nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
platform_set_drvdata(pdev, nvmem);
return 0;
}
static int uniphier_efuse_remove(struct platform_device *pdev)
{
struct nvmem_device *nvmem = platform_get_drvdata(pdev);
return nvmem_unregister(nvmem);
}
static const struct of_device_id uniphier_efuse_of_match[] = {
{ .compatible = "socionext,uniphier-efuse",},
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, uniphier_efuse_of_match);
static struct platform_driver uniphier_efuse_driver = {
.probe = uniphier_efuse_probe,
.remove = uniphier_efuse_remove,
.driver = {
.name = "uniphier-efuse",
.of_match_table = uniphier_efuse_of_match,
},
};
module_platform_driver(uniphier_efuse_driver);
MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>");
MODULE_DESCRIPTION("UniPhier eFuse driver");
MODULE_LICENSE("GPL v2");
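Like the other providers in this series, individual fuse fields are normally consumed as named nvmem cells rather than raw offsets; a hypothetical cell-based consumer sketch (the "usb-rterm" cell name is invented for illustration):

	struct nvmem_cell *cell;
	void *data;
	size_t len;

	cell = nvmem_cell_get(dev, "usb-rterm");
	if (IS_ERR(cell))
		return PTR_ERR(cell);
	data = nvmem_cell_read(cell, &len);	/* returns a kmalloc'ed copy */
	nvmem_cell_put(cell);
	if (IS_ERR(data))
		return PTR_ERR(data);
	/* ... use the len bytes at data ... */
	kfree(data);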
......@@ -206,7 +206,6 @@ static int vf610_ocotp_read(void *context, unsigned int offset,
static struct nvmem_config ocotp_config = {
.name = "ocotp",
.owner = THIS_MODULE,
.stride = 4,
.word_size = 4,
.reg_read = vf610_ocotp_read,
......
......@@ -1769,7 +1769,7 @@ static size_t parport_ip32_ecp_write_data(struct parport *p,
/*--- Default parport operations ---------------------------------------*/
static __initdata struct parport_operations parport_ip32_ops = {
static const struct parport_operations parport_ip32_ops __initconst = {
.write_data = parport_ip32_write_data,
.read_data = parport_ip32_read_data,
......
......@@ -1599,7 +1599,7 @@ static ssize_t pccard_store_cis(struct file *filp, struct kobject *kobj,
}
struct bin_attribute pccard_cis_attr = {
const struct bin_attribute pccard_cis_attr = {
.attr = { .name = "cis", .mode = S_IRUGO | S_IWUSR },
.size = 0x200,
.read = pccard_show_cis,
......
......@@ -152,7 +152,7 @@ void pcmcia_cleanup_irq(struct pcmcia_socket *s);
int pcmcia_setup_irq(struct pcmcia_device *p_dev);
/* cistpl.c */
extern struct bin_attribute pccard_cis_attr;
extern const struct bin_attribute pccard_cis_attr;
int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr,
u_int addr, u_int len, void *ptr);
......
......@@ -380,11 +380,10 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
return IRQ_RETVAL(handled);
} /* pcc_interrupt */
static void pcc_interrupt_wrapper(u_long data)
static void pcc_interrupt_wrapper(struct timer_list *unused)
{
pr_debug("m32r_cfc: pcc_interrupt_wrapper:\n");
pcc_interrupt(0, NULL);
init_timer(&poll_timer);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
......@@ -758,9 +757,7 @@ static int __init init_m32r_pcc(void)
/* Finally, schedule a polling interrupt */
if (poll_interval != 0) {
poll_timer.function = pcc_interrupt_wrapper;
poll_timer.data = 0;
init_timer(&poll_timer);
timer_setup(&poll_timer, pcc_interrupt_wrapper, 0);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
......
......@@ -386,10 +386,9 @@ static irqreturn_t pcc_interrupt(int irq, void *dev)
return IRQ_RETVAL(handled);
} /* pcc_interrupt */
static void pcc_interrupt_wrapper(u_long data)
static void pcc_interrupt_wrapper(struct timer_list *unused)
{
pcc_interrupt(0, NULL);
init_timer(&poll_timer);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
......@@ -729,9 +728,7 @@ static int __init init_m32r_pcc(void)
/* Finally, schedule a polling interrupt */
if (poll_interval != 0) {
poll_timer.function = pcc_interrupt_wrapper;
poll_timer.data = 0;
init_timer(&poll_timer);
timer_setup(&poll_timer, pcc_interrupt_wrapper, 0);
poll_timer.expires = jiffies + poll_interval;
add_timer(&poll_timer);
}
......
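The two m32r hunks above follow the tree-wide timer API conversion in this cycle; the general shape of that conversion, sketched here with hypothetical names (the m32r code itself keeps a file-scope timer and simply ignores the callback argument):

struct my_dev {
	struct timer_list poll_timer;	/* timer embedded in driver state */
};

static void my_poll(struct timer_list *t)
{
	struct my_dev *md = from_timer(md, t, poll_timer);

	/* do the periodic work on md, then re-arm */
	mod_timer(&md->poll_timer, jiffies + HZ);
}

static void my_start(struct my_dev *md)
{
	/* replaces init_timer() plus .function/.data assignments */
	timer_setup(&md->poll_timer, my_poll, 0);
	mod_timer(&md->poll_timer, jiffies + HZ);
}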
......@@ -225,6 +225,7 @@ static void tb_activate_pcie_devices(struct tb *tb)
tb_port_info(up_port,
"PCIe tunnel activation failed, aborting\n");
tb_pci_free(tunnel);
continue;
}
list_add(&tunnel->list, &tcm->tunnel_list);
......
......@@ -511,7 +511,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
ca91cx42_bridge = image->parent;
/* Find pci_dev container of dev */
if (ca91cx42_bridge->parent == NULL) {
if (!ca91cx42_bridge->parent) {
dev_err(ca91cx42_bridge->parent, "Dev entry NULL\n");
return -EINVAL;
}
......@@ -529,14 +529,12 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
image->kern_base = NULL;
kfree(image->bus_resource.name);
release_resource(&image->bus_resource);
memset(&image->bus_resource, 0, sizeof(struct resource));
memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
if (image->bus_resource.name == NULL) {
if (!image->bus_resource.name) {
image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
if (image->bus_resource.name == NULL) {
dev_err(ca91cx42_bridge->parent, "Unable to allocate "
"memory for resource name\n");
if (!image->bus_resource.name) {
retval = -ENOMEM;
goto err_name;
}
......@@ -562,7 +560,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
image->kern_base = ioremap_nocache(
image->bus_resource.start, size);
if (image->kern_base == NULL) {
if (!image->kern_base) {
dev_err(ca91cx42_bridge->parent, "Failed to remap resource\n");
retval = -ENOMEM;
goto err_remap;
......@@ -574,7 +572,7 @@ static int ca91cx42_alloc_resource(struct vme_master_resource *image,
release_resource(&image->bus_resource);
err_resource:
kfree(image->bus_resource.name);
memset(&image->bus_resource, 0, sizeof(struct resource));
memset(&image->bus_resource, 0, sizeof(image->bus_resource));
err_name:
return retval;
}
......@@ -588,7 +586,7 @@ static void ca91cx42_free_resource(struct vme_master_resource *image)
image->kern_base = NULL;
release_resource(&image->bus_resource);
kfree(image->bus_resource.name);
memset(&image->bus_resource, 0, sizeof(struct resource));
memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
......@@ -1036,10 +1034,8 @@ static int ca91cx42_dma_list_add(struct vme_dma_list *list,
dev = list->parent->parent->parent;
/* XXX descriptor must be aligned on 64-bit boundaries */
entry = kmalloc(sizeof(struct ca91cx42_dma_entry), GFP_KERNEL);
if (entry == NULL) {
dev_err(dev, "Failed to allocate memory for dma resource "
"structure\n");
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
retval = -ENOMEM;
goto err_mem;
}
......@@ -1052,7 +1048,7 @@ static int ca91cx42_dma_list_add(struct vme_dma_list *list,
goto err_align;
}
memset(&entry->descriptor, 0, sizeof(struct ca91cx42_dma_descriptor));
memset(&entry->descriptor, 0, sizeof(entry->descriptor));
if (dest->type == VME_DMA_VME) {
entry->descriptor.dctl |= CA91CX42_DCTL_L2V;
......@@ -1323,7 +1319,7 @@ static int ca91cx42_lm_set(struct vme_lm_resource *lm,
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
if (bridge->lm_callback[i] != NULL) {
if (bridge->lm_callback[i]) {
mutex_unlock(&lm->mtx);
dev_err(dev, "Location monitor callback attached, "
"can't reset\n");
......@@ -1432,7 +1428,7 @@ static int ca91cx42_lm_attach(struct vme_lm_resource *lm, int monitor,
}
/* Check that a callback isn't already attached */
if (bridge->lm_callback[monitor] != NULL) {
if (bridge->lm_callback[monitor]) {
mutex_unlock(&lm->mtx);
dev_err(dev, "Existing callback attached\n");
return -EBUSY;
......@@ -1567,7 +1563,7 @@ static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
&bridge->crcsr_bus);
if (bridge->crcsr_kernel == NULL) {
if (!bridge->crcsr_kernel) {
dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
"image\n");
return -ENOMEM;
......@@ -1618,21 +1614,15 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* We want to support more than one of each bridge so we need to
* dynamically allocate the bridge structure
*/
ca91cx42_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
if (ca91cx42_bridge == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for device "
"structure\n");
ca91cx42_bridge = kzalloc(sizeof(*ca91cx42_bridge), GFP_KERNEL);
if (!ca91cx42_bridge) {
retval = -ENOMEM;
goto err_struct;
}
vme_init_bridge(ca91cx42_bridge);
ca91cx42_device = kzalloc(sizeof(struct ca91cx42_driver), GFP_KERNEL);
if (ca91cx42_device == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for device "
"structure\n");
ca91cx42_device = kzalloc(sizeof(*ca91cx42_device), GFP_KERNEL);
if (!ca91cx42_device) {
retval = -ENOMEM;
goto err_driver;
}
......@@ -1688,11 +1678,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add master windows to list */
for (i = 0; i < CA91C142_MAX_MASTER; i++) {
master_image = kmalloc(sizeof(struct vme_master_resource),
GFP_KERNEL);
if (master_image == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"master resource structure\n");
master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
if (!master_image) {
retval = -ENOMEM;
goto err_master;
}
......@@ -1706,7 +1693,7 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
VME_SUPER | VME_USER | VME_PROG | VME_DATA;
master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
memset(&master_image->bus_resource, 0,
sizeof(struct resource));
sizeof(master_image->bus_resource));
master_image->kern_base = NULL;
list_add_tail(&master_image->list,
&ca91cx42_bridge->master_resources);
......@@ -1714,11 +1701,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add slave windows to list */
for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
slave_image = kmalloc(sizeof(struct vme_slave_resource),
GFP_KERNEL);
if (slave_image == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"slave resource structure\n");
slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
if (!slave_image) {
retval = -ENOMEM;
goto err_slave;
}
......@@ -1741,11 +1725,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add dma engines to list */
for (i = 0; i < CA91C142_MAX_DMA; i++) {
dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
GFP_KERNEL);
if (dma_ctrlr == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"dma resource structure\n");
dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
if (!dma_ctrlr) {
retval = -ENOMEM;
goto err_dma;
}
......@@ -1762,10 +1743,8 @@ static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add location monitor to list */
lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
if (lm == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"location monitor resource structure\n");
lm = kmalloc(sizeof(*lm), GFP_KERNEL);
if (!lm) {
retval = -ENOMEM;
goto err_lm;
}
......
......@@ -409,7 +409,7 @@ static void fake_lm_check(struct fake_driver *bridge, unsigned long long addr,
/* Each location monitor covers 8 bytes */
if (((lm_base + (8 * i)) <= addr) &&
((lm_base + (8 * i) + 8) > addr)) {
if (bridge->lm_callback[i] != NULL)
if (bridge->lm_callback[i])
bridge->lm_callback[i](
bridge->lm_data[i]);
}
......@@ -866,7 +866,7 @@ static int fake_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
if (bridge->lm_callback[i] != NULL) {
if (bridge->lm_callback[i]) {
mutex_unlock(&lm->mtx);
pr_err("Location monitor callback attached, can't reset\n");
return -EBUSY;
......@@ -940,7 +940,7 @@ static int fake_lm_attach(struct vme_lm_resource *lm, int monitor,
}
/* Check that a callback isn't already attached */
if (bridge->lm_callback[monitor] != NULL) {
if (bridge->lm_callback[monitor]) {
mutex_unlock(&lm->mtx);
pr_err("Existing callback attached\n");
return -EBUSY;
......@@ -978,7 +978,7 @@ static int fake_lm_detach(struct vme_lm_resource *lm, int monitor)
/* If all location monitors disabled, disable global Location Monitor */
tmp = 0;
for (i = 0; i < lm->monitors; i++) {
if (bridge->lm_callback[i] != NULL)
if (bridge->lm_callback[i])
tmp = 1;
}
......@@ -1003,7 +1003,7 @@ static void *fake_alloc_consistent(struct device *parent, size_t size,
{
void *alloc = kmalloc(size, GFP_KERNEL);
if (alloc != NULL)
if (alloc)
*dma = fake_ptr_to_pci(alloc);
return alloc;
......@@ -1039,7 +1039,7 @@ static int fake_crcsr_init(struct vme_bridge *fake_bridge)
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = kzalloc(VME_CRCSR_BUF_SIZE, GFP_KERNEL);
bridge->crcsr_bus = fake_ptr_to_pci(bridge->crcsr_kernel);
if (bridge->crcsr_kernel == NULL)
if (!bridge->crcsr_kernel)
return -ENOMEM;
vstat = fake_slot_get(fake_bridge);
......@@ -1075,14 +1075,14 @@ static int __init fake_init(void)
/* If we want to support more than one bridge at some point, we need to
* dynamically allocate this so we get one per device.
*/
fake_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
if (fake_bridge == NULL) {
fake_bridge = kzalloc(sizeof(*fake_bridge), GFP_KERNEL);
if (!fake_bridge) {
retval = -ENOMEM;
goto err_struct;
}
fake_device = kzalloc(sizeof(struct fake_driver), GFP_KERNEL);
if (fake_device == NULL) {
fake_device = kzalloc(sizeof(*fake_device), GFP_KERNEL);
if (!fake_device) {
retval = -ENOMEM;
goto err_driver;
}
......@@ -1104,9 +1104,8 @@ static int __init fake_init(void)
/* Add master windows to list */
INIT_LIST_HEAD(&fake_bridge->master_resources);
for (i = 0; i < FAKE_MAX_MASTER; i++) {
master_image = kmalloc(sizeof(struct vme_master_resource),
GFP_KERNEL);
if (master_image == NULL) {
master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
if (!master_image) {
retval = -ENOMEM;
goto err_master;
}
......@@ -1131,9 +1130,8 @@ static int __init fake_init(void)
/* Add slave windows to list */
INIT_LIST_HEAD(&fake_bridge->slave_resources);
for (i = 0; i < FAKE_MAX_SLAVE; i++) {
slave_image = kmalloc(sizeof(struct vme_slave_resource),
GFP_KERNEL);
if (slave_image == NULL) {
slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
if (!slave_image) {
retval = -ENOMEM;
goto err_slave;
}
......@@ -1154,9 +1152,8 @@ static int __init fake_init(void)
/* Add location monitor to list */
INIT_LIST_HEAD(&fake_bridge->lm_resources);
lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
if (lm == NULL) {
pr_err("Failed to allocate memory for location monitor resource structure\n");
lm = kmalloc(sizeof(*lm), GFP_KERNEL);
if (!lm) {
retval = -ENOMEM;
goto err_lm;
}
......
......@@ -741,18 +741,16 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
image->kern_base = NULL;
kfree(image->bus_resource.name);
release_resource(&image->bus_resource);
memset(&image->bus_resource, 0, sizeof(struct resource));
memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
/* Exit here if size is zero */
if (size == 0)
return 0;
if (image->bus_resource.name == NULL) {
if (!image->bus_resource.name) {
image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
if (image->bus_resource.name == NULL) {
dev_err(tsi148_bridge->parent, "Unable to allocate "
"memory for resource name\n");
if (!image->bus_resource.name) {
retval = -ENOMEM;
goto err_name;
}
......@@ -778,7 +776,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
image->kern_base = ioremap_nocache(
image->bus_resource.start, size);
if (image->kern_base == NULL) {
if (!image->kern_base) {
dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
retval = -ENOMEM;
goto err_remap;
......@@ -790,7 +788,7 @@ static int tsi148_alloc_resource(struct vme_master_resource *image,
release_resource(&image->bus_resource);
err_resource:
kfree(image->bus_resource.name);
memset(&image->bus_resource, 0, sizeof(struct resource));
memset(&image->bus_resource, 0, sizeof(image->bus_resource));
err_name:
return retval;
}
......@@ -804,7 +802,7 @@ static void tsi148_free_resource(struct vme_master_resource *image)
image->kern_base = NULL;
release_resource(&image->bus_resource);
kfree(image->bus_resource.name);
memset(&image->bus_resource, 0, sizeof(struct resource));
memset(&image->bus_resource, 0, sizeof(image->bus_resource));
}
/*
......@@ -1641,10 +1639,8 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
tsi148_bridge = list->parent->parent;
/* Descriptor must be aligned on 64-bit boundaries */
entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
if (entry == NULL) {
dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
"dma resource structure\n");
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
retval = -ENOMEM;
goto err_mem;
}
......@@ -1661,7 +1657,7 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
/* Given we are going to fill out the structure, we probably don't
* need to zero it, but better safe than sorry for now.
*/
memset(&entry->descriptor, 0, sizeof(struct tsi148_dma_descriptor));
memset(&entry->descriptor, 0, sizeof(entry->descriptor));
/* Fill out source part */
switch (src->type) {
......@@ -1756,8 +1752,9 @@ static int tsi148_dma_list_add(struct vme_dma_list *list,
list_add_tail(&entry->list, &list->entries);
entry->dma_handle = dma_map_single(tsi148_bridge->parent,
&entry->descriptor,
sizeof(struct tsi148_dma_descriptor), DMA_TO_DEVICE);
&entry->descriptor,
sizeof(entry->descriptor),
DMA_TO_DEVICE);
if (dma_mapping_error(tsi148_bridge->parent, entry->dma_handle)) {
dev_err(tsi148_bridge->parent, "DMA mapping error\n");
retval = -EINVAL;
......@@ -1946,7 +1943,7 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
if (bridge->lm_callback[i] != NULL) {
if (bridge->lm_callback[i]) {
mutex_unlock(&lm->mtx);
dev_err(tsi148_bridge->parent, "Location monitor "
"callback attached, can't reset\n");
......@@ -2071,7 +2068,7 @@ static int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
}
/* Check that a callback isn't already attached */
if (bridge->lm_callback[monitor] != NULL) {
if (bridge->lm_callback[monitor]) {
mutex_unlock(&lm->mtx);
dev_err(tsi148_bridge->parent, "Existing callback attached\n");
return -EBUSY;
......@@ -2208,7 +2205,7 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
&bridge->crcsr_bus);
if (bridge->crcsr_kernel == NULL) {
if (!bridge->crcsr_kernel) {
dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
"CR/CSR image\n");
return -ENOMEM;
......@@ -2294,19 +2291,15 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* If we want to support more than one of each bridge, we need to
* dynamically generate this so we get one per device
*/
tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
if (tsi148_bridge == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for device "
"structure\n");
tsi148_bridge = kzalloc(sizeof(*tsi148_bridge), GFP_KERNEL);
if (!tsi148_bridge) {
retval = -ENOMEM;
goto err_struct;
}
vme_init_bridge(tsi148_bridge);
tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
if (tsi148_device == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for device "
"structure\n");
tsi148_device = kzalloc(sizeof(*tsi148_device), GFP_KERNEL);
if (!tsi148_device) {
retval = -ENOMEM;
goto err_driver;
}
......@@ -2371,10 +2364,9 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
master_num--;
tsi148_device->flush_image =
kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
if (tsi148_device->flush_image == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"flush resource structure\n");
kmalloc(sizeof(*tsi148_device->flush_image),
GFP_KERNEL);
if (!tsi148_device->flush_image) {
retval = -ENOMEM;
goto err_master;
}
......@@ -2383,17 +2375,14 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
tsi148_device->flush_image->locked = 1;
tsi148_device->flush_image->number = master_num;
memset(&tsi148_device->flush_image->bus_resource, 0,
sizeof(struct resource));
sizeof(tsi148_device->flush_image->bus_resource));
tsi148_device->flush_image->kern_base = NULL;
}
/* Add master windows to list */
for (i = 0; i < master_num; i++) {
master_image = kmalloc(sizeof(struct vme_master_resource),
GFP_KERNEL);
if (master_image == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"master resource structure\n");
master_image = kmalloc(sizeof(*master_image), GFP_KERNEL);
if (!master_image) {
retval = -ENOMEM;
goto err_master;
}
......@@ -2410,7 +2399,7 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
VME_PROG | VME_DATA;
master_image->width_attr = VME_D16 | VME_D32;
memset(&master_image->bus_resource, 0,
sizeof(struct resource));
sizeof(master_image->bus_resource));
master_image->kern_base = NULL;
list_add_tail(&master_image->list,
&tsi148_bridge->master_resources);
......@@ -2418,11 +2407,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add slave windows to list */
for (i = 0; i < TSI148_MAX_SLAVE; i++) {
slave_image = kmalloc(sizeof(struct vme_slave_resource),
GFP_KERNEL);
if (slave_image == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"slave resource structure\n");
slave_image = kmalloc(sizeof(*slave_image), GFP_KERNEL);
if (!slave_image) {
retval = -ENOMEM;
goto err_slave;
}
......@@ -2442,11 +2428,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Add dma engines to list */
for (i = 0; i < TSI148_MAX_DMA; i++) {
dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
GFP_KERNEL);
if (dma_ctrlr == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"dma resource structure\n");
dma_ctrlr = kmalloc(sizeof(*dma_ctrlr), GFP_KERNEL);
if (!dma_ctrlr) {
retval = -ENOMEM;
goto err_dma;
}
......@@ -2465,10 +2448,8 @@ static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* Add location monitor to list */
lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
if (lm == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"location monitor resource structure\n");
lm = kmalloc(sizeof(*lm), GFP_KERNEL);
if (!lm) {
retval = -ENOMEM;
goto err_lm;
}
......
......@@ -148,4 +148,19 @@ config W1_SLAVE_DS28E04
If you are unsure, say N.
config W1_SLAVE_DS28E17
tristate "1-wire-to-I2C master bridge (DS28E17)"
select CRC16
depends on I2C
help
Say Y here if you want to use the DS28E17 1-wire-to-I2C master bridge.
For each DS28E17 detected, a new I2C adapter is created within the
kernel. I2C devices on that bus can be configured to be used by the
kernel and userspace tools as on any other "native" I2C bus.
This driver is also available as a module. If so, the module
will be called w1_ds28e17.
If you are unsure, say N.
endmenu
......@@ -18,3 +18,4 @@ obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o
obj-$(CONFIG_W1_SLAVE_DS2780) += w1_ds2780.o
obj-$(CONFIG_W1_SLAVE_DS2781) += w1_ds2781.o
obj-$(CONFIG_W1_SLAVE_DS28E04) += w1_ds28e04.o
obj-$(CONFIG_W1_SLAVE_DS28E17) += w1_ds28e17.o
......@@ -58,7 +58,7 @@ static u8 w1_read_bit(struct w1_master *dev);
* @dev: the master device
* @bit: 0 - write a 0, 1 - write a 0 read the level
*/
static u8 w1_touch_bit(struct w1_master *dev, int bit)
u8 w1_touch_bit(struct w1_master *dev, int bit)
{
if (dev->bus_master->touch_bit)
return dev->bus_master->touch_bit(dev->bus_master->data, bit);
......@@ -69,6 +69,7 @@ static u8 w1_touch_bit(struct w1_master *dev, int bit)
return 0;
}
}
EXPORT_SYMBOL_GPL(w1_touch_bit);
/**
* w1_write_bit() - Generates a write-0 or write-1 cycle.
......
......@@ -719,6 +719,10 @@ struct vmbus_channel {
struct vmbus_close_msg close_msg;
/* Statistics */
u64 interrupts; /* Host to Guest interrupts */
u64 sig_events; /* Guest to Host events */
/* Channel callback's invoked in softirq context */
struct tasklet_struct callback_event;
void (*onchannel_callback)(void *context);
......@@ -828,6 +832,11 @@ struct vmbus_channel {
*/
struct rcu_head rcu;
/*
* For sysfs per-channel properties.
*/
struct kobject kobj;
/*
* For performance critical channels (storage, networking
* etc,), Hyper-V has a mechanism to enhance the throughput
......@@ -1089,6 +1098,7 @@ struct hv_device {
struct device device;
struct vmbus_channel *channel;
struct kset *channels_kset;
};
......
......@@ -293,6 +293,7 @@ void w1_unregister_family(struct w1_family *family);
w1_unregister_family)
u8 w1_triplet(struct w1_master *dev, int bdir);
u8 w1_touch_bit(struct w1_master *dev, int bit);
void w1_write_8(struct w1_master *, u8);
u8 w1_read_8(struct w1_master *);
int w1_reset_bus(struct w1_master *);
......