Commit e1f1f073 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6:
  Staging: sep: remove driver
  Staging: batman-adv: Don't write in not allocated packet_buff
  Staging: batman-adv: Don't use net_dev after dev_put
  Staging: batman-adv: Create batman_if only on register event
  Staging: batman-adv: fix own mac address detection
  Staging: batman-adv: always reply batman icmp packets with primary mac
  Staging: batman-adv: fix batman icmp originating from secondary interface
  Staging: batman-adv: unify orig_hash_lock spinlock handling to avoid deadlocks
  Staging: batman-adv: Fix merge of linus tree
  Staging: spectra: removes unused functions
  Staging: spectra: initialize lblk variable
  Staging: spectra: removes unused variable
  Staging: spectra: remove duplicate GLOB_VERSION definition
  Staging: spectra: don't use locked_ioctl, fix build
  Staging: use new REQ_FLUSH flag, fix build breakage
  Staging: spectra: removes q->prepare_flush_fn, fix build breakage
parents 472e449c d49824c0
......@@ -113,8 +113,6 @@ source "drivers/staging/vme/Kconfig"
source "drivers/staging/memrar/Kconfig"
source "drivers/staging/sep/Kconfig"
source "drivers/staging/iio/Kconfig"
source "drivers/staging/zram/Kconfig"
......
......@@ -38,7 +38,6 @@ obj-$(CONFIG_FB_UDL) += udlfb/
obj-$(CONFIG_HYPERV) += hv/
obj-$(CONFIG_VME_BUS) += vme/
obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/
obj-$(CONFIG_DX_SEP) += sep/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_WLAGS49_H2) += wlags49_h2/
......
......@@ -267,6 +267,10 @@ static ssize_t store_log_level(struct kobject *kobj, struct attribute *attr,
if (atomic_read(&bat_priv->log_level) == log_level_tmp)
return count;
bat_info(net_dev, "Changing log level from: %i to: %li\n",
atomic_read(&bat_priv->log_level),
log_level_tmp);
atomic_set(&bat_priv->log_level, (unsigned)log_level_tmp);
return count;
}
......
......@@ -129,6 +129,9 @@ static bool hardif_is_iface_up(struct batman_if *batman_if)
static void update_mac_addresses(struct batman_if *batman_if)
{
if (!batman_if || !batman_if->packet_buff)
return;
addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr);
memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
......@@ -194,8 +197,6 @@ static void hardif_activate_interface(struct net_device *net_dev,
if (batman_if->if_status != IF_INACTIVE)
return;
dev_hold(batman_if->net_dev);
update_mac_addresses(batman_if);
batman_if->if_status = IF_TO_BE_ACTIVATED;
......@@ -222,8 +223,6 @@ static void hardif_deactivate_interface(struct net_device *net_dev,
(batman_if->if_status != IF_TO_BE_ACTIVATED))
return;
dev_put(batman_if->net_dev);
batman_if->if_status = IF_INACTIVE;
bat_info(net_dev, "Interface deactivated: %s\n", batman_if->dev);
......@@ -318,11 +317,13 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
if (ret != 1)
goto out;
dev_hold(net_dev);
batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
if (!batman_if) {
pr_err("Can't add interface (%s): out of memory\n",
net_dev->name);
goto out;
goto release_dev;
}
batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC);
......@@ -336,6 +337,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
batman_if->if_num = -1;
batman_if->net_dev = net_dev;
batman_if->if_status = IF_NOT_IN_USE;
batman_if->packet_buff = NULL;
INIT_LIST_HEAD(&batman_if->list);
check_known_mac_addr(batman_if->net_dev->dev_addr);
......@@ -346,6 +348,8 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev)
kfree(batman_if->dev);
free_if:
kfree(batman_if);
release_dev:
dev_put(net_dev);
out:
return NULL;
}
......@@ -374,6 +378,7 @@ static void hardif_remove_interface(struct batman_if *batman_if)
batman_if->if_status = IF_TO_BE_REMOVED;
list_del_rcu(&batman_if->list);
sysfs_del_hardif(&batman_if->hardif_obj);
dev_put(batman_if->net_dev);
call_rcu(&batman_if->rcu, hardif_free_interface);
}
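The hard-interface.c hunks above implement "Don't use net_dev after dev_put": the reference on the net_device is now taken once in hardif_add_interface() and dropped only in hardif_remove_interface() (or on the allocation error path), instead of being bounced in activate/deactivate. A minimal sketch of that reference-counting pattern, using a hypothetical my_if structure rather than the batman-adv code:

#include <linux/netdevice.h>
#include <linux/slab.h>

struct my_if {
	struct net_device *net_dev;
};

/* take the reference when the pointer is stored ... */
static struct my_if *my_if_add(struct net_device *net_dev)
{
	struct my_if *p;

	dev_hold(net_dev);

	p = kmalloc(sizeof(*p), GFP_ATOMIC);
	if (!p) {
		dev_put(net_dev);	/* error path drops it again */
		return NULL;
	}

	p->net_dev = net_dev;
	return p;
}

/* ... and drop it only when the stored pointer goes away for good */
static void my_if_remove(struct my_if *p)
{
	dev_put(p->net_dev);
	kfree(p);
}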
......@@ -393,15 +398,13 @@ static int hard_if_event(struct notifier_block *this,
/* FIXME: each batman_if will be attached to a softif */
struct bat_priv *bat_priv = netdev_priv(soft_device);
if (!batman_if)
if (!batman_if && event == NETDEV_REGISTER)
batman_if = hardif_add_interface(net_dev);
if (!batman_if)
goto out;
switch (event) {
case NETDEV_REGISTER:
break;
case NETDEV_UP:
hardif_activate_interface(soft_device, bat_priv, batman_if);
break;
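The hard_if_event() hunk corresponds to "Create batman_if only on register event": the notifier now allocates per-interface state only for NETDEV_REGISTER instead of for whatever event happens to arrive first. A minimal sketch of a netdevice notifier following that rule, with hypothetical state handling (in this kernel generation the notifier payload is the struct net_device itself):

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/slab.h>

static void *my_if_state;	/* hypothetical per-interface state */

static int my_hard_if_event(struct notifier_block *this,
			    unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;

	/* allocate state only when a device registers, never as a
	 * side effect of some other event arriving first */
	if (!my_if_state && event == NETDEV_REGISTER)
		my_if_state = kzalloc(64, GFP_ATOMIC);	/* placeholder allocation */
	if (!my_if_state)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		pr_info("%s: interface up\n", net_dev->name);
		break;
	case NETDEV_UNREGISTER:
		kfree(my_if_state);
		my_if_state = NULL;
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block my_hard_if_notifier = {
	.notifier_call = my_hard_if_event,
};

/* registered once at init: register_netdevice_notifier(&my_hard_if_notifier); */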
......@@ -442,8 +445,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct bat_priv *bat_priv = netdev_priv(soft_device);
struct batman_packet *batman_packet;
struct batman_if *batman_if;
struct net_device_stats *stats;
struct rtnl_link_stats64 temp;
int ret;
skb = skb_share_check(skb, GFP_ATOMIC);
......@@ -479,12 +480,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
if (batman_if->if_status != IF_ACTIVE)
goto err_free;
stats = (struct net_device_stats *)dev_get_stats(skb->dev, &temp);
if (stats) {
stats->rx_packets++;
stats->rx_bytes += skb->len;
}
batman_packet = (struct batman_packet *)skb->data;
if (batman_packet->version != COMPAT_VERSION) {
......
......@@ -67,6 +67,7 @@ static int bat_socket_open(struct inode *inode, struct file *file)
INIT_LIST_HEAD(&socket_client->queue_list);
socket_client->queue_len = 0;
socket_client->index = i;
socket_client->bat_priv = inode->i_private;
spin_lock_init(&socket_client->lock);
init_waitqueue_head(&socket_client->queue_wait);
......@@ -151,9 +152,8 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
static ssize_t bat_socket_write(struct file *file, const char __user *buff,
size_t len, loff_t *off)
{
/* FIXME: each orig_node->batman_if will be attached to a softif */
struct bat_priv *bat_priv = netdev_priv(soft_device);
struct socket_client *socket_client = file->private_data;
struct bat_priv *bat_priv = socket_client->bat_priv;
struct icmp_packet_rr icmp_packet;
struct orig_node *orig_node;
struct batman_if *batman_if;
......@@ -168,6 +168,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
return -EINVAL;
}
if (!bat_priv->primary_if)
return -EFAULT;
if (len >= sizeof(struct icmp_packet_rr))
packet_len = sizeof(struct icmp_packet_rr);
......@@ -223,7 +226,8 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
if (batman_if->if_status != IF_ACTIVE)
goto dst_unreach;
memcpy(icmp_packet.orig, batman_if->net_dev->dev_addr, ETH_ALEN);
memcpy(icmp_packet.orig,
bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
if (packet_len == sizeof(struct icmp_packet_rr))
memcpy(icmp_packet.rr, batman_if->net_dev->dev_addr, ETH_ALEN);
......@@ -271,7 +275,7 @@ int bat_socket_setup(struct bat_priv *bat_priv)
goto err;
d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
bat_priv->debug_dir, NULL, &fops);
bat_priv->debug_dir, bat_priv, &fops);
if (d)
goto err;
......
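The icmp_socket hunks pass bat_priv as the private data argument of debugfs_create_file() and read it back in open() from inode->i_private, replacing the reliance on the global soft_device. A minimal sketch of that debugfs private-data pattern, with hypothetical names:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>

struct my_priv {
	int value;
};

static int my_open(struct inode *inode, struct file *file)
{
	/* debugfs stores the 'data' argument of debugfs_create_file()
	 * in inode->i_private; keep it for the other file operations */
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t my_read(struct file *file, char __user *buf,
		       size_t count, loff_t *ppos)
{
	struct my_priv *priv = file->private_data;
	char tmp[16];
	int len = scnprintf(tmp, sizeof(tmp), "%d\n", priv->value);

	return simple_read_from_buffer(buf, count, ppos, tmp, len);
}

static const struct file_operations my_fops = {
	.owner = THIS_MODULE,
	.open  = my_open,
	.read  = my_read,
};

/* creation site: per-instance data goes in as the 'data' argument */
/* debugfs_create_file("my_file", S_IRUSR, parent_dir, priv, &my_fops); */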
......@@ -250,10 +250,13 @@ int choose_orig(void *data, int32_t size)
int is_my_mac(uint8_t *addr)
{
struct batman_if *batman_if;
rcu_read_lock();
list_for_each_entry_rcu(batman_if, &if_list, list) {
if ((batman_if->net_dev) &&
(compare_orig(batman_if->net_dev->dev_addr, addr))) {
if (batman_if->if_status != IF_ACTIVE)
continue;
if (compare_orig(batman_if->net_dev->dev_addr, addr)) {
rcu_read_unlock();
return 1;
}
......
......@@ -391,11 +391,12 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
{
struct orig_node *orig_node;
unsigned long flags;
HASHIT(hashit);
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
spin_lock(&orig_hash_lock);
spin_lock_irqsave(&orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
......@@ -404,11 +405,11 @@ int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
goto err;
}
spin_unlock(&orig_hash_lock);
spin_unlock_irqrestore(&orig_hash_lock, flags);
return 0;
err:
spin_unlock(&orig_hash_lock);
spin_unlock_irqrestore(&orig_hash_lock, flags);
return -ENOMEM;
}
......@@ -468,12 +469,13 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
{
struct batman_if *batman_if_tmp;
struct orig_node *orig_node;
unsigned long flags;
HASHIT(hashit);
int ret;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
* if_num */
spin_lock(&orig_hash_lock);
spin_lock_irqsave(&orig_hash_lock, flags);
while (hash_iterate(orig_hash, &hashit)) {
orig_node = hashit.bucket->data;
......@@ -500,10 +502,10 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
rcu_read_unlock();
batman_if->if_num = -1;
spin_unlock(&orig_hash_lock);
spin_unlock_irqrestore(&orig_hash_lock, flags);
return 0;
err:
spin_unlock(&orig_hash_lock);
spin_unlock_irqrestore(&orig_hash_lock, flags);
return -ENOMEM;
}
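The originator.c hunks above are part of "unify orig_hash_lock spinlock handling to avoid deadlocks": a lock that is also taken from interrupt or softirq context must be acquired with the irqsave variants in process context, otherwise an interrupt on the same CPU can try to re-take the already held lock and deadlock. A minimal sketch of the pattern with a hypothetical lock and counter:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static unsigned long my_count;

/* process-context path: disable local interrupts while holding the lock */
static void bump_from_process_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	my_count++;
	spin_unlock_irqrestore(&my_lock, flags);
}

/* interrupt-context path can then take the same lock without risk of
 * interrupting a holder on the same CPU */
static void bump_from_irq_context(void)
{
	spin_lock(&my_lock);
	my_count++;
	spin_unlock(&my_lock);
}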
......@@ -783,6 +783,8 @@ int recv_bat_packet(struct sk_buff *skb,
static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
{
/* FIXME: each batman_if will be attached to a softif */
struct bat_priv *bat_priv = netdev_priv(soft_device);
struct orig_node *orig_node;
struct icmp_packet_rr *icmp_packet;
struct ethhdr *ethhdr;
......@@ -801,6 +803,9 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
return NET_RX_DROP;
}
if (!bat_priv->primary_if)
return NET_RX_DROP;
/* answer echo request (ping) */
/* get routing information */
spin_lock_irqsave(&orig_hash_lock, flags);
......@@ -830,7 +835,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
}
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
memcpy(icmp_packet->orig,
bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
icmp_packet->msg_type = ECHO_REPLY;
icmp_packet->ttl = TTL;
......@@ -845,6 +851,8 @@ static int recv_my_icmp_packet(struct sk_buff *skb, size_t icmp_len)
static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
{
/* FIXME: each batman_if will be attached to a softif */
struct bat_priv *bat_priv = netdev_priv(soft_device);
struct orig_node *orig_node;
struct icmp_packet *icmp_packet;
struct ethhdr *ethhdr;
......@@ -865,6 +873,9 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
return NET_RX_DROP;
}
if (!bat_priv->primary_if)
return NET_RX_DROP;
/* get routing information */
spin_lock_irqsave(&orig_hash_lock, flags);
orig_node = ((struct orig_node *)
......@@ -892,7 +903,8 @@ static int recv_icmp_ttl_exceeded(struct sk_buff *skb, size_t icmp_len)
}
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig, ethhdr->h_dest, ETH_ALEN);
memcpy(icmp_packet->orig,
bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
icmp_packet->msg_type = TTL_EXCEEDED;
icmp_packet->ttl = TTL;
......
......@@ -126,6 +126,7 @@ struct socket_client {
unsigned char index;
spinlock_t lock;
wait_queue_head_t queue_wait;
struct bat_priv *bat_priv;
};
struct socket_packet {
......
config DX_SEP
tristate "Discretix SEP driver"
# depends on MRST
depends on RAR_REGISTER && PCI
default y
help
Discretix SEP driver
If unsure say M. The compiled module will be
called sep_driver.ko
obj-$(CONFIG_DX_SEP) := sep_driver.o
Todo's so far (from Alan Cox)
- Fix firmware loading
- Get firmware into firmware git tree
- Review and tidy each algorithm function
- Check whether it can be plugged into any of the kernel crypto API
interfaces
- Do something about the magic shared memory interface and replace it
with something saner (in Linux terms)
#ifndef __SEP_DEV_H__
#define __SEP_DEV_H__
/*
*
* sep_dev.h - Security Processor Device Structures
*
* Copyright(c) 2009 Intel Corporation. All rights reserved.
* Copyright(c) 2009 Discretix. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* CONTACTS:
*
* Alan Cox alan@linux.intel.com
*
*/
struct sep_device {
/* pointer to pci dev */
struct pci_dev *pdev;
unsigned long in_use;
/* address of the shared memory allocated during init for SEP driver
(coherent alloc) */
void *shared_addr;
/* the physical address of the shared area */
dma_addr_t shared_bus;
/* restricted access region (coherent alloc) */
dma_addr_t rar_bus;
void *rar_addr;
/* firmware regions: cache is at rar_addr */
unsigned long cache_size;
/* follows the cache */
dma_addr_t resident_bus;
unsigned long resident_size;
void *resident_addr;
/* start address of the access to the SEP registers from driver */
void __iomem *reg_addr;
/* transaction counter that coordinates the transactions between SEP and HOST */
unsigned long send_ct;
/* counter for the messages from sep */
unsigned long reply_ct;
/* counter for the number of bytes allocated in the pool for the current
transaction */
unsigned long data_pool_bytes_allocated;
/* array of pointers to the pages that represent input data for the synchronic
DMA action */
struct page **in_page_array;
/* array of pointers to the pages that represent out data for the synchronic
DMA action */
struct page **out_page_array;
/* number of pages in the sep_in_page_array */
unsigned long in_num_pages;
/* number of pages in the sep_out_page_array */
unsigned long out_num_pages;
/* global data for every flow */
struct sep_flow_context_t flows[SEP_DRIVER_NUM_FLOWS];
/* pointer to the workqueue that handles the flow done interrupts */
struct workqueue_struct *flow_wq;
};
static struct sep_device *sep_dev;
static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
{
void __iomem *addr = dev->reg_addr + reg;
writel(value, addr);
}
static inline u32 sep_read_reg(struct sep_device *dev, int reg)
{
void __iomem *addr = dev->reg_addr + reg;
return readl(addr);
}
/* wait for SRAM write complete(indirect write */
static inline void sep_wait_sram_write(struct sep_device *dev)
{
u32 reg_val;
do
reg_val = sep_read_reg(dev, HW_SRAM_DATA_READY_REG_ADDR);
while (!(reg_val & 1));
}
#endif
/*
*
* sep_driver_config.h - Security Processor Driver configuration
*
* Copyright(c) 2009 Intel Corporation. All rights reserved.
* Copyright(c) 2009 Discretix. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* CONTACTS:
*
* Mark Allyn mark.a.allyn@intel.com
*
* CHANGES:
*
* 2009.06.26 Initial publish
*
*/
#ifndef __SEP_DRIVER_CONFIG_H__
#define __SEP_DRIVER_CONFIG_H__
/*--------------------------------------
DRIVER CONFIGURATION FLAGS
-------------------------------------*/
/* if flag is on , then the driver is running in polling and
not interrupt mode */
#define SEP_DRIVER_POLLING_MODE 1
/* flag which defines if the shared area address should be
reconfiged (send to SEP anew) during init of the driver */
#define SEP_DRIVER_RECONFIG_MESSAGE_AREA 0
/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */
#define SEP_DRIVER_ARM_DEBUG_MODE 0
/*-------------------------------------------
INTERNAL DATA CONFIGURATION
-------------------------------------------*/
/* flag for the input array */
#define SEP_DRIVER_IN_FLAG 0
/* flag for output array */
#define SEP_DRIVER_OUT_FLAG 1
/* maximum number of entries in one LLI tables */
#define SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP 8
/*--------------------------------------------------------
SHARED AREA memory total size is 36K
it is divided is following:
SHARED_MESSAGE_AREA 8K }
}
STATIC_POOL_AREA 4K } MAPPED AREA ( 24 K)
}
DATA_POOL_AREA 12K }
SYNCHRONIC_DMA_TABLES_AREA 5K
FLOW_DMA_TABLES_AREA 4K
SYSTEM_MEMORY_AREA 3k
SYSTEM_MEMORY total size is 3k
it is divided as following:
TIME_MEMORY_AREA 8B
-----------------------------------------------------------*/
/*
the maximum length of the message - the rest of the message shared
area will be dedicated to the dma lli tables
*/
#define SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES (8 * 1024)
/* the size of the message shared area in pages */
#define SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES (8 * 1024)
/* the size of the data pool static area in pages */
#define SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES (4 * 1024)
/* the size of the data pool shared area size in pages */
#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (12 * 1024)
/* the size of the message shared area in pages */
#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 5)
/* the size of the data pool shared area size in pages */
#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4)
/* system data (time, caller id etc') pool */
#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES 100
/* area size that is mapped - we map the MESSAGE AREA, STATIC POOL and
DATA POOL areas. area must be module 4k */
#define SEP_DRIVER_MMMAP_AREA_SIZE (1024 * 24)
/*-----------------------------------------------
offsets of the areas starting from the shared area start address
*/
/* message area offset */
#define SEP_DRIVER_MESSAGE_AREA_OFFSET_IN_BYTES 0
/* static pool area offset */
#define SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES \
(SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
/* data pool area offset */
#define SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES \
(SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES + \
SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES)
/* synhronic dma tables area offset */
#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES \
(SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + \
SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)
/* sep driver flow dma tables area offset */
#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES \
(SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES)
/* system memory offset in bytes */
#define SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES \
(SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES)
/* offset of the time area */
#define SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES \
(SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES)
/* start physical address of the SEP registers memory in HOST */
#define SEP_IO_MEM_REGION_START_ADDRESS 0x80000000
/* size of the SEP registers memory region in HOST (for now 100 registers) */
#define SEP_IO_MEM_REGION_SIZE (2 * 0x100000)
/* define the number of IRQ for SEP interrupts */
#define SEP_DIRVER_IRQ_NUM 1
/* maximum number of add buffers */
#define SEP_MAX_NUM_ADD_BUFFERS 100
/* number of flows */
#define SEP_DRIVER_NUM_FLOWS 4
/* maximum number of entries in flow table */
#define SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE 25
/* offset of the num entries in the block length entry of the LLI */
#define SEP_NUM_ENTRIES_OFFSET_IN_BITS 24
/* offset of the interrupt flag in the block length entry of the LLI */
#define SEP_INT_FLAG_OFFSET_IN_BITS 31
/* mask for extracting data size from LLI */
#define SEP_TABLE_DATA_SIZE_MASK 0xFFFFFF
/* mask for entries after being shifted left */
#define SEP_NUM_ENTRIES_MASK 0x7F
/* default flow id */
#define SEP_FREE_FLOW_ID 0xFFFFFFFF
/* temp flow id used during cretiong of new flow until receiving
real flow id from sep */
#define SEP_TEMP_FLOW_ID (SEP_DRIVER_NUM_FLOWS + 1)
/* maximum add buffers message length in bytes */
#define SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES (7 * 4)
/* maximum number of concurrent virtual buffers */
#define SEP_MAX_VIRT_BUFFERS_CONCURRENT 100
/* the token that defines the start of time address */
#define SEP_TIME_VAL_TOKEN 0x12345678
/* DEBUG LEVEL MASKS */
#define SEP_DEBUG_LEVEL_BASIC 0x1
#define SEP_DEBUG_LEVEL_EXTENDED 0x4
/* Debug helpers */
#define dbg(fmt, args...) \
do {\
if (debug & SEP_DEBUG_LEVEL_BASIC) \
printk(KERN_DEBUG fmt, ##args); \
} while(0);
#define edbg(fmt, args...) \
do { \
if (debug & SEP_DEBUG_LEVEL_EXTENDED) \
printk(KERN_DEBUG fmt, ##args); \
} while(0);
#endif
......@@ -27,6 +27,7 @@
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
/**** Helper functions used for Div, Remainder operation on u64 ****/
......@@ -113,7 +114,6 @@ u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
#define GLOB_SBD_NAME "nd"
#define GLOB_SBD_IRQ_NUM (29)
#define GLOB_VERSION "driver version 20091110"
#define GLOB_SBD_IOCTL_GC (0x7701)
#define GLOB_SBD_IOCTL_WL (0x7702)
......@@ -272,13 +272,6 @@ static int get_res_blk_num_os(void)
return res_blks;
}
static void SBD_prepare_flush(struct request_queue *q, struct request *rq)
{
rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
/* rq->timeout = 5 * HZ; */
rq->cmd[0] = REQ_LB_OP_FLUSH;
}
/* Transfer a full request. */
static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
{
......@@ -296,8 +289,7 @@ static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
IdentifyDeviceData.PagesPerBlock *
res_blks_os;
if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
req->cmd[0] == REQ_LB_OP_FLUSH) {
if (req->cmd_type & REQ_FLUSH) {
if (force_flush_cache()) /* Fail to flush cache */
return -EIO;
else
......@@ -597,11 +589,23 @@ int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
return -ENOTTY;
}
int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
int ret;
lock_kernel();
ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
unlock_kernel();
return ret;
}
static struct block_device_operations GLOB_SBD_ops = {
.owner = THIS_MODULE,
.open = GLOB_SBD_open,
.release = GLOB_SBD_release,
.locked_ioctl = GLOB_SBD_ioctl,
.ioctl = GLOB_SBD_unlocked_ioctl,
.getgeo = GLOB_SBD_getgeo,
};
......@@ -650,8 +654,7 @@ static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
/* Here we force report 512 byte hardware sector size to Kernel */
blk_queue_logical_block_size(dev->queue, 512);
blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH,
SBD_prepare_flush);
blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH);
dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
if (IS_ERR(dev->thread)) {
......
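The ffsport.c hunks above fix the build after .locked_ioctl was removed from block_device_operations: the existing handler, which still assumes the big kernel lock, is wrapped in an unlocked entry point that takes the BKL explicitly. A minimal sketch of that wrapper pattern for this kernel generation (lock_kernel()/unlock_kernel() still exist here), with hypothetical names:

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

/* original handler, written under the assumption that the BKL is held */
static int my_locked_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;		/* hypothetical body */
}

/* .locked_ioctl is gone, so take the BKL explicitly until the handler
 * has been audited to rely on its own locking */
static int my_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long arg)
{
	int ret;

	lock_kernel();
	ret = my_locked_ioctl(bdev, mode, cmd, arg);
	unlock_kernel();
	return ret;
}

static const struct block_device_operations my_bd_ops = {
	.owner = THIS_MODULE,
	.ioctl = my_unlocked_ioctl,
};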