Commit 12e55508 authored by Linus Torvalds

Merge branch 'staging-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging

* 'staging-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: (466 commits)
  net/hyperv: Add support for jumbo frame up to 64KB
  net/hyperv: Add NETVSP protocol version negotiation
  net/hyperv: Remove unnecessary kmap_atomic in netvsc driver
  staging/rtl8192e: Register against lib80211
  staging/rtl8192e: Convert to lib80211_crypt_info
  staging/rtl8192e: Convert to lib80211_crypt_data and lib80211_crypt_ops
  staging/rtl8192e: Add lib80211.h to rtllib.h
  staging/mei: add watchdog device registration wrappers
  drm/omap: GEM, deal with cache
  staging: vt6656: int.c, int.h: Change return of function to void
  staging: usbip: removed unused definitions from header
  staging: usbip: removed dead code from receive function
  staging:iio: Drop {mark,unmark}_in_use callbacks
  staging:iio: Drop buffer mark_param_change callback
  staging:iio: Drop the unused buffer enable() and is_enabled() callbacks
  staging:iio: Drop buffer busy flag
  staging:iio: Make sure a device is only opened once at a time
  staging:iio: Disallow modifying buffer size when buffer is enabled
  staging:iio: Disallow changing scan elements in all buffered modes
  staging:iio: Use iio_buffer_enabled instead of open coding it
  ...

Fix up conflict in drivers/staging/iio/adc/ad799x_core.c (removal of
module_init due to using module_i2c_driver() helper, next to removal of
MODULE_ALIAS due to using MODULE_DEVICE_TABLE instead).
parents 55b81e6f 4d447c9a
NVIDIA compliant embedded controller
Required properties:
- compatible : should be "nvidia,nvec".
- reg : the iomem of the i2c slave controller
- interrupts : the interrupt line of the i2c slave controller
- clock-frequency : the frequency of the i2c bus
- gpios : the gpio used for ec request
- slave-addr: the i2c address of the slave controller
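Example (an illustrative node; the reg, interrupt, clock, and gpio values below are hypothetical and board-specific):

	nvec@7000c500 {
		compatible = "nvidia,nvec";
		reg = <0x7000c500 0x100>;
		interrupts = <0 92 0x04>;
		clock-frequency = <80000>;
		gpios = <&gpio 170 0>;
		slave-addr = <138>;
	};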
@@ -184,11 +184,6 @@ S: Maintained
F: Documentation/filesystems/9p.txt
F: fs/9p/
A2232 SERIAL BOARD DRIVER
L: linux-m68k@lists.linux-m68k.org
S: Orphan
F: drivers/staging/generic_serial/ser_a2232*
AACRAID SCSI RAID DRIVER
M: Adaptec OEM Raid Solutions <aacraid@adaptec.com>
L: linux-scsi@vger.kernel.org
@@ -1587,7 +1582,7 @@ M: Franky (Zhenhui) Lin <frankyl@broadcom.com>
M: Kan Yan <kanyan@broadcom.com>
L: linux-wireless@vger.kernel.org
S: Supported
F: drivers/staging/brcm80211/
F: drivers/net/wireless/brcm80211/
BROADCOM BNX2FC 10 GIGABIT FCOE DRIVER
M: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
@@ -1891,12 +1886,6 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/compal-laptop.c
COMPUTONE INTELLIPORT MULTIPORT CARD
W: http://www.wittsend.com/computone.html
S: Orphan
F: Documentation/serial/computone.txt
F: drivers/staging/tty/ip2/
CONEXANT ACCESSRUNNER USB DRIVER
M: Simon Arlott <cxacru@fire.lp0.eu>
L: accessrunner-general@lists.sourceforge.net
@@ -2200,15 +2189,6 @@ F: drivers/md/dm*
F: include/linux/device-mapper.h
F: include/linux/dm-*.h
DIGI INTL. EPCA DRIVER
M: "Digi International, Inc" <Eng.Linux@digi.com>
L: Eng.Linux@digi.com
W: http://www.digi.com
S: Orphan
F: Documentation/serial/digiepca.txt
F: drivers/staging/tty/epca*
F: drivers/staging/tty/digi*
DIOLAN U2C-12 I2C DRIVER
M: Guenter Roeck <guenter.roeck@ericsson.com>
L: linux-i2c@vger.kernel.org
@@ -5555,11 +5535,6 @@ M: Maxim Levitsky <maximlevitsky@gmail.com>
S: Maintained
F: drivers/memstick/host/r592.*
RISCOM8 DRIVER
S: Orphan
F: Documentation/serial/riscom8.txt
F: drivers/staging/tty/riscom8*
ROCKETPORT DRIVER
P: Comtrol Corp.
W: http://www.comtrol.com
@@ -6222,11 +6197,6 @@ F: arch/arm/mach-spear3xx/spear3*0_evb.c
F: arch/arm/mach-spear6xx/spear600.c
F: arch/arm/mach-spear6xx/spear600_evb.c
SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER
S: Orphan
F: Documentation/serial/specialix.txt
F: drivers/staging/tty/specialix*
SPI SUBSYSTEM
M: Grant Likely <grant.likely@secretlab.ca>
L: spi-devel-general@lists.sourceforge.net
@@ -6304,11 +6274,6 @@ M: Manu Abraham <abraham.manu@gmail.com>
S: Odd Fixes
F: drivers/staging/crystalhd/
STAGING - CYPRESS WESTBRIDGE SUPPORT
M: David Cross <david.cross@cypress.com>
S: Odd Fixes
F: drivers/staging/westbridge/
STAGING - ECHO CANCELLER
M: Steve Underwood <steveu@coppice.org>
M: David Rowe <david@rowetel.com>
@@ -6414,7 +6379,7 @@ S: Odd Fixes
F: drivers/staging/winbond/
STAGING - XGI Z7,Z9,Z11 PCI DISPLAY DRIVER
M: Arnaud Patard <apatard@mandriva.com>
M: Arnaud Patard <arnaud.patard@rtp-net.org>
S: Odd Fixes
F: drivers/staging/xgifb/
......
@@ -342,4 +342,6 @@ config VMXNET3
To compile this driver as a module, choose M here: the
module will be called vmxnet3.
source "drivers/net/hyperv/Kconfig"
endif # NETDEVICES
@@ -68,3 +68,5 @@ obj-$(CONFIG_USB_USBNET) += usb/
obj-$(CONFIG_USB_ZD1201) += usb/
obj-$(CONFIG_USB_IPHETH) += usb/
obj-$(CONFIG_USB_CDC_PHONET) += usb/
obj-$(CONFIG_HYPERV_NET) += hyperv/
config HYPERV_NET
tristate "Microsoft Hyper-V virtual network driver"
depends on HYPERV
help
Select this option to enable the Hyper-V virtual network driver.
obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o
@@ -39,9 +39,6 @@ struct xferpage_packet {
u32 count;
};
/* The number of pages which are enough to cover jumbo frame buffer. */
#define NETVSC_PACKET_MAXPAGE 4
/*
* Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
* within the RNDIS
@@ -77,8 +74,9 @@ struct hv_netvsc_packet {
u32 total_data_buflen;
/* Points to the send/receive buffer where the ethernet frame is */
void *data;
u32 page_buf_cnt;
struct hv_page_buffer page_buf[NETVSC_PACKET_MAXPAGE];
struct hv_page_buffer page_buf[0];
};
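Since page_buf is now a zero-length (flexible) array rather than a fixed page_buf[NETVSC_PACKET_MAXPAGE], callers size the trailing page list at allocation time. A minimal kernel-side sketch of the idiom (hypothetical helper; the real allocation sits in netvsc_start_xmit further down):

	/* Reserve room for 'nr_pages' trailing hv_page_buffer entries. */
	static struct hv_netvsc_packet *alloc_packet(u32 nr_pages)
	{
		struct hv_netvsc_packet *pkt;

		pkt = kzalloc(sizeof(*pkt) +
			      nr_pages * sizeof(struct hv_page_buffer),
			      GFP_ATOMIC);
		if (!pkt)
			return NULL;
		pkt->page_buf_cnt = nr_pages;
		return pkt;
	}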
struct netvsc_device_info {
@@ -87,6 +85,27 @@ struct netvsc_device_info {
int ring_size;
};
enum rndis_device_state {
RNDIS_DEV_UNINITIALIZED = 0,
RNDIS_DEV_INITIALIZING,
RNDIS_DEV_INITIALIZED,
RNDIS_DEV_DATAINITIALIZED,
};
struct rndis_device {
struct netvsc_device *net_dev;
enum rndis_device_state state;
bool link_state;
atomic_t new_req_id;
spinlock_t request_lock;
struct list_head req_list;
unsigned char hw_mac_adr[ETH_ALEN];
};
/* Interface */
int netvsc_device_add(struct hv_device *device, void *additional_info);
int netvsc_device_remove(struct hv_device *device);
@@ -109,11 +128,13 @@ int rndis_filter_receive(struct hv_device *dev,
int rndis_filter_send(struct hv_device *dev,
struct hv_netvsc_packet *pkt);
int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
#define NVSP_PROTOCOL_VERSION_1 2
#define NVSP_MIN_PROTOCOL_VERSION NVSP_PROTOCOL_VERSION_1
#define NVSP_MAX_PROTOCOL_VERSION NVSP_PROTOCOL_VERSION_1
#define NVSP_PROTOCOL_VERSION_2 0x30002
enum {
NVSP_MSG_TYPE_NONE = 0,
@@ -138,11 +159,36 @@ enum {
NVSP_MSG1_TYPE_SEND_RNDIS_PKT,
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
/*
* This should be set to the number of messages for the version with
* the maximum number of messages.
*/
NVSP_NUM_MSG_PER_VERSION = 9,
/* Version 2 messages */
NVSP_MSG2_TYPE_SEND_CHIMNEY_DELEGATED_BUF,
NVSP_MSG2_TYPE_SEND_CHIMNEY_DELEGATED_BUF_COMP,
NVSP_MSG2_TYPE_REVOKE_CHIMNEY_DELEGATED_BUF,
NVSP_MSG2_TYPE_RESUME_CHIMNEY_RX_INDICATION,
NVSP_MSG2_TYPE_TERMINATE_CHIMNEY,
NVSP_MSG2_TYPE_TERMINATE_CHIMNEY_COMP,
NVSP_MSG2_TYPE_INDICATE_CHIMNEY_EVENT,
NVSP_MSG2_TYPE_SEND_CHIMNEY_PKT,
NVSP_MSG2_TYPE_SEND_CHIMNEY_PKT_COMP,
NVSP_MSG2_TYPE_POST_CHIMNEY_RECV_REQ,
NVSP_MSG2_TYPE_POST_CHIMNEY_RECV_REQ_COMP,
NVSP_MSG2_TYPE_ALLOC_RXBUF,
NVSP_MSG2_TYPE_ALLOC_RXBUF_COMP,
NVSP_MSG2_TYPE_FREE_RXBUF,
NVSP_MSG2_TYPE_SEND_VMQ_RNDIS_PKT,
NVSP_MSG2_TYPE_SEND_VMQ_RNDIS_PKT_COMP,
NVSP_MSG2_TYPE_SEND_NDIS_CONFIG,
NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE,
NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
};
enum {
@@ -153,6 +199,7 @@ enum {
NVSP_STAT_PROTOCOL_TOO_OLD,
NVSP_STAT_INVALID_RNDIS_PKT,
NVSP_STAT_BUSY,
NVSP_STAT_PROTOCOL_UNSUPPORTED,
NVSP_STAT_MAX,
};
@@ -337,9 +384,69 @@ union nvsp_1_message_uber {
send_rndis_pkt_complete;
} __packed;
/*
* Network VSP protocol version 2 messages:
*/
struct nvsp_2_vsc_capability {
union {
u64 data;
struct {
u64 vmq:1;
u64 chimney:1;
u64 sriov:1;
u64 ieee8021q:1;
u64 correlation_id:1;
};
};
} __packed;
struct nvsp_2_send_ndis_config {
u32 mtu;
u32 reserved;
struct nvsp_2_vsc_capability capability;
} __packed;
/* Allocate receive buffer */
struct nvsp_2_alloc_rxbuf {
/* Allocation ID to match the allocation request and response */
u32 alloc_id;
/* Length of the VM shared memory receive buffer that needs to
* be allocated
*/
u32 len;
} __packed;
/* Allocate receive buffer complete */
struct nvsp_2_alloc_rxbuf_comp {
/* The NDIS_STATUS code for buffer allocation */
u32 status;
u32 alloc_id;
/* GPADL handle for the allocated receive buffer */
u32 gpadl_handle;
/* Receive buffer ID */
u64 recv_buf_id;
} __packed;
struct nvsp_2_free_rxbuf {
u64 recv_buf_id;
} __packed;
union nvsp_2_message_uber {
struct nvsp_2_send_ndis_config send_ndis_config;
struct nvsp_2_alloc_rxbuf alloc_rxbuf;
struct nvsp_2_alloc_rxbuf_comp alloc_rxbuf_comp;
struct nvsp_2_free_rxbuf free_rxbuf;
} __packed;
union nvsp_all_messages {
union nvsp_message_init_uber init_msg;
union nvsp_1_message_uber v1_msg;
union nvsp_2_message_uber v2_msg;
} __packed;
/* ALL Messages */
@@ -349,12 +456,9 @@ struct nvsp_message {
} __packed;
#define NETVSC_MTU 65536
/* #define NVSC_MIN_PROTOCOL_VERSION 1 */
/* #define NVSC_MAX_PROTOCOL_VERSION 1 */
#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024) /* 1MB */
#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*2) /* 2MB */
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
@@ -369,7 +473,10 @@ struct nvsp_message {
struct netvsc_device {
struct hv_device *dev;
u32 nvsp_version;
atomic_t num_outstanding_sends;
bool start_remove;
bool destroy;
/*
* List of free preallocated hv_netvsc_packet to represent receive
......
@@ -28,6 +28,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include "hyperv_net.h"
@@ -41,7 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
if (!net_device)
return NULL;
net_device->start_remove = false;
net_device->destroy = false;
net_device->dev = device;
net_device->ndev = ndev;
@@ -230,19 +231,16 @@ static int netvsc_init_recv_buf(struct hv_device *device)
net_device->recv_section_cnt = init_packet->msg.
v1_msg.send_recv_buf_complete.num_sections;
net_device->recv_section = kmalloc(net_device->recv_section_cnt
* sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
net_device->recv_section = kmemdup(
init_packet->msg.v1_msg.send_recv_buf_complete.sections,
net_device->recv_section_cnt *
sizeof(struct nvsp_1_receive_buffer_section),
GFP_KERNEL);
if (net_device->recv_section == NULL) {
ret = -EINVAL;
goto cleanup;
}
memcpy(net_device->recv_section,
init_packet->msg.v1_msg.
send_recv_buf_complete.sections,
net_device->recv_section_cnt *
sizeof(struct nvsp_1_receive_buffer_section));
/*
* For 1st release, there should only be 1 section that represents the
* entire receive buffer
@@ -263,27 +261,18 @@ static int netvsc_init_recv_buf(struct hv_device *device)
}
static int netvsc_connect_vsp(struct hv_device *device)
/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
struct netvsc_device *net_device,
struct nvsp_message *init_packet,
u32 nvsp_ver)
{
int ret, t;
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
int ndis_version;
struct net_device *ndev;
net_device = get_outbound_net_device(device);
if (!net_device)
return -ENODEV;
ndev = net_device->ndev;
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
init_packet->msg.init_msg.init.min_protocol_ver =
NVSP_MIN_PROTOCOL_VERSION;
init_packet->msg.init_msg.init.max_protocol_ver =
NVSP_MAX_PROTOCOL_VERSION;
init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
/* Send the init request */
ret = vmbus_sendpacket(device->channel, init_packet,
@@ -293,26 +282,62 @@ static int netvsc_connect_vsp(struct hv_device *device)
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
goto cleanup;
return ret;
t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
}
if (t == 0)
return -ETIMEDOUT;
if (init_packet->msg.init_msg.init_complete.status !=
NVSP_STAT_SUCCESS) {
ret = -EINVAL;
goto cleanup;
}
NVSP_STAT_SUCCESS)
return -EINVAL;
if (init_packet->msg.init_msg.init_complete.
negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
if (nvsp_ver != NVSP_PROTOCOL_VERSION_2)
return 0;
/* NVSPv2 only: Send NDIS config */
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu;
ret = vmbus_sendpacket(device->channel, init_packet,
sizeof(struct nvsp_message),
(unsigned long)init_packet,
VM_PKT_DATA_INBAND, 0);
return ret;
}
static int netvsc_connect_vsp(struct hv_device *device)
{
int ret;
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
int ndis_version;
struct net_device *ndev;
net_device = get_outbound_net_device(device);
if (!net_device)
return -ENODEV;
ndev = net_device->ndev;
init_packet = &net_device->channel_init_pkt;
/* Negotiate the latest NVSP protocol supported */
if (negotiate_nvsp_ver(device, net_device, init_packet,
NVSP_PROTOCOL_VERSION_2) == 0) {
net_device->nvsp_version = NVSP_PROTOCOL_VERSION_2;
} else if (negotiate_nvsp_ver(device, net_device, init_packet,
NVSP_PROTOCOL_VERSION_1) == 0) {
net_device->nvsp_version = NVSP_PROTOCOL_VERSION_1;
} else {
ret = -EPROTO;
goto cleanup;
}
pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
/* Send the ndis version */
memset(init_packet, 0, sizeof(struct nvsp_message));
@@ -438,6 +463,9 @@ static void netvsc_send_completion(struct hv_device *device,
nvsc_packet->completion.send.send_completion_ctx);
atomic_dec(&net_device->num_outstanding_sends);
if (netif_queue_stopped(ndev) && !net_device->start_remove)
netif_wake_queue(ndev);
} else {
netdev_err(ndev, "Unknown send completion packet type- "
"%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -488,11 +516,16 @@ int netvsc_send(struct hv_device *device,
}
if (ret != 0)
if (ret == 0) {
atomic_inc(&net_device->num_outstanding_sends);
} else if (ret == -EAGAIN) {
netif_stop_queue(ndev);
if (atomic_read(&net_device->num_outstanding_sends) < 1)
netif_wake_queue(ndev);
} else {
netdev_err(ndev, "Unable to send packet %p ret %d\n",
packet, ret);
else
atomic_inc(&net_device->num_outstanding_sends);
}
return ret;
}
@@ -598,12 +631,10 @@ static void netvsc_receive(struct hv_device *device,
struct vmtransfer_page_packet_header *vmxferpage_packet;
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *netvsc_packet = NULL;
unsigned long start;
unsigned long end, end_virtual;
/* struct netvsc_driver *netvscDriver; */
struct xferpage_packet *xferpage_packet = NULL;
int i, j;
int count = 0, bytes_remain = 0;
int i;
int count = 0;
unsigned long flags;
struct net_device *ndev;
@@ -712,53 +743,10 @@ static void netvsc_receive(struct hv_device *device,
netvsc_packet->completion.recv.recv_completion_tid =
vmxferpage_packet->d.trans_id;
netvsc_packet->data = (void *)((unsigned long)net_device->
recv_buf + vmxferpage_packet->ranges[i].byte_offset);
netvsc_packet->total_data_buflen =
vmxferpage_packet->ranges[i].byte_count;
netvsc_packet->page_buf_cnt = 1;
netvsc_packet->page_buf[0].len =
vmxferpage_packet->ranges[i].byte_count;
start = virt_to_phys((void *)((unsigned long)net_device->
recv_buf + vmxferpage_packet->ranges[i].byte_offset));
netvsc_packet->page_buf[0].pfn = start >> PAGE_SHIFT;
end_virtual = (unsigned long)net_device->recv_buf
+ vmxferpage_packet->ranges[i].byte_offset
+ vmxferpage_packet->ranges[i].byte_count - 1;
end = virt_to_phys((void *)end_virtual);
/* Calculate the page relative offset */
netvsc_packet->page_buf[0].offset =
vmxferpage_packet->ranges[i].byte_offset &
(PAGE_SIZE - 1);
if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
/* Handle frame across multiple pages: */
netvsc_packet->page_buf[0].len =
(netvsc_packet->page_buf[0].pfn <<
PAGE_SHIFT)
+ PAGE_SIZE - start;
bytes_remain = netvsc_packet->total_data_buflen -
netvsc_packet->page_buf[0].len;
for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
netvsc_packet->page_buf[j].offset = 0;
if (bytes_remain <= PAGE_SIZE) {
netvsc_packet->page_buf[j].len =
bytes_remain;
bytes_remain = 0;
} else {
netvsc_packet->page_buf[j].len =
PAGE_SIZE;
bytes_remain -= PAGE_SIZE;
}
netvsc_packet->page_buf[j].pfn =
virt_to_phys((void *)(end_virtual -
bytes_remain)) >> PAGE_SHIFT;
netvsc_packet->page_buf_cnt++;
if (bytes_remain == 0)
break;
}
}
/* Pass it to the upper layer */
rndis_filter_receive(device, netvsc_packet);
......
@@ -43,24 +43,59 @@
struct net_device_context {
/* point back to our device context */
struct hv_device *device_ctx;
atomic_t avail;
struct delayed_work dwork;
};
#define PACKET_PAGES_LOWATER 8
/* Need this many pages to handle worst case fragmented packet */
#define PACKET_PAGES_HIWATER (MAX_SKB_FRAGS + 2)
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
/* no-op so the netdev core doesn't return -EINVAL when modifying the
* multicast address list in SIOCADDMULTI. hv is set up to get all multicast
* when it calls RndisFilterOnOpen() */
struct set_multicast_work {
struct work_struct work;
struct net_device *net;
};
static void do_set_multicast(struct work_struct *w)
{
struct set_multicast_work *swk =
container_of(w, struct set_multicast_work, work);
struct net_device *net = swk->net;
struct net_device_context *ndevctx = netdev_priv(net);
struct netvsc_device *nvdev;
struct rndis_device *rdev;
nvdev = hv_get_drvdata(ndevctx->device_ctx);
if (nvdev == NULL)
return;
rdev = nvdev->extension;
if (rdev == NULL)
return;
if (net->flags & IFF_PROMISC)
rndis_filter_set_packet_filter(rdev,
NDIS_PACKET_TYPE_PROMISCUOUS);
else
rndis_filter_set_packet_filter(rdev,
NDIS_PACKET_TYPE_BROADCAST |
NDIS_PACKET_TYPE_ALL_MULTICAST |
NDIS_PACKET_TYPE_DIRECTED);
kfree(w);
}
static void netvsc_set_multicast_list(struct net_device *net)
{
struct set_multicast_work *swk =
kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC);
if (swk == NULL)
return;
swk->net = net;
INIT_WORK(&swk->work, do_set_multicast);
schedule_work(&swk->work);
}
static int netvsc_open(struct net_device *net)
@@ -104,18 +139,8 @@ static void netvsc_xmit_completion(void *context)
kfree(packet);
if (skb) {
struct net_device *net = skb->dev;
struct net_device_context *net_device_ctx = netdev_priv(net);
unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;
if (skb)
dev_kfree_skb_any(skb);
atomic_add(num_pages, &net_device_ctx->avail);
if (atomic_read(&net_device_ctx->avail) >=
PACKET_PAGES_HIWATER)
netif_wake_queue(net);
}
}
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
@@ -123,12 +148,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_netvsc_packet *packet;
int ret;
unsigned int i, num_pages;
unsigned int i, num_pages, npg_data;
/* Add 1 for skb->data and additional one for RNDIS */
num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
if (num_pages > atomic_read(&net_device_ctx->avail))
return NETDEV_TX_BUSY;
/* Add multipage for skb->data and additional one for RNDIS */
npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
>> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
num_pages = skb_shinfo(skb)->nr_frags + npg_data + 1;
/* Allocate a netvsc packet based on # of frags. */
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
@@ -151,21 +176,36 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet->page_buf_cnt = num_pages;
/* Initialize it from the skb */
packet->total_data_buflen = skb->len;
/* Start filling in the page buffers starting after RNDIS buffer. */
packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
packet->page_buf[1].offset
= (unsigned long)skb->data & (PAGE_SIZE - 1);
packet->page_buf[1].len = skb_headlen(skb);
if (npg_data == 1)
packet->page_buf[1].len = skb_headlen(skb);
else
packet->page_buf[1].len = PAGE_SIZE
- packet->page_buf[1].offset;
for (i = 2; i <= npg_data; i++) {
packet->page_buf[i].pfn = virt_to_phys(skb->data
+ PAGE_SIZE * (i-1)) >> PAGE_SHIFT;
packet->page_buf[i].offset = 0;
packet->page_buf[i].len = PAGE_SIZE;
}
if (npg_data > 1)
packet->page_buf[npg_data].len = (((unsigned long)skb->data
+ skb_headlen(skb) - 1) & (PAGE_SIZE - 1)) + 1;
/* Additional fragments are after SKB data */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
packet->page_buf[i+2].pfn = page_to_pfn(skb_frag_page(f));
packet->page_buf[i+2].offset = f->page_offset;
packet->page_buf[i+2].len = skb_frag_size(f);
packet->page_buf[i+npg_data+1].pfn =
page_to_pfn(skb_frag_page(f));
packet->page_buf[i+npg_data+1].offset = f->page_offset;
packet->page_buf[i+npg_data+1].len = skb_frag_size(f);
}
/* Set the completion routine */
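To see what the npg_data arithmetic above does, here is a worked example with hypothetical addresses (PAGE_SIZE = 4096, so PAGE_SHIFT = 12):

	/*
	 *   skb->data      = 0x1ff0, skb_headlen(skb) = 0x40
	 *   last data byte = 0x1ff0 + 0x40 - 1 = 0x202f
	 *   npg_data       = (0x202f >> 12) - (0x1ff0 >> 12) + 1 = 2 - 1 + 1 = 2
	 *
	 * page_buf[1] covers 0x1ff0..0x1fff (PAGE_SIZE - offset = 16 bytes);
	 * page_buf[2] covers 0x2000..0x202f, whose length the tail formula
	 * gives as (0x202f & (PAGE_SIZE - 1)) + 1 = 0x30 = 48 bytes.
	 */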
@@ -178,10 +218,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
if (ret == 0) {
net->stats.tx_bytes += skb->len;
net->stats.tx_packets++;
atomic_sub(num_pages, &net_device_ctx->avail);
if (atomic_read(&net_device_ctx->avail) < PACKET_PAGES_LOWATER)
netif_stop_queue(net);
} else {
/* we are shutting down or bus overloaded, just drop packet */
net->stats.tx_dropped++;
@@ -232,9 +268,6 @@ int netvsc_recv_callback(struct hv_device *device_obj,
{
struct net_device *net = dev_get_drvdata(&device_obj->device);
struct sk_buff *skb;
void *data;
int i;
unsigned long flags;
struct netvsc_device *net_device;
net_device = hv_get_drvdata(device_obj);
@@ -253,27 +286,12 @@ int netvsc_recv_callback(struct hv_device *device_obj,
return 0;
}
/* for kmap_atomic */
local_irq_save(flags);
/*
* Copy to skb. This copy is needed here since the memory pointed by
* hv_netvsc_packet cannot be deallocated
*/
for (i = 0; i < packet->page_buf_cnt; i++) {
data = kmap_atomic(pfn_to_page(packet->page_buf[i].pfn),
KM_IRQ1);
data = (void *)(unsigned long)data +
packet->page_buf[i].offset;
memcpy(skb_put(skb, packet->page_buf[i].len), data,
packet->page_buf[i].len);
kunmap_atomic((void *)((unsigned long)data -
packet->page_buf[i].offset), KM_IRQ1);
}
local_irq_restore(flags);
memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net);
skb->ip_summed = CHECKSUM_NONE;
@@ -299,6 +317,39 @@ static void netvsc_get_drvinfo(struct net_device *net,
strcpy(info->fw_version, "N/A");
}
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct hv_device *hdev = ndevctx->device_ctx;
struct netvsc_device *nvdev = hv_get_drvdata(hdev);
struct netvsc_device_info device_info;
int limit = ETH_DATA_LEN;
if (nvdev == NULL || nvdev->destroy)
return -ENODEV;
if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2)
limit = NETVSC_MTU;
if (mtu < 68 || mtu > limit)
return -EINVAL;
nvdev->start_remove = true;
cancel_delayed_work_sync(&ndevctx->dwork);
netif_stop_queue(ndev);
rndis_filter_device_remove(hdev);
ndev->mtu = mtu;
ndevctx->device_ctx = hdev;
hv_set_drvdata(hdev, ndev);
device_info.ring_size = ring_size;
rndis_filter_device_add(hdev, &device_info);
netif_wake_queue(ndev);
return 0;
}
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -309,7 +360,7 @@ static const struct net_device_ops device_ops = {
.ndo_stop = netvsc_close,
.ndo_start_xmit = netvsc_start_xmit,
.ndo_set_rx_mode = netvsc_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
@@ -351,7 +402,6 @@ static int netvsc_probe(struct hv_device *dev,
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
atomic_set(&net_device_ctx->avail, ring_size);
hv_set_drvdata(dev, net);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
@@ -403,6 +453,8 @@ static int netvsc_remove(struct hv_device *dev)
return 0;
}
net_device->start_remove = true;
ndev_ctx = netdev_priv(net);
cancel_delayed_work_sync(&ndev_ctx->dwork);
......
@@ -30,26 +30,6 @@
#include "hyperv_net.h"
enum rndis_device_state {
RNDIS_DEV_UNINITIALIZED = 0,
RNDIS_DEV_INITIALIZING,
RNDIS_DEV_INITIALIZED,
RNDIS_DEV_DATAINITIALIZED,
};
struct rndis_device {
struct netvsc_device *net_dev;
enum rndis_device_state state;
bool link_state;
atomic_t new_req_id;
spinlock_t request_lock;
struct list_head req_list;
unsigned char hw_mac_adr[ETH_ALEN];
};
struct rndis_request {
struct list_head list_ent;
struct completion wait_event;
@@ -329,7 +309,6 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
{
struct rndis_packet *rndis_pkt;
u32 data_offset;
int i;
rndis_pkt = &msg->msg.pkt;
@@ -342,17 +321,7 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
pkt->total_data_buflen -= data_offset;
pkt->page_buf[0].offset += data_offset;
pkt->page_buf[0].len -= data_offset;
/* Drop the 0th page, if rndis data go beyond page boundary */
if (pkt->page_buf[0].offset >= PAGE_SIZE) {
pkt->page_buf[1].offset = pkt->page_buf[0].offset - PAGE_SIZE;
pkt->page_buf[1].len -= pkt->page_buf[1].offset;
pkt->page_buf_cnt--;
for (i = 0; i < pkt->page_buf_cnt; i++)
pkt->page_buf[i] = pkt->page_buf[i+1];
}
pkt->data = (void *)((unsigned long)pkt->data + data_offset);
pkt->is_data_pkt = true;
@@ -387,11 +356,7 @@ int rndis_filter_receive(struct hv_device *dev,
return -ENODEV;
}
rndis_hdr = (struct rndis_message *)kmap_atomic(
pfn_to_page(pkt->page_buf[0].pfn), KM_IRQ0);
rndis_hdr = (void *)((unsigned long)rndis_hdr +
pkt->page_buf[0].offset);
rndis_hdr = pkt->data;
/* Make sure we got a valid rndis message */
if ((rndis_hdr->ndis_msg_type != REMOTE_NDIS_PACKET_MSG) &&
@@ -407,8 +372,6 @@ int rndis_filter_receive(struct hv_device *dev,
sizeof(struct rndis_message) :
rndis_hdr->msg_len);
kunmap_atomic(rndis_hdr - pkt->page_buf[0].offset, KM_IRQ0);
dump_rndis_message(dev, &rndis_msg);
switch (rndis_msg.ndis_msg_type) {
@@ -522,8 +485,7 @@ static int rndis_filter_query_device_link_status(struct rndis_device *dev)
return ret;
}
static int rndis_filter_set_packet_filter(struct rndis_device *dev,
u32 new_filter)
int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
{
struct rndis_request *request;
struct rndis_set_request *set;
......
@@ -72,8 +72,6 @@ source "drivers/staging/octeon/Kconfig"
source "drivers/staging/serqt_usb2/Kconfig"
source "drivers/staging/spectra/Kconfig"
source "drivers/staging/quatech_usb2/Kconfig"
source "drivers/staging/vt6655/Kconfig"
@@ -116,8 +114,6 @@ source "drivers/staging/bcm/Kconfig"
source "drivers/staging/ft1000/Kconfig"
source "drivers/staging/intel_sst/Kconfig"
source "drivers/staging/speakup/Kconfig"
source "drivers/staging/cptm1217/Kconfig"
@@ -132,4 +128,8 @@ source "drivers/staging/nvec/Kconfig"
source "drivers/staging/media/Kconfig"
source "drivers/staging/omapdrm/Kconfig"
source "drivers/staging/android/Kconfig"
endif # STAGING
@@ -21,7 +21,6 @@ obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_RTS_PSTOR) += rts_pstor/
obj-$(CONFIG_RTS5139) += rts5139/
obj-$(CONFIG_SPECTRA) += spectra/
obj-$(CONFIG_TRANZPORT) += frontier/
obj-$(CONFIG_POHMELFS) += pohmelfs/
obj-$(CONFIG_IDE_PHISON) += phison/
@@ -50,10 +49,11 @@ obj-$(CONFIG_SBE_2T3E3) += sbe-2t3e3/
obj-$(CONFIG_USB_ENESTORAGE) += keucr/
obj-$(CONFIG_BCM_WIMAX) += bcm/
obj-$(CONFIG_FT1000) += ft1000/
obj-$(CONFIG_SND_INTEL_SST) += intel_sst/
obj-$(CONFIG_SPEAKUP) += speakup/
obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/
obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
obj-$(CONFIG_DRM_PSB) += gma500/
obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_ANDROID) += android/
menu "Android"
config ANDROID
bool "Android Drivers"
default n
---help---
Enable support for various drivers needed on the Android platform
if ANDROID
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
default n
config ASHMEM
bool "Enable the Anonymous Shared Memory Subsystem"
default n
depends on SHMEM || TINY_SHMEM
help
The ashmem subsystem is a new shared memory allocator, similar to
POSIX SHM but with different behavior and sporting a simpler
file-based API.
config ANDROID_LOGGER
tristate "Android log driver"
default n
config ANDROID_RAM_CONSOLE
bool "Android RAM buffer console"
default n
config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
bool "Enable verbose console messages on Android RAM console"
default y
depends on ANDROID_RAM_CONSOLE
menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
bool "Android RAM Console Enable error correction"
default n
depends on ANDROID_RAM_CONSOLE
depends on !ANDROID_RAM_CONSOLE_EARLY_INIT
select REED_SOLOMON
select REED_SOLOMON_ENC8
select REED_SOLOMON_DEC8
if ANDROID_RAM_CONSOLE_ERROR_CORRECTION
config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
int "Android RAM Console Data data size"
default 128
help
Must be a power of 2.
config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
int "Android RAM Console ECC size"
default 16
config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
int "Android RAM Console Symbol size"
default 8
config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
hex "Android RAM Console Polynomial"
default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4)
default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5)
default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6)
default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7)
default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8)
endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION
config ANDROID_RAM_CONSOLE_EARLY_INIT
bool "Start Android RAM console early"
default n
depends on ANDROID_RAM_CONSOLE
config ANDROID_RAM_CONSOLE_EARLY_ADDR
hex "Android RAM console virtual address"
default 0
depends on ANDROID_RAM_CONSOLE_EARLY_INIT
config ANDROID_RAM_CONSOLE_EARLY_SIZE
hex "Android RAM console buffer size"
default 0
depends on ANDROID_RAM_CONSOLE_EARLY_INIT
config ANDROID_TIMED_OUTPUT
bool "Timed output class driver"
default y
config ANDROID_TIMED_GPIO
tristate "Android timed gpio driver"
depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT
default n
config ANDROID_LOW_MEMORY_KILLER
bool "Android Low Memory Killer"
default n
---help---
Register processes to be killed when memory is low
config ANDROID_PMEM
bool "Android pmem allocator"
depends on ARM
source "drivers/staging/android/switch/Kconfig"
endif # if ANDROID
endmenu
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
obj-$(CONFIG_ASHMEM) += ashmem.o
obj-$(CONFIG_ANDROID_LOGGER) += logger.o
obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o
obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
obj-$(CONFIG_ANDROID_PMEM) += pmem.o
obj-$(CONFIG_ANDROID_SWITCH) += switch/
TODO:
- checkpatch.pl cleanups
- sparse fixes
- rename files to be not so "generic"
- make sure things build as modules properly
- add proper arch dependencies as needed
- audit userspace interfaces to make sure they are sane
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
Brian Swetland <swetland@google.com>
/* include/linux/android_pmem.h
*
* Copyright (C) 2007 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _ANDROID_PMEM_H_
#define _ANDROID_PMEM_H_
#define PMEM_IOCTL_MAGIC 'p'
#define PMEM_GET_PHYS _IOW(PMEM_IOCTL_MAGIC, 1, unsigned int)
#define PMEM_MAP _IOW(PMEM_IOCTL_MAGIC, 2, unsigned int)
#define PMEM_GET_SIZE _IOW(PMEM_IOCTL_MAGIC, 3, unsigned int)
#define PMEM_UNMAP _IOW(PMEM_IOCTL_MAGIC, 4, unsigned int)
/* This ioctl allocates pmem space backing the file; it fails if the file
* already has an allocation. Pass the len as the argument to the ioctl. */
#define PMEM_ALLOCATE _IOW(PMEM_IOCTL_MAGIC, 5, unsigned int)
/* This connects one pmem file to another; pass the file that is already
* backed in memory as the argument to the ioctl.
*/
#define PMEM_CONNECT _IOW(PMEM_IOCTL_MAGIC, 6, unsigned int)
/* Returns the total size of the pmem region it is sent to as a pmem_region
* struct (with offset set to 0).
*/
#define PMEM_GET_TOTAL_SIZE _IOW(PMEM_IOCTL_MAGIC, 7, unsigned int)
#define PMEM_CACHE_FLUSH _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int)
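A hedged user-space sketch of the ioctl flow (hypothetical helper and device path; error checking trimmed):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include "android_pmem.h"

	/* Map the whole pmem region behind 'path', e.g. "/dev/pmem". */
	static void *pmem_map_all(const char *path)
	{
		struct pmem_region region = { 0, 0 };
		int fd = open(path, O_RDWR);

		if (fd < 0)
			return NULL;
		ioctl(fd, PMEM_GET_TOTAL_SIZE, &region);  /* fills region.len */
		ioctl(fd, PMEM_ALLOCATE, region.len);     /* back this fd */
		return mmap(NULL, region.len, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);
	}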
struct android_pmem_platform_data
{
const char* name;
/* starting physical address of memory region */
unsigned long start;
/* size of memory region */
unsigned long size;
/* set to indicate the region should not be managed with an allocator */
unsigned no_allocator;
/* set to indicate maps of this region should be cached, if a mix of
* cached and uncached is desired, set this and open the device with
* O_SYNC to get an uncached region */
unsigned cached;
/* The MSM7k has bits to enable a write buffer in the bus controller*/
unsigned buffered;
};
struct pmem_region {
unsigned long offset;
unsigned long len;
};
#ifdef CONFIG_ANDROID_PMEM
int is_pmem_file(struct file *file);
int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
unsigned long *end, struct file **filp);
int get_pmem_user_addr(struct file *file, unsigned long *start,
unsigned long *end);
void put_pmem_file(struct file* file);
void flush_pmem_file(struct file *file, unsigned long start, unsigned long len);
int pmem_setup(struct android_pmem_platform_data *pdata,
long (*ioctl)(struct file *, unsigned int, unsigned long),
int (*release)(struct inode *, struct file *));
int pmem_remap(struct pmem_region *region, struct file *file,
unsigned operation);
#else
static inline int is_pmem_file(struct file *file) { return 0; }
static inline int get_pmem_file(int fd, unsigned long *start,
unsigned long *vstart, unsigned long *end,
struct file **filp) { return -ENOSYS; }
static inline int get_pmem_user_addr(struct file *file, unsigned long *start,
unsigned long *end) { return -ENOSYS; }
static inline void put_pmem_file(struct file* file) { return; }
static inline void flush_pmem_file(struct file *file, unsigned long start,
unsigned long len) { return; }
static inline int pmem_setup(struct android_pmem_platform_data *pdata,
long (*ioctl)(struct file *, unsigned int, unsigned long),
int (*release)(struct inode *, struct file *)) { return -ENOSYS; }
static inline int pmem_remap(struct pmem_region *region, struct file *file,
unsigned operation) { return -ENOSYS; }
#endif
#endif /* _ANDROID_PMEM_H_ */
/*
* include/linux/ashmem.h
*
* Copyright 2008 Google Inc.
* Author: Robert Love
*
* This file is dual licensed. It may be redistributed and/or modified
* under the terms of the Apache 2.0 License OR version 2 of the GNU
* General Public License.
*/
#ifndef _LINUX_ASHMEM_H
#define _LINUX_ASHMEM_H
#include <linux/limits.h>
#include <linux/ioctl.h>
#define ASHMEM_NAME_LEN 256
#define ASHMEM_NAME_DEF "dev/ashmem"
/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
#define ASHMEM_NOT_PURGED 0
#define ASHMEM_WAS_PURGED 1
/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
#define ASHMEM_IS_UNPINNED 0
#define ASHMEM_IS_PINNED 1
struct ashmem_pin {
__u32 offset; /* offset into region, in bytes, page-aligned */
__u32 len; /* length forward from offset, in bytes, page-aligned */
};
#define __ASHMEMIOC 0x77
#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
#endif /* _LINUX_ASHMEM_H */
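A user-space usage sketch (hypothetical helper; error checking trimmed). The name and size must be set before the region is mapped; a later ioctl(fd, ASHMEM_PIN, &pin) returning ASHMEM_WAS_PURGED tells the caller that unpinned contents were discarded.

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include "ashmem.h"

	static void *ashmem_create(const char *name, size_t size)
	{
		int fd = open("/dev/ashmem", O_RDWR);

		if (fd < 0)
			return NULL;
		ioctl(fd, ASHMEM_SET_NAME, name);	/* before mmap */
		ioctl(fd, ASHMEM_SET_SIZE, size);	/* likewise */
		return mmap(NULL, size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);
	}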
/*
* Copyright (C) 2008 Google, Inc.
*
* Based on, but no longer compatible with, the original
* OpenBinder.org binder driver interface, which is:
*
* Copyright (c) 2005 Palmsource, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_BINDER_H
#define _LINUX_BINDER_H
#include <linux/ioctl.h>
#define B_PACK_CHARS(c1, c2, c3, c4) \
((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
#define B_TYPE_LARGE 0x85
enum {
BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
};
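For reference, the packing works out as follows for the first constant (pure arithmetic, shown for illustration):

	/*
	 *   BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE)
	 *                      = (0x73 << 24) | (0x62 << 16) | (0x2a << 8) | 0x85
	 *                      = 0x73622a85
	 */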
enum {
FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
};
/*
* This is the flattened representation of a Binder object for transfer
* between processes. The 'offsets' supplied as part of a binder transaction
* contains offsets into the data where these structures occur. The Binder
* driver takes care of re-writing the structure type and data as it moves
* between processes.
*/
struct flat_binder_object {
/* 8 bytes for large_flat_header. */
unsigned long type;
unsigned long flags;
/* 8 bytes of data. */
union {
void *binder; /* local object */
signed long handle; /* remote object */
};
/* extra data associated with local object */
void *cookie;
};
/*
* On 64-bit platforms where user code may run in 32-bit mode, the driver must
* translate the buffer (and local binder) addresses appropriately.
*/
struct binder_write_read {
signed long write_size; /* bytes to write */
signed long write_consumed; /* bytes consumed by driver */
unsigned long write_buffer;
signed long read_size; /* bytes to read */
signed long read_consumed; /* bytes consumed by driver */
unsigned long read_buffer;
};
/* Use with BINDER_VERSION, driver fills in fields. */
struct binder_version {
/* driver protocol version -- increment with incompatible change */
signed long protocol_version;
};
/* This is the current protocol version. */
#define BINDER_CURRENT_PROTOCOL_VERSION 7
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, int64_t)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int)
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, int)
#define BINDER_THREAD_EXIT _IOW('b', 8, int)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
/*
* NOTE: Two special error codes you should check for when calling
* in to the driver are:
*
* EINTR -- The operation has been interrupted. This should be
* handled by retrying the ioctl() until a different error code
* is returned.
*
* ECONNREFUSED -- The driver is no longer accepting operations
* from your process. That is, the process is being destroyed.
* You should handle this by exiting from your process. Note
* that once this error code is returned, all further calls to
* the driver from any thread will return this same code.
*/
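A minimal caller following that contract (hypothetical helper; the write/read buffers in bwr are assumed to be set up elsewhere):

	#include <errno.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include "binder.h"

	static int binder_call(int binder_fd, struct binder_write_read *bwr)
	{
		int ret;

		do {	/* EINTR: retry until a different code comes back */
			ret = ioctl(binder_fd, BINDER_WRITE_READ, bwr);
		} while (ret < 0 && errno == EINTR);

		if (ret < 0 && errno == ECONNREFUSED)
			exit(1);	/* driver no longer accepts our calls */
		return ret;
	}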
enum transaction_flags {
TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
};
struct binder_transaction_data {
/* The first two are only used for bcTRANSACTION and brTRANSACTION,
* identifying the target and contents of the transaction.
*/
union {
size_t handle; /* target descriptor of command transaction */
void *ptr; /* target descriptor of return transaction */
} target;
void *cookie; /* target object cookie */
unsigned int code; /* transaction command */
/* General information about the transaction. */
unsigned int flags;
pid_t sender_pid;
uid_t sender_euid;
size_t data_size; /* number of bytes of data */
size_t offsets_size; /* number of bytes of offsets */
/* If this transaction is inline, the data immediately
* follows here; otherwise, it ends with a pointer to
* the data buffer.
*/
union {
struct {
/* transaction data */
const void *buffer;
/* offsets from buffer to flat_binder_object structs */
const void *offsets;
} ptr;
uint8_t buf[8];
} data;
};
struct binder_ptr_cookie {
void *ptr;
void *cookie;
};
struct binder_pri_desc {
int priority;
int desc;
};
struct binder_pri_ptr_cookie {
int priority;
void *ptr;
void *cookie;
};
enum BinderDriverReturnProtocol {
BR_ERROR = _IOR('r', 0, int),
/*
* int: error code
*/
BR_OK = _IO('r', 1),
/* No parameters! */
BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
/*
* binder_transaction_data: the received command.
*/
BR_ACQUIRE_RESULT = _IOR('r', 4, int),
/*
* not currently supported
* int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
* Else the remote object has acquired a primary reference.
*/
BR_DEAD_REPLY = _IO('r', 5),
/*
* The target of the last transaction (either a bcTRANSACTION or
* a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
*/
BR_TRANSACTION_COMPLETE = _IO('r', 6),
/*
* No parameters... always refers to the last transaction requested
* (including replies). Note that this will be sent even for
* asynchronous transactions.
*/
BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
/*
* void *: ptr to binder
* void *: cookie for binder
*/
BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
/*
* not currently supported
* int: priority
* void *: ptr to binder
* void *: cookie for binder
*/
BR_NOOP = _IO('r', 12),
/*
* No parameters. Do nothing and examine the next command. It exists
* primarily so that we can replace it with a BR_SPAWN_LOOPER command.
*/
BR_SPAWN_LOOPER = _IO('r', 13),
/*
* No parameters. The driver has determined that a process has no
* threads waiting to service incoming transactions. When a process
* receives this command, it must spawn a new service thread and
* register it via bcENTER_LOOPER.
*/
BR_FINISHED = _IO('r', 14),
/*
* not currently supported
* stop threadpool thread
*/
BR_DEAD_BINDER = _IOR('r', 15, void *),
/*
* void *: cookie
*/
BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
/*
* void *: cookie
*/
BR_FAILED_REPLY = _IO('r', 17),
/*
* The last transaction (either a bcTRANSACTION or
* a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
*/
};
enum BinderDriverCommandProtocol {
BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
/*
* binder_transaction_data: the sent command.
*/
BC_ACQUIRE_RESULT = _IOW('c', 2, int),
/*
* not currently supported
* int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
* Else you have acquired a primary reference on the object.
*/
BC_FREE_BUFFER = _IOW('c', 3, int),
/*
* void *: ptr to transaction data received on a read
*/
BC_INCREFS = _IOW('c', 4, int),
BC_ACQUIRE = _IOW('c', 5, int),
BC_RELEASE = _IOW('c', 6, int),
BC_DECREFS = _IOW('c', 7, int),
/*
* int: descriptor
*/
BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
/*
* void *: ptr to binder
* void *: cookie for binder
*/
BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
/*
* not currently supported
* int: priority
* int: descriptor
*/
BC_REGISTER_LOOPER = _IO('c', 11),
/*
* No parameters.
* Register a spawned looper thread with the device.
*/
BC_ENTER_LOOPER = _IO('c', 12),
BC_EXIT_LOOPER = _IO('c', 13),
/*
* No parameters.
* These two commands are sent as an application-level thread
* enters and exits the binder loop, respectively. They are
* used so the binder can have an accurate count of the number
* of looping threads it has available.
*/
BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
/*
* void *: ptr to binder
* void *: cookie
*/
BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
/*
* void *: ptr to binder
* void *: cookie
*/
BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
/*
* void *: cookie
*/
};
#endif /* _LINUX_BINDER_H */
/* include/linux/logger.h
*
* Copyright (C) 2007-2008 Google, Inc.
* Author: Robert Love <rlove@android.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_LOGGER_H
#define _LINUX_LOGGER_H
#include <linux/types.h>
#include <linux/ioctl.h>
struct logger_entry {
__u16 len; /* length of the payload */
__u16 __pad; /* no matter what, we get 2 bytes of padding */
__s32 pid; /* generating process's pid */
__s32 tid; /* generating process's tid */
__s32 sec; /* seconds since Epoch */
__s32 nsec; /* nanoseconds */
char msg[0]; /* the entry's payload */
};
#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */
#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */
#define LOGGER_LOG_SYSTEM "log_system" /* system/framework messages */
#define LOGGER_LOG_MAIN "log_main" /* everything else */
#define LOGGER_ENTRY_MAX_LEN (4*1024)
#define LOGGER_ENTRY_MAX_PAYLOAD \
(LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))
#define __LOGGERIO 0xAE
#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */
#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */
#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */
#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */
#endif /* _LINUX_LOGGER_H */
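A hedged reader sketch (hypothetical helper; assumes the usual /dev/log/<name> nodes and that one read() returns exactly one entry):

	#include <unistd.h>
	#include "logger.h"

	static void read_one_entry(int log_fd)
	{
		char buf[LOGGER_ENTRY_MAX_LEN];
		ssize_t n = read(log_fd, buf, sizeof(buf));

		if (n > 0) {
			struct logger_entry *e = (struct logger_entry *)buf;
			/* e->msg carries e->len payload bytes; by Android
			 * convention: priority byte, tag string, message. */
		}
	}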
/* drivers/misc/lowmemorykiller.c
*
* The lowmemorykiller driver lets user-space specify a set of memory thresholds
* where processes with a range of oom_adj values will get killed. Specify the
* minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the
* number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both
* files take a comma separated list of numbers in ascending order.
*
* For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
* "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill
* processes with an oom_adj value of 8 or higher when the free memory drops
* below 4096 pages and kill processes with an oom_adj value of 0 or higher
* when the free memory drops below 1024 pages.
*
* The driver considers memory used for caches to be free, but if a large
* percentage of the cached memory is locked this can be very inaccurate
* and processes may not get killed until the normal oom killer is triggered.
*
* Copyright (C) 2007-2008 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/oom.h>
#include <linux/sched.h>
#include <linux/profile.h>
#include <linux/notifier.h>
static uint32_t lowmem_debug_level = 2;
static int lowmem_adj[6] = {
0,
1,
6,
12,
};
static int lowmem_adj_size = 4;
static size_t lowmem_minfree[6] = {
3 * 512, /* 6MB */
2 * 1024, /* 8MB */
4 * 1024, /* 16MB */
16 * 1024, /* 64MB */
};
static int lowmem_minfree_size = 4;
static struct task_struct *lowmem_deathpending;
#define lowmem_print(level, x...) \
do { \
if (lowmem_debug_level >= (level)) \
printk(x); \
} while (0)
static int
task_notify_func(struct notifier_block *self, unsigned long val, void *data);
static struct notifier_block task_nb = {
.notifier_call = task_notify_func,
};
static int
task_notify_func(struct notifier_block *self, unsigned long val, void *data)
{
struct task_struct *task = data;
if (task == lowmem_deathpending) {
lowmem_deathpending = NULL;
task_handoff_unregister(&task_nb);
}
return NOTIFY_OK;
}
static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
struct task_struct *p;
struct task_struct *selected = NULL;
int rem = 0;
int tasksize;
int i;
int min_adj = OOM_ADJUST_MAX + 1;
int selected_tasksize = 0;
int selected_oom_adj;
int array_size = ARRAY_SIZE(lowmem_adj);
int other_free = global_page_state(NR_FREE_PAGES);
int other_file = global_page_state(NR_FILE_PAGES) -
global_page_state(NR_SHMEM);
/*
* If we already have a death outstanding, then
* bail out right away; indicating to vmscan
* that we have nothing further to offer on
* this pass.
*
* Note: Currently you need CONFIG_PROFILING
* for this to work correctly.
*/
if (lowmem_deathpending)
return 0;
if (lowmem_adj_size < array_size)
array_size = lowmem_adj_size;
if (lowmem_minfree_size < array_size)
array_size = lowmem_minfree_size;
for (i = 0; i < array_size; i++) {
if (other_free < lowmem_minfree[i] &&
other_file < lowmem_minfree[i]) {
min_adj = lowmem_adj[i];
break;
}
}
if (sc->nr_to_scan > 0)
lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
sc->nr_to_scan, sc->gfp_mask, other_free,
other_file, min_adj);
rem = global_page_state(NR_ACTIVE_ANON) +
global_page_state(NR_ACTIVE_FILE) +
global_page_state(NR_INACTIVE_ANON) +
global_page_state(NR_INACTIVE_FILE);
if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
sc->nr_to_scan, sc->gfp_mask, rem);
return rem;
}
selected_oom_adj = min_adj;
read_lock(&tasklist_lock);
for_each_process(p) {
struct mm_struct *mm;
struct signal_struct *sig;
int oom_adj;
task_lock(p);
mm = p->mm;
sig = p->signal;
if (!mm || !sig) {
task_unlock(p);
continue;
}
oom_adj = sig->oom_adj;
if (oom_adj < min_adj) {
task_unlock(p);
continue;
}
tasksize = get_mm_rss(mm);
task_unlock(p);
if (tasksize <= 0)
continue;
if (selected) {
if (oom_adj < selected_oom_adj)
continue;
if (oom_adj == selected_oom_adj &&
tasksize <= selected_tasksize)
continue;
}
selected = p;
selected_tasksize = tasksize;
selected_oom_adj = oom_adj;
lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
p->pid, p->comm, oom_adj, tasksize);
}
if (selected) {
lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
selected->pid, selected->comm,
selected_oom_adj, selected_tasksize);
/*
* If CONFIG_PROFILING is off, then task_handoff_register()
* is a nop. In that case we don't want to stall the killer
* by setting lowmem_deathpending.
*/
#ifdef CONFIG_PROFILING
lowmem_deathpending = selected;
task_handoff_register(&task_nb);
#endif
force_sig(SIGKILL, selected);
rem -= selected_tasksize;
}
lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
sc->nr_to_scan, sc->gfp_mask, rem);
read_unlock(&tasklist_lock);
return rem;
}
static struct shrinker lowmem_shrinker = {
.shrink = lowmem_shrink,
.seeks = DEFAULT_SEEKS * 16
};
static int __init lowmem_init(void)
{
register_shrinker(&lowmem_shrinker);
return 0;
}
static void __exit lowmem_exit(void)
{
unregister_shrinker(&lowmem_shrinker);
}
module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size,
S_IRUGO | S_IWUSR);
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
S_IRUGO | S_IWUSR);
module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
module_init(lowmem_init);
module_exit(lowmem_exit);
MODULE_LICENSE("GPL");
/*
* Copyright (C) 2010 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_
#define _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_
struct ram_console_platform_data {
const char *bootinfo;
};
#endif /* _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_ */
menuconfig ANDROID_SWITCH
tristate "Android Switch class support"
help
Say Y here to enable Android switch class support. This allows
monitoring switches by userspace via sysfs and uevent.
config ANDROID_SWITCH_GPIO
tristate "Android GPIO Switch support"
depends on GENERIC_GPIO && ANDROID_SWITCH
help
Say Y here to enable GPIO based switch support.
# Android Switch Class Driver
obj-$(CONFIG_ANDROID_SWITCH) += switch_class.o
obj-$(CONFIG_ANDROID_SWITCH_GPIO) += switch_gpio.o