Commit e476f944, authored Oct 07, 2016 by Dan Williams

    Merge branch 'for-4.9/dax' into libnvdimm-for-next

Parents: 178d6f4b, 4e65e938
Showing 7 changed files with 365 additions and 233 deletions (+365, -233)
drivers/dax/Kconfig             +5    -0
drivers/dax/dax.c               +350  -227
drivers/dax/dax.h               +3    -2
drivers/dax/pmem.c              +4    -3
drivers/nvdimm/Kconfig          +1    -1
fs/char_dev.c                   +1    -0
include/uapi/linux/magic.h      +1    -0
drivers/dax/Kconfig @ e476f944
...
...
@@ -23,4 +23,9 @@ config DEV_DAX_PMEM
	  Say Y if unsure

config NR_DEV_DAX
	int "Maximum number of Device-DAX instances"
	default 32768
	range 256 2147483647

endif
drivers/dax/dax.c @ e476f944
...
...
@@ -13,15 +13,25 @@
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mount.h>
#include <linux/pfn_t.h>
#include <linux/hash.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "dax.h"

static int dax_major;
static dev_t dax_devt;
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);
static int nr_dax = CONFIG_NR_DEV_DAX;
module_param(nr_dax, int, S_IRUGO);
static struct vfsmount *dax_mnt;
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;
MODULE_PARM_DESC(nr_dax, "max number of device-dax instances");

/**
 * struct dax_region - mapping infrastructure for dax devices
...
...
@@ -48,7 +58,7 @@ struct dax_region {
 * struct dax_dev - subdivision of a dax region
 * @region - parent region
 * @dev - device backing the character device
 * @kref - enable this data to be tracked in filp->private_data
 * @cdev - core chardev data
 * @alive - !alive + rcu grace period == no new mappings can be established
 * @id - child id in the region
 * @num_resources - number of physical address extents in this device
...
...
@@ -56,41 +66,139 @@ struct dax_region {
 */
struct dax_dev {
	struct dax_region *region;
	struct device *dev;
	struct kref kref;
	struct inode *inode;
	struct device dev;
	struct cdev cdev;
	bool alive;
	int id;
	int num_resources;
	struct resource res[0];
};

static void dax_region_free(struct kref *kref)
static struct inode *dax_alloc_inode(struct super_block *sb)
{
	struct dax_region *dax_region;
	return kmem_cache_alloc(dax_cache, GFP_KERNEL);
}

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);

static void dax_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(dax_cache, inode);
}

void dax_region_put(struct dax_region *dax_region)
static void dax_destroy_inode(struct inode *inode)
{
	kref_put(&dax_region->kref, dax_region_free);
	call_rcu(&inode->i_rcu, dax_i_callback);
}
EXPORT_SYMBOL_GPL(dax_region_put);

static void dax_dev_free(struct kref *kref)
static const struct super_operations dax_sops = {
	.statfs = simple_statfs,
	.alloc_inode = dax_alloc_inode,
	.destroy_inode = dax_destroy_inode,
	.drop_inode = generic_delete_inode,
};

static struct dentry *dax_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	struct dax_dev *dax_dev;
	return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}

	dax_dev = container_of(kref, struct dax_dev, kref);
	dax_region_put(dax_dev->region);
	kfree(dax_dev);

static struct file_system_type dax_type = {
	.name = "dax",
	.mount = dax_mount,
	.kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
	return inode->i_cdev == data;
}

static int dax_set(struct inode *inode, void *data)
{
	inode->i_cdev = data;
	return 0;
}

static struct inode *dax_inode_get(struct cdev *cdev, dev_t devt)
{
	struct inode *inode;

	inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
			dax_test, dax_set, cdev);
	if (!inode)
		return NULL;
	if (inode->i_state & I_NEW) {
		inode->i_mode = S_IFCHR;
		inode->i_flags = S_DAX;
		inode->i_rdev = devt;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}
	return inode;
}

static void init_once(void *inode)
{
	inode_init_once(inode);
}

static int dax_inode_init(void)
{
	int rc;

	dax_cache = kmem_cache_create("dax_cache", sizeof(struct inode), 0,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
			 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
			init_once);
	if (!dax_cache)
		return -ENOMEM;

	rc = register_filesystem(&dax_type);
	if (rc)
		goto err_register_fs;

	dax_mnt = kern_mount(&dax_type);
	if (IS_ERR(dax_mnt)) {
		rc = PTR_ERR(dax_mnt);
		goto err_mount;
	}
	dax_superblock = dax_mnt->mnt_sb;

	return 0;

 err_mount:
	unregister_filesystem(&dax_type);
 err_register_fs:
	kmem_cache_destroy(dax_cache);

	return rc;
}

static void dax_dev_put(struct dax_dev *dax_dev)
static void dax_inode_exit(void)
{
	kern_unmount(dax_mnt);
	unregister_filesystem(&dax_type);
	kmem_cache_destroy(dax_cache);
}

static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region;

	dax_region = container_of(kref, struct dax_region, kref);
	kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_dev->kref, dax_dev_free);
	kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);

struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct resource *res, unsigned int align, void *addr,
...
...
@@ -98,8 +206,11 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
{
	struct dax_region *dax_region;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!IS_ALIGNED(res->start, align)
			|| !IS_ALIGNED(resource_size(res), align))
		return NULL;

	dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
	if (!dax_region)
		return NULL;
...
...
@@ -116,10 +227,15 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

static struct dax_dev *to_dax_dev(struct device *dev)
{
	return container_of(dev, struct dax_dev, dev);
}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dax_dev *dax_dev = dev_get_drvdata(dev);
	struct dax_dev *dax_dev = to_dax_dev(dev);
	unsigned long long size = 0;
	int i;
...
...
@@ -144,180 +260,11 @@ static const struct attribute_group *dax_attribute_groups[] = {
	NULL,
};

static void unregister_dax_dev(void *_dev)
{
	struct device *dev = _dev;
	struct dax_dev *dax_dev = dev_get_drvdata(dev);
	struct dax_region *dax_region = dax_dev->region;

	dev_dbg(dev, "%s\n", __func__);

	/*
	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
	 * ensuring that any fault handlers that might have seen
	 * dax_dev->alive == true, have completed.  Any fault handlers
	 * that start after synchronize_rcu() has started will abort
	 * upon seeing dax_dev->alive == false.
	 */
	dax_dev->alive = false;
	synchronize_rcu();

	get_device(dev);
	device_unregister(dev);
	ida_simple_remove(&dax_region->ida, dax_dev->id);
	ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
	put_device(dev);
	dax_dev_put(dax_dev);
}

int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
		int count)
{
	struct device *parent = dax_region->dev;
	struct dax_dev *dax_dev;
	struct device *dev;
	int rc, minor;
	dev_t dev_t;

	dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
	if (!dax_dev)
		return -ENOMEM;
	memcpy(dax_dev->res, res, sizeof(*res) * count);
	dax_dev->num_resources = count;
	kref_init(&dax_dev->kref);
	dax_dev->alive = true;
	dax_dev->region = dax_region;
	kref_get(&dax_region->kref);

	dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
	if (dax_dev->id < 0) {
		rc = dax_dev->id;
		goto err_id;
	}

	minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_minor;
	}

	dev_t = MKDEV(dax_major, minor);
	dev = device_create_with_groups(dax_class, parent, dev_t, dax_dev,
			dax_attribute_groups, "dax%d.%d", dax_region->id,
			dax_dev->id);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto err_create;
	}
	dax_dev->dev = dev;

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
	if (rc)
		return rc;

	return 0;

 err_create:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	ida_simple_remove(&dax_region->ida, dax_dev->id);
 err_id:
	dax_dev_put(dax_dev);

	return rc;
}
EXPORT_SYMBOL_GPL(devm_create_dax_dev);

/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_dev_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dax_dev || addr)
		goto out;

	dax_region = dax_dev->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

static int __match_devt(struct device *dev, const void *data)
{
	const dev_t *devt = data;

	return dev->devt == *devt;
}

static struct device *dax_dev_find(dev_t dev_t)
{
	return class_find_device(dax_class, NULL, &dev_t, __match_devt);
}

static int dax_dev_open(struct inode *inode, struct file *filp)
{
	struct dax_dev *dax_dev = NULL;
	struct device *dev;

	dev = dax_dev_find(inode->i_rdev);
	if (!dev)
		return -ENXIO;

	device_lock(dev);
	dax_dev = dev_get_drvdata(dev);
	if (dax_dev) {
		dev_dbg(dev, "%s\n", __func__);
		filp->private_data = dax_dev;
		kref_get(&dax_dev->kref);
		inode->i_flags = S_DAX;
	}
	device_unlock(dev);

	if (!dax_dev) {
		put_device(dev);
		return -ENXIO;
	}

	return 0;
}

static int dax_dev_release(struct inode *inode, struct file *filp)
{
	struct dax_dev *dax_dev = filp->private_data;
	struct device *dev = dax_dev->dev;

	dev_dbg(dax_dev->dev, "%s\n", __func__);
	dax_dev_put(dax_dev);
	put_device(dev);

	return 0;
}

static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dax_dev->region;
	struct device *dev = dax_dev->dev;
	struct device *dev = &dax_dev->dev;
	unsigned long mask;

	if (!dax_dev->alive)
...
...
@@ -382,7 +329,7 @@ static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
		struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long) vmf->virtual_address;
	struct device *dev = dax_dev->dev;
	struct device *dev = &dax_dev->dev;
	struct dax_region *dax_region;
	int rc = VM_FAULT_SIGBUS;
	phys_addr_t phys;
...
...
@@ -422,7 +369,7 @@ static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	struct file *filp = vma->vm_file;
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
	dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
			current->comm, (vmf->flags & FAULT_FLAG_WRITE)
			? "write" : "read", vma->vm_start, vma->vm_end);

	rcu_read_lock();
...
...
@@ -437,7 +384,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
		unsigned int flags)
{
	unsigned long pmd_addr = addr & PMD_MASK;
	struct device *dev = dax_dev->dev;
	struct device *dev = &dax_dev->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
...
...
@@ -479,7 +426,7 @@ static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
	struct file *filp = vma->vm_file;
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
	dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
			current->comm, (flags & FAULT_FLAG_WRITE)
			? "write" : "read", vma->vm_start, vma->vm_end);
...
...
@@ -490,81 +437,257 @@ static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
	return rc;
}

static void dax_dev_vm_open(struct vm_area_struct *vma)
{
	struct file *filp = vma->vm_file;
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(dax_dev->dev, "%s\n", __func__);
	kref_get(&dax_dev->kref);
}

static void dax_dev_vm_close(struct vm_area_struct *vma)
{
	struct file *filp = vma->vm_file;
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(dax_dev->dev, "%s\n", __func__);
	dax_dev_put(dax_dev);
}

static const struct vm_operations_struct dax_dev_vm_ops = {
	.fault = dax_dev_fault,
	.pmd_fault = dax_dev_pmd_fault,
	.open = dax_dev_vm_open,
	.close = dax_dev_vm_close,
};

static int dax_dev_mmap(struct file *filp, struct vm_area_struct *vma)
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dax_dev *dax_dev = filp->private_data;
	int rc;

	dev_dbg(dax_dev->dev, "%s\n", __func__);
	dev_dbg(&dax_dev->dev, "%s\n", __func__);

	rc = check_vma(dax_dev, vma, __func__);
	if (rc)
		return rc;

	kref_get(&dax_dev->kref);
	vma->vm_ops = &dax_dev_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}
/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dax_dev || addr)
		goto out;

	dax_region = dax_dev->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
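
The arithmetic above over-asks the VM for one extra alignment unit and then slides the returned address so that it is congruent to the file offset modulo the region alignment, which is what lets aligned faults map to aligned physical extents. A minimal stand-alone sketch of that calculation; the addr/off/align values below are hypothetical sample numbers, not taken from the driver:

#include <stdio.h>

int main(void)
{
	unsigned long align = 2UL << 20;          /* 2 MiB region alignment */
	unsigned long off = 6UL << 20;            /* pgoff << PAGE_SHIFT */
	unsigned long len = 4UL << 20;            /* requested mapping length */
	unsigned long len_align = len + align;    /* over-ask by one alignment unit */
	unsigned long addr = 0x7f0000001000UL;    /* pretend get_unmapped_area() result */

	/* slide forward so that (addr_align - off) is a multiple of align */
	unsigned long addr_align = addr + ((off - addr) & (align - 1));

	printf("len_align  = %#lx\n", len_align);
	printf("addr_align = %#lx (offset-congruent: %d)\n",
	       addr_align, ((addr_align - off) & (align - 1)) == 0);
	return 0;
}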
static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_dev *dax_dev;

	dax_dev = container_of(inode->i_cdev, struct dax_dev, cdev);
	dev_dbg(&dax_dev->dev, "%s\n", __func__);
	inode->i_mapping = dax_dev->inode->i_mapping;
	inode->i_mapping->host = dax_dev->inode;
	filp->f_mapping = inode->i_mapping;
	filp->private_data = dax_dev;
	inode->i_flags = S_DAX;

	return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
	struct dax_dev *dax_dev = filp->private_data;

	dev_dbg(&dax_dev->dev, "%s\n", __func__);
	return 0;
}

static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_dev_open,
	.release = dax_dev_release,
	.get_unmapped_area = dax_dev_get_unmapped_area,
	.mmap = dax_dev_mmap,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
};
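
These file_operations back the /dev/daxX.Y character nodes created further below. For context, a hedged userspace sketch of how such a device is typically consumed; the device path and mapping size are assumptions for illustration, and check_vma() expects a shared mapping whose start and length respect the region alignment:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 2UL << 20;                   /* assume a 2 MiB aligned region */
	int fd = open("/dev/dax0.0", O_RDWR);     /* hypothetical device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	memset(p, 0, len);    /* faults are served directly from the device range */
	munmap(p, len);
	close(fd);
	return 0;
}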
static void dax_dev_release(struct device *dev)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	struct dax_region *dax_region = dax_dev->region;

	ida_simple_remove(&dax_region->ida, dax_dev->id);
	ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
	dax_region_put(dax_region);
	iput(dax_dev->inode);
	kfree(dax_dev);
}

static void unregister_dax_dev(void *dev)
{
	struct dax_dev *dax_dev = to_dax_dev(dev);
	struct cdev *cdev = &dax_dev->cdev;

	dev_dbg(dev, "%s\n", __func__);

	/*
	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
	 * ensuring that any fault handlers that might have seen
	 * dax_dev->alive == true, have completed.  Any fault handlers
	 * that start after synchronize_rcu() has started will abort
	 * upon seeing dax_dev->alive == false.
	 */
	dax_dev->alive = false;
	synchronize_rcu();
	unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
	cdev_del(cdev);
	device_unregister(dev);
}
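
The comment above describes a flag-plus-grace-period pattern: clearing alive stops new work, and synchronize_rcu() waits until every fault handler that might have read the old value has left its read-side section. A hedged userspace analogy of the same idea, assuming the liburcu library is available (build with -lurcu -lpthread); this is an illustration, not the kernel code, and the visibility of the plain bool store is simplified:

#include <pthread.h>
#include <stdbool.h>
#include <urcu.h>

static struct {
	bool alive;
	int payload;
} the_dev = { true, 42 };

static void *reader(void *arg)
{
	(void)arg;
	rcu_register_thread();
	for (int i = 0; i < 1000000; i++) {
		rcu_read_lock();
		if (the_dev.alive)
			(void)the_dev.payload;    /* stands in for fault-handler work */
		rcu_read_unlock();
	}
	rcu_unregister_thread();
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, reader, NULL);
	the_dev.alive = false;    /* no new "mappings" after this point */
	synchronize_rcu();        /* readers that saw alive == true have finished */
	/* now safe to tear down whatever those readers may have been touching */
	pthread_join(t, NULL);
	return 0;
}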
struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
		struct resource *res, int count)
{
	struct device *parent = dax_region->dev;
	struct dax_dev *dax_dev;
	int rc = 0, minor, i;
	struct device *dev;
	struct cdev *cdev;
	dev_t dev_t;

	dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
	if (!dax_dev)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < count; i++) {
		if (!IS_ALIGNED(res[i].start, dax_region->align)
				|| !IS_ALIGNED(resource_size(&res[i]),
					dax_region->align)) {
			rc = -EINVAL;
			break;
		}
		dax_dev->res[i].start = res[i].start;
		dax_dev->res[i].end = res[i].end;
	}

	if (i < count)
		goto err_id;

	dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
	if (dax_dev->id < 0) {
		rc = dax_dev->id;
		goto err_id;
	}

	minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_minor;
	}

	dev_t = MKDEV(MAJOR(dax_devt), minor);
	dev = &dax_dev->dev;
	dax_dev->inode = dax_inode_get(&dax_dev->cdev, dev_t);
	if (!dax_dev->inode) {
		rc = -ENOMEM;
		goto err_inode;
	}

	/* device_initialize() so cdev can reference kobj parent */
	device_initialize(dev);

	cdev = &dax_dev->cdev;
	cdev_init(cdev, &dax_fops);
	cdev->owner = parent->driver->owner;
	cdev->kobj.parent = &dev->kobj;
	rc = cdev_add(&dax_dev->cdev, dev_t, 1);
	if (rc)
		goto err_cdev;

	/* from here on we're committed to teardown via dax_dev_release() */
	dax_dev->num_resources = count;
	dax_dev->alive = true;
	dax_dev->region = dax_region;
	kref_get(&dax_region->kref);

	dev->devt = dev_t;
	dev->class = dax_class;
	dev->parent = parent;
	dev->groups = dax_attribute_groups;
	dev->release = dax_dev_release;
	dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
	rc = device_add(dev);
	if (rc) {
		put_device(dev);
		return ERR_PTR(rc);
	}

	rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev);
	if (rc)
		return ERR_PTR(rc);

	return dax_dev;

 err_cdev:
	iput(dax_dev->inode);
 err_inode:
	ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
	ida_simple_remove(&dax_region->ida, dax_dev->id);
 err_id:
	kfree(dax_dev);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(devm_create_dax_dev);
static int __init dax_init(void)
{
	int rc;

	rc = register_chrdev(0, "dax", &dax_fops);
	if (rc < 0)
	rc = dax_inode_init();
	if (rc)
		return rc;

	dax_major = rc;
	nr_dax = max(nr_dax, 256);
	rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
	if (rc)
		goto err_chrdev;

	dax_class = class_create(THIS_MODULE, "dax");
	if (IS_ERR(dax_class)) {
		unregister_chrdev(dax_major, "dax");
		return PTR_ERR(dax_class);
		rc = PTR_ERR(dax_class);
		goto err_class;
	}

	return 0;

 err_class:
	unregister_chrdev_region(dax_devt, nr_dax);
 err_chrdev:
	dax_inode_exit();
	return rc;
}

static void __exit dax_exit(void)
{
	class_destroy(dax_class);
	unregister_chrdev(dax_major, "dax");
	unregister_chrdev_region(dax_devt, nr_dax);
	ida_destroy(&dax_minor_ida);
	dax_inode_exit();
}

MODULE_AUTHOR("Intel Corporation");
...
...
drivers/dax/dax.h @ e476f944
...
...
@@ -13,12 +13,13 @@
#ifndef __DAX_H__
#define __DAX_H__
struct device;
struct dax_dev;
struct resource;
struct dax_region;
void dax_region_put(struct dax_region *dax_region);
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct resource *res, unsigned int align, void *addr,
		unsigned long flags);
int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
		int count);
struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
		struct resource *res, int count);
#endif /* __DAX_H__ */
drivers/dax/pmem.c @ e476f944
...
...
@@ -24,7 +24,7 @@ struct dax_pmem {
	struct completion cmp;
};

struct dax_pmem *to_dax_pmem(struct percpu_ref *ref)
static struct dax_pmem *to_dax_pmem(struct percpu_ref *ref)
{
	return container_of(ref, struct dax_pmem, ref);
}
...
...
@@ -61,6 +61,7 @@ static int dax_pmem_probe(struct device *dev)
	int rc;
	void *addr;
	struct resource res;
	struct dax_dev *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct dax_pmem *dax_pmem;
	struct nd_region *nd_region;
...
...
@@ -126,12 +127,12 @@ static int dax_pmem_probe(struct device *dev)
		return -ENOMEM;

	/* TODO: support for subdividing a dax region... */
	rc = devm_create_dax_dev(dax_region, &res, 1);
	dax_dev = devm_create_dax_dev(dax_region, &res, 1);

	/* child dax_dev instances now own the lifetime of the dax_region */
	dax_region_put(dax_region);

	return rc;
	return PTR_ERR_OR_ZERO(dax_dev);
}

static struct nd_device_driver dax_pmem_driver = {
...
...
drivers/nvdimm/Kconfig @ e476f944
...
...
@@ -89,7 +89,7 @@ config NVDIMM_PFN
	  Select Y if unsure

config NVDIMM_DAX
	bool "NVDIMM DAX: Raw access to persistent memory"
	tristate "NVDIMM DAX: Raw access to persistent memory"
	default LIBNVDIMM
	depends on NVDIMM_PFN
	help
...
...
fs/char_dev.c @ e476f944
...
...
@@ -406,6 +406,7 @@ void cd_forget(struct inode *inode)
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	inode->i_mapping = &inode->i_data;
	spin_unlock(&cdev_lock);
}
...
...
include/uapi/linux/magic.h @ e476f944
...
...
@@ -65,6 +65,7 @@
#define V9FS_MAGIC 0x01021997
#define BDEVFS_MAGIC 0x62646576
#define DAXFS_MAGIC 0x64646178
#define BINFMTFS_MAGIC 0x42494e4d
#define DEVPTS_SUPER_MAGIC 0x1cd1
#define FUTEXFS_SUPER_MAGIC 0xBAD1DEA
...
...