Commit bedd3b65 authored by Kirill Smelkov's avatar Kirill Smelkov

X incomplete review of newapi 2

many things from na1.patch still not yet integrated back.
parent 48be1a10
diff --git a/nodefs/bridge.go b/nodefs/bridge.go
index d51e897..5169a37 100644
--- a/nodefs/bridge.go
+++ b/nodefs/bridge.go
@@ -29,9 +29,15 @@ type rawBridge struct {
options Options
root *Inode
+ // we maintain index
+ //
+ // ID -> inode
+ //
+ // so that we can serve FUSE operational requests where nodes are specified by
+ // their ID.
mu sync.Mutex
- nodes []mapEntry
- free []uint64
+ nodes []mapEntry // ID -> Inode; some inodes may be here in "forgotten" state not yet removed
+ free []uint64 // free ID
files []fileEntry
freeFiles []uint64
@@ -70,65 +76,308 @@ func NewNodeFS(root Node, opts *Options) fuse.RawFileSystem {
return bridge
}
+// XXX kill?
func (b *rawBridge) inode(id uint64, fh uint64) (*Inode, fileEntry) {
- b.mu.Lock()
- defer b.mu.Unlock()
+// b.mu.Lock()
+// defer b.mu.Unlock() // XXX not good to return with released lock
return b.nodes[id].inode, b.files[fh]
}
-func (b *rawBridge) Lookup(header *fuse.InHeader, name string, out *fuse.EntryOut) (status fuse.Status) {
- parent, _ := b.inode(header.NodeId, 0)
+// Lookup & Forget
+//
+// Lookup checks inode.children and potentially calls inode.node.Lookup;
+// increments ichild.lookupCount. Forget decrements inode.lookupCount and, if
+// that reaches 0, unregisters inode from its parents.
+//
+// To avoid races, whenever we need to update group of inodes (for example
+// inode and its parents) we take locks on all inodes in the group.
+//
+// Before getting to take locks on a group, we start from locking only one
+// inode - the inode in question itself. This is required because we need to
+// first inspect the inode to decide what to do next and what is the inode
+// group to change.
+//
+// Going from only one inode locked to locked inode group requires to unlock
+// the inode in the middle. This creates possibility that at the time when
+// inode group will be locked, the inode itself was already changed. If the
+// inode was indeed changed, we restart the whole procedure.
+//
+// Inode.changeCounter is used to detect if inode was changed or not - it is
+// incremented every time the inode is modified.
+//
+// rawBridge.mu (the lock for ID -> inode index) is locked either:
+//
+// - on its own, or
+// - always after locking some inodes.
+//
+// it is never locked the other way (e.g. rawBridge.mu -> inode).
+//
+//
+// Note on FUSE FORGET semantic: Forget(inodeID) means filesystem client
+// (usually the kernel) evicts inode from its cache. It can happen that the
+// inode is directory and it children inodes are still kept in client cache
+// with nlookup > 0. If we see such a forgotten directory, we don't unlink it
+// from the tree until all of its children are also forgotten.
- child, code := parent.node.Lookup(context.TODO(), name, out)
- if !code.Ok() {
- if b.options.NegativeTimeout != nil {
- out.SetEntryTimeout(*b.options.NegativeTimeout)
+func (b *rawBridge) Forget(nodeid, nlookup uint64) {
+ b.mu.Lock()
+ inode := b.nodes[nodeid].inode
+ b.mu.Unlock()
+
+ // XXX if inode == nil || already forgotten -> log (client bug)
+ b.forgetInode(inode, nlookup)
+}
+
+func (b *rawBridge) forgetInode(inode *Inode, nlookup uint64) {
+ // lock inode, extract parents, unlock inode, relock(inode, parents),
+ // verify that inode had not changed (and retry if it changed), and
+ // perform unlinking.
+retry:
+ for {
+ inode.mu.Lock()
+ if nlookup != 0 {
+ inode.lookupCount -= nlookup // XXX log if it goes < 0
+ inode.changeCounter++
}
- return code
+ if inode.lookupCount != 0 {
+ inode.mu.Unlock()
+ return
+ }
+
+ // don't perform unlinking if inode was already forgotten.
+ //
+ // Examples when inode could be already forgotten:
+ //
+ // - if Forget, in between inode -> inode + parents relock, was
+ // interrupted by Lookup + another Forget -> we could retry here
+ // with the inode already forgotten.
+ //
+ // - if inode is directory and its forgetting was delayed due
+ // to alive children, the forget could be triggered by last
+ // forgotten children, which could be simultaneous with new
+ // Lookup and again Forget.
+ if inode.nodeID == 0 {
+ inode.mu.Unlock()
+ return
+ }
+
+ if len(inode.children) != 0 {
+ // client forgot about directory, but not about its children.
+ // delay the directory inode forget until its children are also forgotten.
+ inode.mu.Unlock()
+ return
+ }
+
+ // relock inode + parents.
+ // If inode stays the same - perform the forget; retry otherwise.
+
+ nlookup = 0 // make sure not to change lookupCount again, if we'll be retrying
+ lockv := []*Inode{inode}
+ for p := range inode.parents {
+ lockv = append(lockv, p.parent)
+ }
+
+ inodeVer := inode.changeCounter
+ inode.mu.Unlock()
+ lockNodes(lockv...)
+ if inodeVer != inode.changeCounter {
+ unlockNodes(lockv...)
+ continue retry
+ }
+
+ // we have locks on inode and .parents; let's unlink and forget
+ // inode. Also check if for unlinked parents forget was delayed,
+ // and if so and we are last child - do the forget on parent.
+ delayedForget := map[*Inode]struct{}{}
+ for p := range inode.parents {
+ iparent := p.parent
+ delete(iparent.children, p.name)
+ iparent.changeCounter++
+
+ // if parent was a directory with delayed forget and we
+ // were the last of its children - now is the time to
+ // do forget on the parent.
+ if iparent.lookupCount == 0 && len(iparent.children) == 0 {
+ delayedForget[iparent] = struct{}{}
+ }
+ }
+
+ nodeID := inode.nodeID
+ inode.nodeID = 0 // forgotten
+ inode.parents = map[parentData]struct{}{} // clear
+ inode.changeCounter++
+
+ unlockNodes(lockv...)
+
+ // release nodeID. In the meantime, while we were not holding
+ // b.mu locked and inode was forgotten, the ID could be already
+ // reused. So check if it is not the case.
+ b.mu.Lock()
+ if b.nodes[nodeID].inode == inode {
+ b.free = append(b.free, nodeID)
+ b.nodes[nodeID].inode = nil
+ }
+ b.mu.Unlock()
+
+ // execute delayed forget on parents
+ for iparent := range delayedForget {
+ b.forgetInode(iparent, 0)
+ }
+
+ // done
+ return
}
+}
+func (b *rawBridge) Lookup(header *fuse.InHeader, name string, out *fuse.EntryOut) (status fuse.Status) {
b.mu.Lock()
- defer b.mu.Unlock()
+ pentry := b.nodes[header.NodeId]
+ b.mu.Unlock()
- lockNodes(parent, child)
- parent.addLookup(name, child)
- unlockNodes(parent, child)
+ iparent := pentry.inode
- if child.nodeID == 0 {
- b.registerInode(child)
+ if iparent == nil {
+ return fuse.ENOENT // FIXME errcode=ok?
}
- out.NodeId = child.nodeID
- out.Generation = b.nodes[child.nodeID].generation
+retry:
+ for {
+ iparent.mu.Lock()
+ if iparent.nodeID == 0 {
+ iparent.mu.Unlock()
+ return fuse.ENOENT // forgotten XXX errcode=ok?
+ }
- if b.options.AttrTimeout != nil {
- out.SetAttrTimeout(*b.options.AttrTimeout)
- }
- if b.options.EntryTimeout != nil {
- out.SetEntryTimeout(*b.options.EntryTimeout)
- }
+ if iparent.mode&fuse.S_IFDIR == 0 {
+ iparent.mu.Unlock()
+ return fuse.ENOTDIR
+ }
- return fuse.OK
+ // if parent already has inode for this name - we can use it
+ ichild := iparent.children[name]
+ if ichild != nil {
+ // relock iparent + ichild; retry if iparent changed
+ iparentVer := iparent.changeCounter
+ iparent.mu.Unlock()
+ lockNodes(iparent, ichild)
+ if iparentVer != iparent.changeCounter {
+ unlockNodes(iparent, ichild)
+ continue retry
+ }
+ }
+
+ // iparent & ichild are locked; ichild might be nil.
+ lockv := []*Inode{iparent, ichild}
+ ichildNew := false
+ if ichild == nil {
+ // this goroutine becomes responsible for calling node.Lookup
+ // TODO place lookup-in-progress inode and do Lookup without iparent lock
+
+ child, code := iparent.node.Lookup(context.TODO(), name, out)
+ if !code.Ok() {
+ iparent.mu.Unlock()
+ if b.options.NegativeTimeout != nil {
+ out.SetEntryTimeout(*b.options.NegativeTimeout)
+ }
+ return code
+ }
+
+ ichild := newInode(child, out.Attr.Mode) // XXX + bridge
+ ichildNew = true
+ ok := child.setInode(ichild)
+ if !ok {
+ // someone, concurrently getting to this node (via a
+ // different path), already set the node -> inode
+ // association. This can happen e.g. if for
+ //
+ // root
+ // / \
+ // dir1 dir2
+ // \ /
+ // file
+ //
+ // dir1.Lookup("file") and dir2.Lookup("file") are executed simultaneously.
+ //
+ // we have to use that inode.
+ ichild = child.inode()
+
+ // relock to iparent + ichild and retry if iparent changes.
+ iparentVer := iparent.changeCounter
+ iparent.mu.Unlock()
+ lockNodes(iparent, ichild)
+ if iparentVer != iparent.changeCounter {
+ unlockNodes(iparent, ichild)
+ continue retry
+ }
+
+ ichildNew = false
+ lockv[1] = ichild
+ }
+ }
+
+ // now either:
+ //
+ // - iparent & ichild are locked, or
+ // - iparent is locked and ichild was just created anew.
+ //
+ // (what was locked is covered by lockv)
+ //
+ // -> perform `iparent <-> ichild` linking.
+ iparent.setEntry(name, ichild)
+ ichild.lookupCount++ // XXX ichild.changeCounter++ is in setEntry
+
+ // if ichild was created anew - we have to register ID for it.
+ // retrieve child generation while b is locked along the way.
+ b.mu.Lock()
+ if ichildNew {
+ b.registerInode(ichild)
+ }
+ childID := ichild.nodeID
+ childGen := b.nodes[childID].generation
+ b.mu.Unlock()
+
+ // release iparent + (ichild)? locks and we are done
+ unlockNodes(lockv...)
+
+ out.NodeId = childID
+ out.Generation = childGen
+
+ if b.options.AttrTimeout != nil {
+ out.SetAttrTimeout(*b.options.AttrTimeout)
+ }
+ if b.options.EntryTimeout != nil {
+ out.SetEntryTimeout(*b.options.EntryTimeout)
+ }
+
+ return fuse.OK
+ }
}
-func (b *rawBridge) registerInode(child *Inode) {
+// registerInode allocates new inode ID and registers inode with that ID to
+// inode ID index.
+//
+// must be called with inode and b locked.
+func (b *rawBridge) registerInode(inode *Inode) {
if l := len(b.free); l > 0 {
last := b.free[l-1]
b.free = b.free[:l-1]
- child.nodeID = last
- b.nodes[last].inode = child
+ inode.nodeID = last
+ b.nodes[last].inode = inode
b.nodes[last].generation++
} else {
last := len(b.nodes)
b.nodes = append(b.nodes, mapEntry{
- inode: child,
+ inode: inode,
})
- child.nodeID = uint64(last)
+ inode.nodeID = uint64(last)
}
+ inode.changeCounter++
}
func (b *rawBridge) Create(input *fuse.CreateIn, name string, out *fuse.CreateOut) (code fuse.Status) {
+ panic("TODO - similarly to Lookup")
+/*
ctx := context.TODO()
parent, _ := b.inode(input.NodeId, 0)
child, f, flags, code := parent.node.Create(ctx, name, input.Flags, input.Mode)
@@ -163,8 +412,11 @@ func (b *rawBridge) Create(input *fuse.CreateIn, name string, out *fuse.CreateOu
f.GetAttr(ctx, &out.Attr)
return fuse.OK
+*/
}
+// XXX move -> near Lookup, so that the interaction between Forget/Lookup is easier to oversee
+/*
func (b *rawBridge) Forget(nodeid, nlookup uint64) {
b.mu.Lock()
defer b.mu.Unlock()
@@ -179,6 +431,7 @@ func (b *rawBridge) Forget(nodeid, nlookup uint64) {
}
}
+*/
func (b *rawBridge) SetDebug(debug bool) {}
diff --git a/nodefs/default.go b/nodefs/default.go
index 56fcb4e..7fef8a1 100644
--- a/nodefs/default.go
+++ b/nodefs/default.go
@@ -6,22 +6,47 @@ package nodefs
// DefaultNode provides common base Node functionality.
//
// It must be embedded in any Node implementation.
type DefaultNode struct {
- inode *Inode
-}
-
-func (dn *DefaultNode) setInode(n *Inode) {
- dn.inode = n
-}
-
-func (dn *DefaultNode) Inode() *Inode {
- return dn.inode
+ inode_ *Inode
+}
+
+// set/retrieve inode.
+//
+// node -> inode association, can be simultaneously tried to be set, if for e.g.
+//
+// root
+// / \
+// dir1 dir2
+// \ /
+// file
+//
+// dir1.Lookup("file") and dir2.Lookup("file") are executed simultaneously.
+//
+// We use atomics so that only one set can win and rawBridge.Lookup cares to
+// cancel the inode that lost.
+//
+// To read node.inode atomic.LoadPointer is used, however it is not expensive
+// since it translates to regular MOVQ on amd64.
+
+func (dn *DefaultNode) setInode(inode *Inode) bool {
+ return atomic.CompareAndSwapPointer(
+ (*unsafe.Pointer)(unsafe.Pointer(&dn.inode_)),
+ nil, unsafe.Pointer(inode))
+}
+
+func (dn *DefaultNode) inode() *Inode {
+ return (*Inode)(atomic.LoadPointer(
+ (*unsafe.Pointer)(unsafe.Pointer(&dn.inode_))))
}
func (n *DefaultNode) Read(ctx context.Context, f File, dest []byte, off int64) (fuse.ReadResult, fuse.Status) {
diff --git a/nodefs/inode.go b/nodefs/inode.go
index 2f14b9d..4f3ebbb 100644
--- a/nodefs/inode.go
+++ b/nodefs/inode.go
@@ -34,38 +34,93 @@ type Inode struct {
// Following data is mutable.
- // Protected by bridge.mu
- nodeID uint64
-
// mu protects the following mutable fields. When locking
// multiple Inodes, locks must be acquired using
// lockNodes/unlockNodes
mu sync.Mutex
// changeCounter increments every time the below mutable state
// (lookupCount, nodeID, children, parents) is modified.
//
// This is used in places where we have to relock inode into inode
// group lock, and after locking the group we have to check if inode
// did not changed, and if it changed - retry the operation.
changeCounter uint32
lookupCount uint64
+
+ // ID of the inode; 0 if inode was forgotten.
+ // forgotten inodes are unlinked from parent and children, but could be
+ // still not yet removed from bridge.nodes .
+ nodeID uint64
+
children map[string]*Inode
parents map[parentData]struct{}
}
+// newInode creates a new inode pointing to node.
+//
+// node -> inode association is NOT set.
+// the inode does _not_ yet have an ID assigned.
+func newInode(node Node, mode uint32) *Inode {
+ inode := &Inode{
+ mode: mode ^ 07777,
+ node: node,
+ //bridge: n.bridge,
+ parents: make(map[parentData]struct{}),
+ }
+ if mode&fuse.S_IFDIR != 0 {
+ inode.children = make(map[string]*Inode)
+ }
+ //node.setInode(ch)
+ return inode
+}
+
// sortNodes rearranges inode group in consistent order.
//
// The nodes are ordered by their in-RAM address, which gives consistency
@@ -74,9 +129,14 @@ func unlockNodes(ns ...*Inode) {
// kernel has no way of reviving forgotten nodes by its own
// initiative.
func (n *Inode) Forgotten() bool {
+ /*
n.bridge.mu.Lock()
defer n.bridge.mu.Unlock()
return n.lookupCount == 0
+ */
+ n.mu.Lock()
+ defer n.mu.Unlock()
+ return n.nodeID == 0
}
// Node returns the Node object implementing the file system operations.
diff --git a/nodefs/simple_test.go b/nodefs/simple_test.go
index df0e09c..5e2dc46 100644
--- a/nodefs/simple_test.go
+++ b/nodefs/simple_test.go
@@ -6,10 +6,10 @@ package nodefs
import (
"bytes"
- "io"
"io/ioutil"
"os"
"path/filepath"
+ "runtime"
"testing"
"time"
......@@ -36,6 +36,9 @@
// automatically builds new index node and links it in the filesystem tree.
// InodeOf can be used to get particular Inode associated with a Node.
//
// XXX once can also create new subtrees via linking nodes explicitly, and
// return such subtree on lookup.
//
// XXX ^^^ inodes cleaned on cache clean (FORGET).
//
// XXX describe how to mount.
......@@ -57,8 +60,11 @@ import (
//
// The identity of the Inode does not change over the lifetime of
// the node object.
//
// Returned Inode is always !nil - if node was not yet associated with inode, a
// new inode is atomically created and associated with the node.
func InodeOf(node Node) *Inode {
return node.inode()
return inodeOf(node)
}
/*
......@@ -81,21 +87,32 @@ type Node interface {
inode() *Inode
setInode(*Inode)
// NodeID() should return filesystem-wide ID of the node.
//
// If the node has such ID it will be used as the base for corresponding inode ID.
// If the node does not have such ID - 0 must be returned and
// automatically allocated inode ID will be used.
//
// The ID, if given, must be unique throughout filesystem.
//
// XXX range of allowed ID = ?
NodeID() uint64
// Lookup should find a direct child of the node by child name.
//
// VFS makes sure to call Lookup only once for particular (node, name)
// pair.
Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*Inode, fuse.Status)
Lookup(ctx context.Context, name string, out *fuse.EntryOut) (Node, fuse.Status)
Mkdir(ctx context.Context, name string, mode uint32, out *fuse.EntryOut) (*Inode, fuse.Status)
Mknod(ctx context.Context, name string, mode uint32, dev uint32, out *fuse.EntryOut) (*Inode, fuse.Status)
Mkdir(ctx context.Context, name string, mode uint32, out *fuse.EntryOut) (Node, fuse.Status)
Mknod(ctx context.Context, name string, mode uint32, dev uint32, out *fuse.EntryOut) (Node, fuse.Status)
Rmdir(ctx context.Context, name string) fuse.Status
Unlink(ctx context.Context, name string) fuse.Status
Rename(ctx context.Context, name string, newParent Node, newName string, flags uint32) fuse.Status
Open(ctx context.Context, flags uint32) (fh File, fuseFlags uint32, code fuse.Status)
Create(ctx context.Context, name string, flags uint32, mode uint32) (node *Inode, fh File, fuseFlags uint32, code fuse.Status)
Create(ctx context.Context, name string, flags uint32, mode uint32) (node Node, fh File, fuseFlags uint32, code fuse.Status)
Read(ctx context.Context, f File, dest []byte, off int64) (fuse.ReadResult, fuse.Status)
......@@ -137,6 +154,7 @@ type Node interface {
Allocate(ctx context.Context, f File, off uint64, size uint64, mode uint32) (code fuse.Status)
}
// XXX -> Handle? FileHandle? (better Handle as handle could be used not only for leaves)
type File interface {
Read(ctx context.Context, dest []byte, off int64) (fuse.ReadResult, fuse.Status)
Write(ctx context.Context, data []byte, off int64) (written uint32, code fuse.Status)
......
......@@ -20,6 +20,7 @@ type fileEntry struct {
// space to hold directory stuff
}
// rawBridge interconnects nodefs tree structure with raw FUSE exchange.
type rawBridge struct {
fuse.RawFileSystem
......@@ -36,7 +37,7 @@ type rawBridge struct {
freeFiles []uint64
}
// newInode creates creates new inode pointing to node.
// newInode creates new inode pointing to node.
// XXX - should store the Ino number we expose in GetAttr too ?
func (b *rawBridge) newInode(node Node, mode uint32, id FileID, persistent bool) *Inode {
b.mu.Lock()
......@@ -46,6 +47,8 @@ func (b *rawBridge) newInode(node Node, mode uint32, id FileID, persistent bool)
log.Panicf("using reserved ID %d for inode number", id.Ino)
}
// FIXME with automatic IDs this can create 2 inodes for 1 node if
// there are 2 concurrent lookups via different paths.
if id.Ino == 0 {
id.Ino = b.automaticIno
b.automaticIno++
......@@ -153,7 +156,6 @@ func (b *rawBridge) Rmdir(header *fuse.InHeader, name string) fuse.Status {
parent.RmChild(name)
}
return code
}
func (b *rawBridge) Unlink(header *fuse.InHeader, name string) fuse.Status {
......@@ -425,6 +427,9 @@ func (b *rawBridge) Open(input *fuse.OpenIn, out *fuse.OpenOut) (status fuse.Sta
//
// XXX is it allowed to return the same Fh from two different Open
// calls on the same inode?
// XXX -> absolutely no, 2 different opens should result in 2 different Fh.
// an example when not doing so would break badly is e.g. socket-like file-handles,
// where IO on a different handle is completely separate from the rest.
func (b *rawBridge) registerFile(f File) uint64 {
var fh uint64
if len(b.freeFiles) > 0 {
......
......@@ -18,6 +18,10 @@ type DefaultNode struct {
inode_ *Inode
}
// XXX why we dropped atomics here?
// -> moved rawBridge.newInode() ?
// text: "set/retrieve inode" in na1.patch
func (dn *DefaultNode) setInode(inode *Inode) {
dn.inode_ = inode
}
......@@ -26,6 +30,12 @@ func (dn *DefaultNode) inode() *Inode {
return dn.inode_
}
func inodeOf(node Node) *Inode {
// XXX .inode = nil -> atomically create new
// XXX requires that we move atomic stuff back to setInode/inode/inodeOf
return node.inode()
}
func (n *DefaultNode) Read(ctx context.Context, f File, dest []byte, off int64) (fuse.ReadResult, fuse.Status) {
if f != nil {
return f.Read(ctx, dest, off)
......
......@@ -37,7 +37,7 @@ type FileID struct {
Gen uint64
}
// Zero returns if the FileID is zeroed out
// Reserved returns if the FileID is reserved and should not be used ... XXX
func (i *FileID) Reserved() bool {
return i.Ino == 0 || i.Ino == 1 || i.Ino == ^uint64(0)
}
......@@ -117,11 +117,12 @@ func nodeLess(a, b *Inode) bool {
// It also avoids locking an inode more than once, if it was specified multiple times.
// An example when an inode might be given multiple times is if dir/a and dir/b
// are hardlinked to the same inode and the caller needs to take locks on dir children.
//
// It is valid to give nil nodes - those are simply ignored.
func lockNodes(ns ...*Inode) {
sortNodes(ns)
// The default value nil prevents trying to lock nil nodes.
var nprev *Inode
var nprev *Inode = nil // initial nil + sort/dedup makes us ignore nil nodes
for _, n := range ns {
if n != nprev {
n.mu.Lock()
......@@ -133,22 +134,30 @@ func lockNodes(ns ...*Inode) {
// lockNode2 locks a and b in order consistent with lockNodes.
func lockNode2(a, b *Inode) {
if a == b {
b = nil
}
if !nodeLess(a, b) {
b, a = a, b
}
if a != nil {
a.mu.Lock()
} else if nodeLess(a, b) {
a.mu.Lock()
b.mu.Lock()
} else {
}
if b != nil {
b.mu.Lock()
a.mu.Lock()
}
}
// unlockNode2 unlocks a and b
// unlockNode2 unlocks a and b locked by lockNode2.
func unlockNode2(a, b *Inode) {
if a == b {
b = nil
}
// there is no deadlock even if we unlock nodes in a different
// order than the one they were locked in.
if a != nil {
a.mu.Unlock()
} else {
a.mu.Unlock()
}
if b != nil {
b.mu.Unlock()
}
}
......@@ -159,7 +168,7 @@ func unlockNodes(ns ...*Inode) {
// however it still helps to have nodes sorted to avoid duplicates.
sortNodes(ns)
var nprev *Inode
var nprev *Inode = nil // initial nil + sort/dedup makes us ignore nil nodes
for _, n := range ns {
if n != nprev {
n.mu.Unlock()
......@@ -351,7 +360,7 @@ retry:
// RmChild removes multiple children. Returns whether the removal
// succeeded and whether the node is still live afterward. The removal
// is transactional: it only succeeds if all names are children, and
// is transactional: it only succeeds if all names are in children, and
// if they all were removed successfully. If the removal was
// successful, and there are no children left, the node may be removed
// from the FS tree. In that case, RmChild returns live==false.
......
......@@ -62,7 +62,7 @@ func (n *loopbackNode) path() string {
return filepath.Join(n.rootNode.root, path)
}
func (n *loopbackNode) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*Inode, fuse.Status) {
func (n *loopbackNode) Lookup(ctx context.Context, name string, out *fuse.EntryOut) (Node, fuse.Status) {
p := filepath.Join(n.path(), name)
st := syscall.Stat_t{}
......@@ -73,11 +73,12 @@ func (n *loopbackNode) Lookup(ctx context.Context, name string, out *fuse.EntryO
out.Attr.FromStat(&st)
node := n.rootNode.newLoopbackNode()
ch := n.inode().NewInode(node, out.Attr.Mode, idFromStat(&st))
return ch, fuse.OK
return node, fuse.OK
//ch := n.inode().NewInode(node, out.Attr.Mode, idFromStat(&st))
//return ch, fuse.OK
}
func (n *loopbackNode) Mknod(ctx context.Context, name string, mode, rdev uint32, out *fuse.EntryOut) (*Inode, fuse.Status) {
func (n *loopbackNode) Mknod(ctx context.Context, name string, mode, rdev uint32, out *fuse.EntryOut) (Node, fuse.Status) {
p := filepath.Join(n.path(), name)
err := syscall.Mknod(p, mode, int(rdev))
if err != nil {
......@@ -92,9 +93,10 @@ func (n *loopbackNode) Mknod(ctx context.Context, name string, mode, rdev uint32
out.Attr.FromStat(&st)
node := n.rootNode.newLoopbackNode()
ch := n.inode().NewInode(node, out.Attr.Mode, idFromStat(&st))
return node, fuse.OK
return ch, fuse.OK
//ch := n.inode().NewInode(node, out.Attr.Mode, idFromStat(&st))
//return ch, fuse.OK
}
func (n *loopbackNode) Mkdir(ctx context.Context, name string, mode uint32, out *fuse.EntryOut) (*Inode, fuse.Status) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment