Commit b9c64498 authored Mar 03, 2008 by Paul Mackerras

Merge branch 'master' of
git://git.kernel.org/pub/scm/linux/kernel/git/jk/spufs
into merge

parents 35d77ef1 2a58aa33

Showing 6 changed files with 38 additions and 12 deletions (+38 -12)
arch/powerpc/platforms/cell/spu_base.c        +13  -3
arch/powerpc/platforms/cell/spufs/context.c    +3  -4
arch/powerpc/platforms/cell/spufs/file.c      +11  -1
arch/powerpc/platforms/cell/spufs/sched.c      +1  -1
arch/powerpc/platforms/cell/spufs/sputrace.c   +4  -3
arch/powerpc/platforms/cell/spufs/switch.c     +6  -0
arch/powerpc/platforms/cell/spu_base.c

@@ -81,9 +81,12 @@ struct spu_slb {
 void spu_invalidate_slbs(struct spu *spu)
 {
         struct spu_priv2 __iomem *priv2 = spu->priv2;
+        unsigned long flags;
 
+        spin_lock_irqsave(&spu->register_lock, flags);
         if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
                 out_be64(&priv2->slb_invalidate_all_W, 0UL);
+        spin_unlock_irqrestore(&spu->register_lock, flags);
 }
 EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

@@ -148,7 +151,11 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
                         __func__, slbe, slb->vsid, slb->esid);
 
         out_be64(&priv2->slb_index_W, slbe);
+        /* set invalid before writing vsid */
+        out_be64(&priv2->slb_esid_RW, 0);
+        /* now it's safe to write the vsid */
         out_be64(&priv2->slb_vsid_RW, slb->vsid);
+        /* setting the new esid makes the entry valid again */
         out_be64(&priv2->slb_esid_RW, slb->esid);
 }

@@ -290,9 +297,11 @@ void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
                 nr_slbs++;
         }
 
+        spin_lock_irq(&spu->register_lock);
         /* Add the set of SLBs */
         for (i = 0; i < nr_slbs; i++)
                 spu_load_slb(spu, i, &slbs[i]);
+        spin_unlock_irq(&spu->register_lock);
 }
 EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
@@ -337,13 +346,14 @@ spu_irq_class_1(int irq, void *data)
         if (stat & CLASS1_STORAGE_FAULT_INTR)
                 spu_mfc_dsisr_set(spu, 0ul);
         spu_int_stat_clear(spu, 1, stat);
-        spin_unlock(&spu->register_lock);
-        pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
-                        dar, dsisr);
 
         if (stat & CLASS1_SEGMENT_FAULT_INTR)
                 __spu_trap_data_seg(spu, dar);
 
+        spin_unlock(&spu->register_lock);
+        pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
+                        dar, dsisr);
+
         if (stat & CLASS1_STORAGE_FAULT_INTR)
                 __spu_trap_data_map(spu, dar, dsisr);
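The spu_base.c hunks above put spu->register_lock around the multi-step SLB register sequences, so an invalidation can no longer interleave with a half-written entry. As a rough userspace sketch of that pattern only (not kernel code: fake_spu, load_slb_entry and the field names are illustrative stand-ins), the point is that the invalidate / write-vsid / re-validate steps happen atomically with respect to any other user of the "registers":

/* Userspace sketch of the "lock around a multi-step register update"
 * pattern used above; all names are made up for illustration. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fake_spu {
        pthread_mutex_t register_lock;
        uint64_t slb_index_W;
        uint64_t slb_vsid_RW;
        uint64_t slb_esid_RW;
};

/* Roughly mirrors the spu_load_slb() sequence: invalidate, write the
 * vsid, then re-validate by writing the esid, all while holding the
 * lock so a concurrent invalidation cannot observe a partial entry. */
static void load_slb_entry(struct fake_spu *spu, int index,
                           uint64_t vsid, uint64_t esid)
{
        pthread_mutex_lock(&spu->register_lock);
        spu->slb_index_W = (uint64_t)index;
        spu->slb_esid_RW = 0;        /* set invalid before writing vsid */
        spu->slb_vsid_RW = vsid;     /* now it's safe to write the vsid */
        spu->slb_esid_RW = esid;     /* new esid makes the entry valid again */
        pthread_mutex_unlock(&spu->register_lock);
}

int main(void)
{
        struct fake_spu spu = { .register_lock = PTHREAD_MUTEX_INITIALIZER };

        load_slb_entry(&spu, 0, 0x1234, 0x5678);
        printf("esid=%#llx vsid=%#llx\n",
               (unsigned long long)spu.slb_esid_RW,
               (unsigned long long)spu.slb_vsid_RW);
        return 0;
}

Holding the lock across the whole sequence is what makes the intermediate "invalid" state unobservable; in the kernel the interrupt-safe variants (spin_lock_irqsave / spin_lock_irq) are used because the same registers are touched from interrupt context.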
arch/powerpc/platforms/cell/spufs/context.c

@@ -109,13 +109,12 @@ void spu_forget(struct spu_context *ctx)
 
         /*
          * This is basically an open-coded spu_acquire_saved, except that
-         * we don't acquire the state mutex interruptible.
+         * we don't acquire the state mutex interruptible, and we don't
+         * want this context to be rescheduled on release.
          */
         mutex_lock(&ctx->state_mutex);
-        if (ctx->state != SPU_STATE_SAVED) {
-                set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
+        if (ctx->state != SPU_STATE_SAVED)
                 spu_deactivate(ctx);
-        }
 
         mm = ctx->owner;
         ctx->owner = NULL;
arch/powerpc/platforms/cell/spufs/file.c

@@ -366,6 +366,13 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
         if (offset >= ps_size)
                 return NOPFN_SIGBUS;
 
+        /*
+         * Because we release the mmap_sem, the context may be destroyed while
+         * we're in spu_wait. Grab an extra reference so it isn't destroyed
+         * in the meantime.
+         */
+        get_spu_context(ctx);
+
         /*
          * We have to wait for context to be loaded before we have
          * pages to hand out to the user, but we don't want to wait

@@ -375,7 +382,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
          * hanged.
          */
         if (spu_acquire(ctx))
-                return NOPFN_REFAULT;
+                goto refault;
 
         if (ctx->state == SPU_STATE_SAVED) {
                 up_read(&current->mm->mmap_sem);

@@ -391,6 +398,9 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
         if (!ret)
                 spu_release(ctx);
+
+refault:
+        put_spu_context(ctx);
         return NOPFN_REFAULT;
 }
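The file.c change takes an extra reference on the context before the fault handler can block with mmap_sem dropped, and releases it on every exit path through the new refault: label. A minimal userspace sketch of that get/put-around-a-blocking-region pattern (struct ctx, ctx_get and ctx_put are illustrative names, not the spufs helpers):

/* Userspace sketch: hold an extra reference across a region where the
 * object could otherwise be freed by another thread, and drop it on
 * every exit path. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
        atomic_int refcount;
        int state;
};

static void ctx_get(struct ctx *c)
{
        atomic_fetch_add(&c->refcount, 1);
}

static void ctx_put(struct ctx *c)
{
        if (atomic_fetch_sub(&c->refcount, 1) == 1)
                free(c);        /* last reference dropped */
}

/* Stand-in for the fault path: the blocking wait may run with locks
 * dropped, so the extra reference is what keeps *c valid until return. */
static int handle_fault(struct ctx *c)
{
        int ret = -1;

        ctx_get(c);
        if (c->state < 0)
                goto refault;   /* error path still reaches the put below */
        /* ... blocking wait would go here ... */
        ret = 0;
refault:
        ctx_put(c);
        return ret;
}

int main(void)
{
        struct ctx *c = calloc(1, sizeof(*c));

        atomic_init(&c->refcount, 1);
        printf("fault returned %d\n", handle_fault(c));
        ctx_put(c);     /* drop the caller's original reference */
        return 0;
}

Funnelling both the error and success paths through one label is what keeps the reference count balanced, which is the same reason the kernel change replaces the early return with goto refault.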
arch/powerpc/platforms/cell/spufs/sched.c

@@ -246,7 +246,7 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
         spu_switch_notify(spu, ctx);
         ctx->state = SPU_STATE_RUNNABLE;
 
-        spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
+        spuctx_switch_state(ctx, SPU_UTIL_USER);
 }
 
 /*
arch/powerpc/platforms/cell/spufs/sputrace.c

@@ -58,12 +58,12 @@ static int sputrace_sprint(char *tbuf, int n)
                 ktime_to_timespec(ktime_sub(t->tstamp, sputrace_start));
 
         return snprintf(tbuf, n,
-                "[%lu.%09lu] %d: %s (thread = %d, spu = %d)\n",
+                "[%lu.%09lu] %d: %s (ctxthread = %d, spu = %d)\n",
                 (unsigned long) tv.tv_sec,
                 (unsigned long) tv.tv_nsec,
-                t->owner_tid,
-                t->name,
                 t->curr_tid,
+                t->name,
+                t->owner_tid,
                 t->number);
 }

@@ -188,6 +188,7 @@ struct spu_probe spu_probes[] = {
         { "spufs_ps_nopfn__insert", "%p %p", spu_context_event },
         { "spu_acquire_saved__enter", "%p", spu_context_nospu_event },
         { "destroy_spu_context__enter", "%p", spu_context_nospu_event },
+        { "spufs_stop_callback__enter", "%p %p", spu_context_event },
 };
 
 static int __init sputrace_init(void)
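The sputrace_sprint() fix above is the classic printf-family pitfall: the argument list had drifted out of step with the format string, so the trace printed the owner thread where the currently running thread was meant. A tiny self-contained illustration (the tid values and labels are made up):

/* Sketch of the bug class fixed above: the format string's labels and
 * the argument order must agree, or fields are swapped silently. */
#include <stdio.h>

int main(void)
{
        int owner_tid = 100;    /* thread that owns the context */
        int curr_tid = 200;     /* thread currently running it */

        /* Misleading: the first label gets the owner, not the runner. */
        printf("ctxthread = %d, owner = %d (swapped)\n", owner_tid, curr_tid);

        /* Correct: argument order matches the labels, as in the fix. */
        printf("ctxthread = %d, owner = %d (correct)\n", curr_tid, owner_tid);
        return 0;
}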
arch/powerpc/platforms/cell/spufs/switch.c

@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/errno.h>
+#include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>

@@ -117,6 +118,8 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
          * Write INT_MASK_class1 with value of 0.
          * Save INT_Mask_class2 in CSA.
          * Write INT_MASK_class2 with value of 0.
+         * Synchronize all three interrupts to be sure
+         * we no longer execute a handler on another CPU.
          */
         spin_lock_irq(&spu->register_lock);
         if (csa) {

@@ -129,6 +132,9 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
         spu_int_mask_set(spu, 2, 0ul);
         eieio();
         spin_unlock_irq(&spu->register_lock);
+        synchronize_irq(spu->irqs[0]);
+        synchronize_irq(spu->irqs[1]);
+        synchronize_irq(spu->irqs[2]);
 }
 
 static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
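With this change, disable_interrupts() first masks all three interrupt classes and then calls synchronize_irq() on each line, so a handler that was already running on another CPU is guaranteed to have finished before the context switch proceeds. A userspace sketch of that "mask, then wait for in-flight handlers" idea, assuming pthreads (all names here are illustrative; in the kernel, synchronize_irq() provides the waiting for you):

/* Userspace sketch: set a "masked" flag, then wait until no handler
 * body is still executing on another thread. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t idle = PTHREAD_COND_INITIALIZER;
static bool masked;
static int handlers_running;

/* A pretend interrupt handler: bail out if masked, otherwise run the
 * body while counted, and wake waiters when the count drops. */
static void *handler(void *arg)
{
        pthread_mutex_lock(&lock);
        if (!masked) {
                handlers_running++;
                pthread_mutex_unlock(&lock);
                /* ... handler body runs without the lock held ... */
                pthread_mutex_lock(&lock);
                handlers_running--;
                pthread_cond_broadcast(&idle);
        }
        pthread_mutex_unlock(&lock);
        return arg;
}

/* Analogue of disable_interrupts() + synchronize_irq(): after this
 * returns, no handler body is executing anywhere. */
static void disable_and_synchronize(void)
{
        pthread_mutex_lock(&lock);
        masked = true;
        while (handlers_running > 0)
                pthread_cond_wait(&idle, &lock);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, handler, NULL);
        disable_and_synchronize();
        printf("no handlers in flight\n");
        pthread_join(t, NULL);
        return 0;
}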