Commit efb80e7e authored by Kyle McMartin, committed by Kyle McMartin

[PARISC] import necessary bits of libgcc.a

Currently we're hacking libs-y to include libgcc.a, but this has
unforeseen consequences since the userspace libgcc is linked with fpregs
enabled. We need the kernel to stop using fpregs in an uncontrolled manner
to implement lazy fpu state saves.
Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
parent 6f7d998e
@@ -69,7 +69,7 @@ kernel-y := mm/ kernel/ math-emu/ kernel/init_task.o
kernel-$(CONFIG_HPUX) += hpux/
core-y += $(addprefix arch/parisc/, $(kernel-y))
libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name`
libs-y += arch/parisc/lib/
drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/
@@ -122,31 +122,9 @@ EXPORT_SYMBOL($$divI_12);
EXPORT_SYMBOL($$divI_14);
EXPORT_SYMBOL($$divI_15);
extern void __ashrdi3(void);
extern void __ashldi3(void);
extern void __lshrdi3(void);
extern void __muldi3(void);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
asmlinkage void * __canonicalize_funcptr_for_compare(void *);
EXPORT_SYMBOL(__canonicalize_funcptr_for_compare);
#ifdef CONFIG_64BIT
extern void __divdi3(void);
extern void __udivdi3(void);
extern void __umoddi3(void);
extern void __moddi3(void);
EXPORT_SYMBOL(__divdi3);
EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__umoddi3);
EXPORT_SYMBOL(__moddi3);
#endif
#ifndef CONFIG_64BIT
extern void $$dyncall(void);
EXPORT_SYMBOL($$dyncall);
@@ -4,4 +4,4 @@
lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o
obj-y := iomap.o
obj-y := libgcc/ milli/ iomap.o
obj-y := __ashldi3.o __ashrdi3.o __clzsi2.o __divdi3.o __divsi3.o \
__lshrdi3.o __moddi3.o __modsi3.o __udivdi3.o \
__udivmoddi4.o __udivmodsi4.o __udivsi3.o \
__umoddi3.o __umodsi3.o __muldi3.o __umulsidi3.o
#include "libgcc.h"
u64 __ashldi3(u64 v, int cnt)
{
int c = cnt & 31;
u32 vl = (u32) v;
u32 vh = (u32) (v >> 32);
if (cnt & 32) {
vh = (vl << c);
vl = 0;
} else {
vh = (vh << c) + (vl >> (32 - c));
vl = (vl << c);
}
return ((u64) vh << 32) + vl;
}
EXPORT_SYMBOL(__ashldi3);
#include "libgcc.h"
u64 __ashrdi3(u64 v, int cnt)
{
int c = cnt & 31;
u32 vl = (u32) v;
u32 vh = (u32) (v >> 32);
if (cnt & 32) {
vl = ((s32) vh >> c);
vh = (s32) vh >> 31;
} else {
vl = (vl >> c) + (vh << (32 - c));
vh = ((s32) vh >> c);
}
return ((u64) vh << 32) + vl;
}
EXPORT_SYMBOL(__ashrdi3);
#include "libgcc.h"
u32 __clzsi2(u32 v)
{
int p = 31;
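/* Binary search for the highest set bit: p starts at 31 and drops by
 * 16/8/4/2/1 as non-zero upper halves are found, leaving p equal to the
 * leading-zero count. */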
if (v & 0xffff0000) {
p -= 16;
v >>= 16;
}
if (v & 0xff00) {
p -= 8;
v >>= 8;
}
if (v & 0xf0) {
p -= 4;
v >>= 4;
}
if (v & 0xc) {
p -= 2;
v >>= 2;
}
if (v & 0x2) {
p -= 1;
v >>= 1;
}
return p;
}
EXPORT_SYMBOL(__clzsi2);
#include "libgcc.h"
s64 __divdi3(s64 num, s64 den)
{
int minus = 0;
s64 v;
if (num < 0) {
num = -num;
minus = 1;
}
if (den < 0) {
den = -den;
minus ^= 1;
}
v = __udivmoddi4(num, den, NULL);
if (minus)
v = -v;
return v;
}
EXPORT_SYMBOL(__divdi3);
#include "libgcc.h"
s32 __divsi3(s32 num, s32 den)
{
int minus = 0;
s32 v;
if (num < 0) {
num = -num;
minus = 1;
}
if (den < 0) {
den = -den;
minus ^= 1;
}
v = __udivmodsi4(num, den, NULL);
if (minus)
v = -v;
return v;
}
EXPORT_SYMBOL(__divsi3);
#include "libgcc.h"
u64 __lshrdi3(u64 v, int cnt)
{
int c = cnt & 31;
u32 vl = (u32) v;
u32 vh = (u32) (v >> 32);
if (cnt & 32) {
vl = (vh >> c);
vh = 0;
} else {
vl = (vl >> c) + (vh << (32 - c));
vh = (vh >> c);
}
return ((u64) vh << 32) + vl;
}
EXPORT_SYMBOL(__lshrdi3);
#include "libgcc.h"
s64 __moddi3(s64 num, s64 den)
{
int minus = 0;
s64 v;
if (num < 0) {
num = -num;
minus = 1;
}
if (den < 0) {
den = -den;
minus ^= 1;
}
(void)__udivmoddi4(num, den, (u64 *) & v);
if (minus)
v = -v;
return v;
}
EXPORT_SYMBOL(__moddi3);
#include "libgcc.h"
s32 __modsi3(s32 num, s32 den)
{
int minus = 0;
s32 v;
if (num < 0) {
num = -num;
minus = 1;
}
if (den < 0) {
den = -den;
minus ^= 1;
}
(void)__udivmodsi4(num, den, (u32 *) & v);
if (minus)
v = -v;
return v;
}
EXPORT_SYMBOL(__modsi3);
#include "libgcc.h"
union DWunion {
struct {
s32 high;
s32 low;
} s;
s64 ll;
};
s64 __muldi3(s64 u, s64 v)
{
const union DWunion uu = { .ll = u };
const union DWunion vv = { .ll = v };
union DWunion w = { .ll = __umulsidi3(uu.s.low, vv.s.low) };
w.s.high += ((u32)uu.s.low * (u32)vv.s.high
+ (u32)uu.s.high * (u32)vv.s.low);
return w.ll;
}
EXPORT_SYMBOL(__muldi3);
#include "libgcc.h"
u64 __udivdi3(u64 num, u64 den)
{
return __udivmoddi4(num, den, NULL);
}
EXPORT_SYMBOL(__udivdi3);
#include "libgcc.h"
u64 __udivmoddi4(u64 num, u64 den, u64 * rem_p)
{
u64 quot = 0, qbit = 1;
if (den == 0) {
BUG();
}
/* Left-justify denominator and count shift */
while ((s64) den >= 0) {
den <<= 1;
qbit <<= 1;
}
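/* Shift-and-subtract: one quotient bit per iteration, shifting the
 * left-justified denominator back down to its original position. */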
while (qbit) {
if (den <= num) {
num -= den;
quot += qbit;
}
den >>= 1;
qbit >>= 1;
}
if (rem_p)
*rem_p = num;
return quot;
}
EXPORT_SYMBOL(__udivmoddi4);
#include "libgcc.h"
u32 __udivmodsi4(u32 num, u32 den, u32 * rem_p)
{
u32 quot = 0, qbit = 1;
if (den == 0) {
BUG();
}
/* Left-justify denominator and count shift */
while ((s32) den >= 0) {
den <<= 1;
qbit <<= 1;
}
while (qbit) {
if (den <= num) {
num -= den;
quot += qbit;
}
den >>= 1;
qbit >>= 1;
}
if (rem_p)
*rem_p = num;
return quot;
}
EXPORT_SYMBOL(__udivmodsi4);
#include "libgcc.h"
u32 __udivsi3(u32 num, u32 den)
{
return __udivmodsi4(num, den, NULL);
}
EXPORT_SYMBOL(__udivsi3);
#include "libgcc.h"
u64 __umoddi3(u64 num, u64 den)
{
u64 v;
(void)__udivmoddi4(num, den, &v);
return v;
}
EXPORT_SYMBOL(__umoddi3);
#include "libgcc.h"
u32 __umodsi3(u32 num, u32 den)
{
u32 v;
(void)__udivmodsi4(num, den, &v);
return v;
}
EXPORT_SYMBOL(__umodsi3);
#include "libgcc.h"
#define __ll_B ((u32) 1 << (32 / 2))
#define __ll_lowpart(t) ((u32) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((u32) (t) >> 16)
#define umul_ppmm(w1, w0, u, v) \
do { \
u32 __x0, __x1, __x2, __x3; \
u16 __ul, __vl, __uh, __vh; \
\
__ul = __ll_lowpart (u); \
__uh = __ll_highpart (u); \
__vl = __ll_lowpart (v); \
__vh = __ll_highpart (v); \
\
__x0 = (u32) __ul * __vl; \
__x1 = (u32) __ul * __vh; \
__x2 = (u32) __uh * __vl; \
__x3 = (u32) __uh * __vh; \
\
__x1 += __ll_highpart (__x0);/* this can't give carry */ \
__x1 += __x2; /* but this indeed can */ \
if (__x1 < __x2) /* did we get it? */ \
__x3 += __ll_B; /* yes, add it in the proper pos. */ \
\
(w1) = __x3 + __ll_highpart (__x1); \
(w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
} while (0)
union DWunion {
struct {
s32 high;
s32 low;
} s;
s64 ll;
};
u64 __umulsidi3(u32 u, u32 v)
{
union DWunion __w;
umul_ppmm(__w.s.high, __w.s.low, u, v);
return __w.ll;
}
#ifndef _PA_LIBGCC_H_
#define _PA_LIBGCC_H_
#include <linux/types.h>
#include <linux/module.h>
/* Cribbed from klibc/libgcc/ */
u64 __ashldi3(u64 v, int cnt);
u64 __ashrdi3(u64 v, int cnt);
u32 __clzsi2(u32 v);
s64 __divdi3(s64 num, s64 den);
s32 __divsi3(s32 num, s32 den);
u64 __lshrdi3(u64 v, int cnt);
s64 __moddi3(s64 num, s64 den);
s32 __modsi3(s32 num, s32 den);
u64 __udivdi3(u64 num, u64 den);
u32 __udivsi3(u32 num, u32 den);
u64 __udivmoddi4(u64 num, u64 den, u64 * rem_p);
u32 __udivmodsi4(u32 num, u32 den, u32 * rem_p);
u64 __umulsidi3(u32 u, u32 v);
u64 __umoddi3(u64 num, u64 den);
u32 __umodsi3(u32 num, u32 den);
#endif /*_PA_LIBGCC_H_*/
obj-y := dyncall.o divI.o divU.o remI.o remU.o div_const.o mulI.o
/* 32 and 64-bit millicode, original author Hewlett-Packard
adapted for gcc by Paul Bame <bame@debian.org>
and Alan Modra <alan@linuxcare.com.au>.
Copyright 2001, 2002, 2003 Free Software Foundation, Inc.
This file is part of GCC and is released under the terms
of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.
See the file COPYING in the top-level GCC source directory for a copy
of the license. */
#include "milli.h"
#ifdef L_divI
/* ROUTINES: $$divI, $$divoI
Single precision divide for signed binary integers.
The quotient is truncated towards zero.
The sign of the quotient is the XOR of the signs of the dividend and
divisor.
Divide by zero is trapped.
Divide of -2**31 by -1 is trapped for $$divoI but not for $$divI.
INPUT REGISTERS:
. arg0 == dividend
. arg1 == divisor
. mrp == return pc
. sr0 == return space when called externally
OUTPUT REGISTERS:
. arg0 = undefined
. arg1 = undefined
. ret1 = quotient
OTHER REGISTERS AFFECTED:
. r1 = undefined
SIDE EFFECTS:
. Causes a trap under the following conditions:
. divisor is zero (traps with ADDIT,= 0,25,0)
. dividend==-2**31 and divisor==-1 and routine is $$divoI
. (traps with ADDO 26,25,0)
. Changes memory at the following places:
. NONE
PERMISSIBLE CONTEXT:
. Unwindable.
. Suitable for internal or external millicode.
. Assumes the special millicode register conventions.
DISCUSSION:
. Branches to other millicode routines using BE
. $$div_# for # being 2,3,4,5,6,7,8,9,10,12,14,15
.
. For selected divisors, calls a divide by constant routine written by
. Karl Pettis. Eligible divisors are 1..15 excluding 11 and 13.
.
. The only overflow case is -2**31 divided by -1.
. Both routines return -2**31 but only $$divoI traps. */
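/* As a plain-C restatement of the contract above (an illustrative
   sketch only, not the millicode; divI_ref and the stdint.h types are
   assumptions made for the example):

	#include <stdint.h>
	#include <assert.h>

	// Quotient truncated toward zero, sign from the XOR of the operand
	// signs; a zero divisor is rejected (the millicode traps instead),
	// and -2**31 / -1 returns -2**31 just as $$divI does.
	static int32_t divI_ref(int32_t dividend, int32_t divisor)
	{
		assert(divisor != 0);
		if (dividend == INT32_MIN && divisor == -1)
			return INT32_MIN;
		return dividend / divisor;   // C division truncates toward zero
	}
*/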
RDEFINE(temp,r1)
RDEFINE(retreg,ret1) /* r29 */
RDEFINE(temp1,arg0)
SUBSPA_MILLI_DIV
ATTR_MILLI
.import $$divI_2,millicode
.import $$divI_3,millicode
.import $$divI_4,millicode
.import $$divI_5,millicode
.import $$divI_6,millicode
.import $$divI_7,millicode
.import $$divI_8,millicode
.import $$divI_9,millicode
.import $$divI_10,millicode
.import $$divI_12,millicode
.import $$divI_14,millicode
.import $$divI_15,millicode
.export $$divI,millicode
.export $$divoI,millicode
.proc
.callinfo millicode
.entry
GSYM($$divoI)
comib,=,n -1,arg1,LREF(negative1) /* when divisor == -1 */
GSYM($$divI)
ldo -1(arg1),temp /* is there at most one bit set ? */
and,<> arg1,temp,r0 /* if not, don't use power of 2 divide */
addi,> 0,arg1,r0 /* if divisor > 0, use power of 2 divide */
b,n LREF(neg_denom)
LSYM(pow2)
addi,>= 0,arg0,retreg /* if numerator is negative, add the */
add arg0,temp,retreg /* (denominator - 1) to correct for shifts */
extru,= arg1,15,16,temp /* test denominator with 0xffff0000 */
extrs retreg,15,16,retreg /* retreg = retreg >> 16 */
or arg1,temp,arg1 /* arg1 = arg1 | (arg1 >> 16) */
ldi 0xcc,temp1 /* setup 0xcc in temp1 */
extru,= arg1,23,8,temp /* test denominator with 0xff00 */
extrs retreg,23,24,retreg /* retreg = retreg >> 8 */
or arg1,temp,arg1 /* arg1 = arg1 | (arg1 >> 8) */
ldi 0xaa,temp /* setup 0xaa in temp */
extru,= arg1,27,4,r0 /* test denominator with 0xf0 */
extrs retreg,27,28,retreg /* retreg = retreg >> 4 */
and,= arg1,temp1,r0 /* test denominator with 0xcc */
extrs retreg,29,30,retreg /* retreg = retreg >> 2 */
and,= arg1,temp,r0 /* test denominator with 0xaa */
extrs retreg,30,31,retreg /* retreg = retreg >> 1 */
MILLIRETN
LSYM(neg_denom)
addi,< 0,arg1,r0 /* if arg1 >= 0, it's not power of 2 */
b,n LREF(regular_seq)
sub r0,arg1,temp /* make denominator positive */
comb,=,n arg1,temp,LREF(regular_seq) /* test against 0x80000000 and 0 */
ldo -1(temp),retreg /* is there at most one bit set ? */
and,= temp,retreg,r0 /* if so, the denominator is power of 2 */
b,n LREF(regular_seq)
sub r0,arg0,retreg /* negate numerator */
comb,=,n arg0,retreg,LREF(regular_seq) /* test against 0x80000000 */
copy retreg,arg0 /* set up arg0, arg1 and temp */
copy temp,arg1 /* before branching to pow2 */
b LREF(pow2)
ldo -1(arg1),temp
LSYM(regular_seq)
comib,>>=,n 15,arg1,LREF(small_divisor)
add,>= 0,arg0,retreg /* move dividend, if retreg < 0, */
LSYM(normal)
subi 0,retreg,retreg /* make it positive */
sub 0,arg1,temp /* clear carry, */
/* negate the divisor */
ds 0,temp,0 /* set V-bit to the comple- */
/* ment of the divisor sign */
add retreg,retreg,retreg /* shift msb bit into carry */
ds r0,arg1,temp /* 1st divide step, if no carry */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 2nd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 3rd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 4th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 5th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 6th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 7th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 8th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 9th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 10th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 11th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 12th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 13th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 14th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 15th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 16th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 17th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 18th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 19th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 20th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 21st divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 22nd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 23rd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 24th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 25th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 26th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 27th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 28th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 29th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 30th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 31st divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 32nd divide step, */
addc retreg,retreg,retreg /* shift last retreg bit into retreg */
xor,>= arg0,arg1,0 /* get correct sign of quotient */
sub 0,retreg,retreg /* based on operand signs */
MILLIRETN
nop
LSYM(small_divisor)
#if defined(CONFIG_64BIT)
/* Clear the upper 32 bits of the arg1 register. We are working with */
/* small divisors (and 32-bit integers). We must not be misled */
/* by "1" bits left in the upper 32 bits. */
depd %r0,31,32,%r25
#endif
blr,n arg1,r0
nop
/* table for divisor == 0,1, ... ,15 */
addit,= 0,arg1,r0 /* trap if divisor == 0 */
nop
MILLIRET /* divisor == 1 */
copy arg0,retreg
MILLI_BEN($$divI_2) /* divisor == 2 */
nop
MILLI_BEN($$divI_3) /* divisor == 3 */
nop
MILLI_BEN($$divI_4) /* divisor == 4 */
nop
MILLI_BEN($$divI_5) /* divisor == 5 */
nop
MILLI_BEN($$divI_6) /* divisor == 6 */
nop
MILLI_BEN($$divI_7) /* divisor == 7 */
nop
MILLI_BEN($$divI_8) /* divisor == 8 */
nop
MILLI_BEN($$divI_9) /* divisor == 9 */
nop
MILLI_BEN($$divI_10) /* divisor == 10 */
nop
b LREF(normal) /* divisor == 11 */
add,>= 0,arg0,retreg
MILLI_BEN($$divI_12) /* divisor == 12 */
nop
b LREF(normal) /* divisor == 13 */
add,>= 0,arg0,retreg
MILLI_BEN($$divI_14) /* divisor == 14 */
nop
MILLI_BEN($$divI_15) /* divisor == 15 */
nop
LSYM(negative1)
sub 0,arg0,retreg /* result is negation of dividend */
MILLIRET
addo arg0,arg1,r0 /* trap iff dividend==0x80000000 && divisor==-1 */
.exit
.procend
.end
#endif
/* 32 and 64-bit millicode, original author Hewlett-Packard
adapted for gcc by Paul Bame <bame@debian.org>
and Alan Modra <alan@linuxcare.com.au>.
Copyright 2001, 2002, 2003 Free Software Foundation, Inc.
This file is part of GCC and is released under the terms
of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.
See the file COPYING in the top-level GCC source directory for a copy
of the license. */
#include "milli.h"
#ifdef L_divU
/* ROUTINE: $$divU
.
. Single precision divide for unsigned integers.
.
. Quotient is truncated towards zero.
. Traps on divide by zero.
INPUT REGISTERS:
. arg0 == dividend
. arg1 == divisor
. mrp == return pc
. sr0 == return space when called externally
OUTPUT REGISTERS:
. arg0 = undefined
. arg1 = undefined
. ret1 = quotient
OTHER REGISTERS AFFECTED:
. r1 = undefined
SIDE EFFECTS:
. Causes a trap under the following conditions:
. divisor is zero
. Changes memory at the following places:
. NONE
PERMISSIBLE CONTEXT:
. Unwindable.
. Does not create a stack frame.
. Suitable for internal or external millicode.
. Assumes the special millicode register conventions.
DISCUSSION:
. Branches to other millicode routines using BE:
. $$divU_# for 3,5,6,7,9,10,12,14,15
.
. For selected small divisors calls the special divide by constant
. routines written by Karl Pettis. These are: 3,5,6,7,9,10,12,14,15. */
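/* The power-of-two fast path at the top of $$divU can be sketched in C
   as below (a hypothetical illustration, not a transcription of the
   millicode; divU_pow2_ref is an invented name).  The divisor's single
   set bit is located with the same 16/8/4/2/1 folding the extru/or
   sequence performs, and the dividend is shifted by the accumulated
   amount:

	#include <stdint.h>

	// Assumes divisor is a nonzero power of two; the general case and
	// the divide-by-zero trap are handled elsewhere in the millicode.
	static uint32_t divU_pow2_ref(uint32_t dividend, uint32_t divisor)
	{
		uint32_t q = dividend;

		if (divisor & 0xffff0000) { q >>= 16; divisor |= divisor >> 16; }
		if (divisor & 0x0000ff00) { q >>= 8;  divisor |= divisor >> 8;  }
		if (divisor & 0x000000f0) q >>= 4;
		if (divisor & 0x000000cc) q >>= 2;
		if (divisor & 0x000000aa) q >>= 1;
		return q;
	}
*/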
RDEFINE(temp,r1)
RDEFINE(retreg,ret1) /* r29 */
RDEFINE(temp1,arg0)
SUBSPA_MILLI_DIV
ATTR_MILLI
.export $$divU,millicode
.import $$divU_3,millicode
.import $$divU_5,millicode
.import $$divU_6,millicode
.import $$divU_7,millicode
.import $$divU_9,millicode
.import $$divU_10,millicode
.import $$divU_12,millicode
.import $$divU_14,millicode
.import $$divU_15,millicode
.proc
.callinfo millicode
.entry
GSYM($$divU)
/* The subtract is not nullified since it does no harm and can be used
by the two cases that branch back to "normal". */
ldo -1(arg1),temp /* is there at most one bit set ? */
and,= arg1,temp,r0 /* if so, denominator is power of 2 */
b LREF(regular_seq)
addit,= 0,arg1,0 /* trap for zero dvr */
copy arg0,retreg
extru,= arg1,15,16,temp /* test denominator with 0xffff0000 */
extru retreg,15,16,retreg /* retreg = retreg >> 16 */
or arg1,temp,arg1 /* arg1 = arg1 | (arg1 >> 16) */
ldi 0xcc,temp1 /* setup 0xcc in temp1 */
extru,= arg1,23,8,temp /* test denominator with 0xff00 */
extru retreg,23,24,retreg /* retreg = retreg >> 8 */
or arg1,temp,arg1 /* arg1 = arg1 | (arg1 >> 8) */
ldi 0xaa,temp /* setup 0xaa in temp */
extru,= arg1,27,4,r0 /* test denominator with 0xf0 */
extru retreg,27,28,retreg /* retreg = retreg >> 4 */
and,= arg1,temp1,r0 /* test denominator with 0xcc */
extru retreg,29,30,retreg /* retreg = retreg >> 2 */
and,= arg1,temp,r0 /* test denominator with 0xaa */
extru retreg,30,31,retreg /* retreg = retreg >> 1 */
MILLIRETN
nop
LSYM(regular_seq)
comib,>= 15,arg1,LREF(special_divisor)
subi 0,arg1,temp /* clear carry, negate the divisor */
ds r0,temp,r0 /* set V-bit to 1 */
LSYM(normal)
add arg0,arg0,retreg /* shift msb bit into carry */
ds r0,arg1,temp /* 1st divide step, if no carry */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 2nd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 3rd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 4th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 5th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 6th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 7th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 8th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 9th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 10th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 11th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 12th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 13th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 14th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 15th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 16th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 17th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 18th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 19th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 20th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 21st divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 22nd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 23rd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 24th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 25th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 26th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 27th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 28th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 29th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 30th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 31st divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds temp,arg1,temp /* 32nd divide step, */
MILLIRET
addc retreg,retreg,retreg /* shift last retreg bit into retreg */
/* Handle the cases where divisor is a small constant or has high bit on. */
LSYM(special_divisor)
/* blr arg1,r0 */
/* comib,>,n 0,arg1,LREF(big_divisor) ; nullify previous instruction */
/* Pratap 8/13/90. The 815 Stirling chip set has a bug that prevents us from
generating such a blr, comib sequence. A problem in nullification. So I
rewrote this code. */
#if defined(CONFIG_64BIT)
/* Clear the upper 32 bits of the arg1 register. We are working with
small divisors (and 32-bit unsigned integers). We must not be misled
by "1" bits left in the upper 32 bits. */
depd %r0,31,32,%r25
#endif
comib,> 0,arg1,LREF(big_divisor)
nop
blr arg1,r0
nop
LSYM(zero_divisor) /* this label is here to provide external visibility */
addit,= 0,arg1,0 /* trap for zero dvr */
nop
MILLIRET /* divisor == 1 */
copy arg0,retreg
MILLIRET /* divisor == 2 */
extru arg0,30,31,retreg
MILLI_BEN($$divU_3) /* divisor == 3 */
nop
MILLIRET /* divisor == 4 */
extru arg0,29,30,retreg
MILLI_BEN($$divU_5) /* divisor == 5 */
nop
MILLI_BEN($$divU_6) /* divisor == 6 */
nop
MILLI_BEN($$divU_7) /* divisor == 7 */
nop
MILLIRET /* divisor == 8 */
extru arg0,28,29,retreg
MILLI_BEN($$divU_9) /* divisor == 9 */
nop
MILLI_BEN($$divU_10) /* divisor == 10 */
nop
b LREF(normal) /* divisor == 11 */
ds r0,temp,r0 /* set V-bit to 1 */
MILLI_BEN($$divU_12) /* divisor == 12 */
nop
b LREF(normal) /* divisor == 13 */
ds r0,temp,r0 /* set V-bit to 1 */
MILLI_BEN($$divU_14) /* divisor == 14 */
nop
MILLI_BEN($$divU_15) /* divisor == 15 */
nop
/* Handle the case where the high bit is on in the divisor.
Compute: if( dividend>=divisor) quotient=1; else quotient=0;
Note: dividend>=divisor iff dividend-divisor does not borrow
and not borrow iff carry. */
LSYM(big_divisor)
sub arg0,arg1,r0
MILLIRET
addc r0,r0,retreg
.exit
.procend
.end
#endif
/* 32 and 64-bit millicode, original author Hewlett-Packard
adapted for gcc by Paul Bame <bame@debian.org>
and Alan Modra <alan@linuxcare.com.au>.
Copyright 2001, 2002, 2003 Free Software Foundation, Inc.
This file is part of GCC and is released under the terms
of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.
See the file COPYING in the top-level GCC source directory for a copy
of the license. */
#include "milli.h"
#ifdef L_dyncall
SUBSPA_MILLI
ATTR_DATA
GSYM($$dyncall)
.export $$dyncall,millicode
.proc
.callinfo millicode
.entry
bb,>=,n %r22,30,LREF(1) ; branch if not plabel address
depi 0,31,2,%r22 ; clear the two least significant bits
ldw 4(%r22),%r19 ; load new LTP value
ldw 0(%r22),%r22 ; load address of target
LSYM(1)
bv %r0(%r22) ; branch to the real target
stw %r2,-24(%r30) ; save return address into frame marker
.exit
.procend
#endif
/* 32 and 64-bit millicode, original author Hewlett-Packard
adapted for gcc by Paul Bame <bame@debian.org>
and Alan Modra <alan@linuxcare.com.au>.
Copyright 2001, 2002, 2003 Free Software Foundation, Inc.
This file is part of GCC and is released under the terms
of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.
See the file COPYING in the top-level GCC source directory for a copy
of the license. */
#ifndef _PA_MILLI_H_
#define _PA_MILLI_H_
#define L_dyncall
#define L_divI
#define L_divU
#define L_remI
#define L_remU
#define L_div_const
#define L_mulI
#ifdef CONFIG_64BIT
.level 2.0w
#endif
/* Hardware General Registers. */
r0: .reg %r0
r1: .reg %r1
r2: .reg %r2
r3: .reg %r3
r4: .reg %r4
r5: .reg %r5
r6: .reg %r6
r7: .reg %r7
r8: .reg %r8
r9: .reg %r9
r10: .reg %r10
r11: .reg %r11
r12: .reg %r12
r13: .reg %r13
r14: .reg %r14
r15: .reg %r15
r16: .reg %r16
r17: .reg %r17
r18: .reg %r18
r19: .reg %r19
r20: .reg %r20
r21: .reg %r21
r22: .reg %r22
r23: .reg %r23
r24: .reg %r24
r25: .reg %r25
r26: .reg %r26
r27: .reg %r27
r28: .reg %r28
r29: .reg %r29
r30: .reg %r30
r31: .reg %r31
/* Hardware Space Registers. */
sr0: .reg %sr0
sr1: .reg %sr1
sr2: .reg %sr2
sr3: .reg %sr3
sr4: .reg %sr4
sr5: .reg %sr5
sr6: .reg %sr6
sr7: .reg %sr7
/* Hardware Floating Point Registers. */
fr0: .reg %fr0
fr1: .reg %fr1
fr2: .reg %fr2
fr3: .reg %fr3
fr4: .reg %fr4
fr5: .reg %fr5
fr6: .reg %fr6
fr7: .reg %fr7
fr8: .reg %fr8
fr9: .reg %fr9
fr10: .reg %fr10
fr11: .reg %fr11
fr12: .reg %fr12
fr13: .reg %fr13
fr14: .reg %fr14
fr15: .reg %fr15
/* Hardware Control Registers. */
cr11: .reg %cr11
sar: .reg %cr11 /* Shift Amount Register */
/* Software Architecture General Registers. */
rp: .reg r2 /* return pointer */
#ifdef CONFIG_64BIT
mrp: .reg r2 /* millicode return pointer */
#else
mrp: .reg r31 /* millicode return pointer */
#endif
ret0: .reg r28 /* return value */
ret1: .reg r29 /* return value (high part of double) */
sp: .reg r30 /* stack pointer */
dp: .reg r27 /* data pointer */
arg0: .reg r26 /* argument */
arg1: .reg r25 /* argument or high part of double argument */
arg2: .reg r24 /* argument */
arg3: .reg r23 /* argument or high part of double argument */
/* Software Architecture Space Registers. */
/* sr0 ; return link from BLE */
sret: .reg sr1 /* return value */
sarg: .reg sr1 /* argument */
/* sr4 ; PC SPACE tracker */
/* sr5 ; process private data */
/* Frame Offsets (millicode convention!) Used when calling other
millicode routines. Stack unwinding is dependent upon these
definitions. */
r31_slot: .equ -20 /* "current RP" slot */
sr0_slot: .equ -16 /* "static link" slot */
#if defined(CONFIG_64BIT)
mrp_slot: .equ -16 /* "current RP" slot */
psp_slot: .equ -8 /* "previous SP" slot */
#else
mrp_slot: .equ -20 /* "current RP" slot (replacing "r31_slot") */
#endif
#define DEFINE(name,value)name: .EQU value
#define RDEFINE(name,value)name: .REG value
#ifdef milliext
#define MILLI_BE(lbl) BE lbl(sr7,r0)
#define MILLI_BEN(lbl) BE,n lbl(sr7,r0)
#define MILLI_BLE(lbl) BLE lbl(sr7,r0)
#define MILLI_BLEN(lbl) BLE,n lbl(sr7,r0)
#define MILLIRETN BE,n 0(sr0,mrp)
#define MILLIRET BE 0(sr0,mrp)
#define MILLI_RETN BE,n 0(sr0,mrp)
#define MILLI_RET BE 0(sr0,mrp)
#else
#define MILLI_BE(lbl) B lbl
#define MILLI_BEN(lbl) B,n lbl
#define MILLI_BLE(lbl) BL lbl,mrp
#define MILLI_BLEN(lbl) BL,n lbl,mrp
#define MILLIRETN BV,n 0(mrp)
#define MILLIRET BV 0(mrp)
#define MILLI_RETN BV,n 0(mrp)
#define MILLI_RET BV 0(mrp)
#endif
#define CAT(a,b) a##b
#define SUBSPA_MILLI .section .text
#define SUBSPA_MILLI_DIV .section .text.div,"ax",@progbits! .align 16
#define SUBSPA_MILLI_MUL .section .text.mul,"ax",@progbits! .align 16
#define ATTR_MILLI
#define SUBSPA_DATA .section .data
#define ATTR_DATA
#define GLOBAL $global$
#define GSYM(sym) !sym:
#define LSYM(sym) !CAT(.L,sym:)
#define LREF(sym) CAT(.L,sym)
#endif /*_PA_MILLI_H_*/
/* 32 and 64-bit millicode, original author Hewlett-Packard
adapted for gcc by Paul Bame <bame@debian.org>
and Alan Modra <alan@linuxcare.com.au>.
Copyright 2001, 2002, 2003 Free Software Foundation, Inc.
This file is part of GCC and is released under the terms
of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.
See the file COPYING in the top-level GCC source directory for a copy
of the license. */
#include "milli.h"
#ifdef L_remI
/* ROUTINE: $$remI
DESCRIPTION:
. $$remI returns the remainder of the division of two signed 32-bit
. integers. The sign of the remainder is the same as the sign of
. the dividend.
INPUT REGISTERS:
. arg0 == dividend
. arg1 == divisor
. mrp == return pc
. sr0 == return space when called externally
OUTPUT REGISTERS:
. arg0 = destroyed
. arg1 = destroyed
. ret1 = remainder
OTHER REGISTERS AFFECTED:
. r1 = undefined
SIDE EFFECTS:
. Causes a trap under the following conditions: DIVIDE BY ZERO
. Changes memory at the following places: NONE
PERMISSIBLE CONTEXT:
. Unwindable
. Does not create a stack frame
. Is usable for internal or external millicode
DISCUSSION:
. Calls other millicode routines via mrp: NONE
. Calls other millicode routines: NONE */
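/* The semantics above can be restated as a hypothetical C reference
   model (remI_ref is an invented name; the millicode traps on a zero
   divisor rather than asserting):

	#include <stdint.h>
	#include <assert.h>

	// The remainder carries the sign of the dividend, as C99 % does.
	static int32_t remI_ref(int32_t dividend, int32_t divisor)
	{
		assert(divisor != 0);
		if (dividend == INT32_MIN && divisor == -1)
			return 0;   // avoid the one overflowing division in C
		return dividend % divisor;
	}
*/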
RDEFINE(tmp,r1)
RDEFINE(retreg,ret1)
SUBSPA_MILLI
ATTR_MILLI
.proc
.callinfo millicode
.entry
GSYM($$remI)
GSYM($$remoI)
.export $$remI,MILLICODE
.export $$remoI,MILLICODE
ldo -1(arg1),tmp /* is there at most one bit set ? */
and,<> arg1,tmp,r0 /* if not, don't use power of 2 */
addi,> 0,arg1,r0 /* if denominator > 0, use power */
/* of 2 */
b,n LREF(neg_denom)
LSYM(pow2)
comb,>,n 0,arg0,LREF(neg_num) /* is numerator < 0 ? */
and arg0,tmp,retreg /* get the result */
MILLIRETN
LSYM(neg_num)
subi 0,arg0,arg0 /* negate numerator */
and arg0,tmp,retreg /* get the result */
subi 0,retreg,retreg /* negate result */
MILLIRETN
LSYM(neg_denom)
addi,< 0,arg1,r0 /* if arg1 >= 0, it's not power */
/* of 2 */
b,n LREF(regular_seq)
sub r0,arg1,tmp /* make denominator positive */
comb,=,n arg1,tmp,LREF(regular_seq) /* test against 0x80000000 and 0 */
ldo -1(tmp),retreg /* is there at most one bit set ? */
and,= tmp,retreg,r0 /* if not, go to regular_seq */
b,n LREF(regular_seq)
comb,>,n 0,arg0,LREF(neg_num_2) /* if arg0 < 0, negate it */
and arg0,retreg,retreg
MILLIRETN
LSYM(neg_num_2)
subi 0,arg0,tmp /* test against 0x80000000 */
and tmp,retreg,retreg
subi 0,retreg,retreg
MILLIRETN
LSYM(regular_seq)
addit,= 0,arg1,0 /* trap if div by zero */
add,>= 0,arg0,retreg /* move dividend, if retreg < 0, */
sub 0,retreg,retreg /* make it positive */
sub 0,arg1, tmp /* clear carry, */
/* negate the divisor */
ds 0, tmp,0 /* set V-bit to the comple- */
/* ment of the divisor sign */
or 0,0, tmp /* clear tmp */
add retreg,retreg,retreg /* shift msb bit into carry */
ds tmp,arg1, tmp /* 1st divide step, if no carry */
/* out, msb of quotient = 0 */
addc retreg,retreg,retreg /* shift retreg with/into carry */
LSYM(t1)
ds tmp,arg1, tmp /* 2nd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 3rd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 4th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 5th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 6th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 7th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 8th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 9th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 10th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 11th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 12th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 13th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 14th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 15th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 16th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 17th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 18th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 19th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 20th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 21st divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 22nd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 23rd divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 24th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 25th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 26th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 27th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 28th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 29th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 30th divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 31st divide step */
addc retreg,retreg,retreg /* shift retreg with/into carry */
ds tmp,arg1, tmp /* 32nd divide step, */
addc retreg,retreg,retreg /* shift last bit into retreg */
movb,>=,n tmp,retreg,LREF(finish) /* branch if pos. tmp */
add,< arg1,0,0 /* if arg1 > 0, add arg1 */
add,tr tmp,arg1,retreg /* for correcting remainder tmp */
sub tmp,arg1,retreg /* else add absolute value arg1 */
LSYM(finish)
add,>= arg0,0,0 /* set sign of remainder */
sub 0,retreg,retreg /* to sign of dividend */
MILLIRET
nop
.exit
.procend
#ifdef milliext
.origin 0x00000200
#endif
.end
#endif
/* 32 and 64-bit millicode, original author Hewlett-Packard
adapted for gcc by Paul Bame <bame@debian.org>
and Alan Modra <alan@linuxcare.com.au>.
Copyright 2001, 2002, 2003 Free Software Foundation, Inc.
This file is part of GCC and is released under the terms
of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.
See the file COPYING in the top-level GCC source directory for a copy
of the license. */
#include "milli.h"
#ifdef L_remU
/* ROUTINE: $$remU
. Single precision divide for remainder with unsigned binary integers.
.
. The remainder must be dividend-(dividend/divisor)*divisor.
. Divide by zero is trapped.
INPUT REGISTERS:
. arg0 == dividend
. arg1 == divisor
. mrp == return pc
. sr0 == return space when called externally
OUTPUT REGISTERS:
. arg0 = undefined
. arg1 = undefined
. ret1 = remainder
OTHER REGISTERS AFFECTED:
. r1 = undefined
SIDE EFFECTS:
. Causes a trap under the following conditions: DIVIDE BY ZERO
. Changes memory at the following places: NONE
PERMISSIBLE CONTEXT:
. Unwindable.
. Does not create a stack frame.
. Suitable for internal or external millicode.
. Assumes the special millicode register conventions.
DISCUSSION:
. Calls other millicode routines using mrp: NONE
. Calls other millicode routines: NONE */
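/* A hypothetical C reference model for the contract above (remU_ref is
   an invented name; the millicode traps on a zero divisor rather than
   asserting).  It also shows why the "big divisor" case later in this
   routine needs at most one subtraction:

	#include <stdint.h>
	#include <assert.h>

	static uint32_t remU_ref(uint32_t dividend, uint32_t divisor)
	{
		assert(divisor != 0);
		// With the divisor's high bit set, the quotient is 0 or 1.
		if (divisor & 0x80000000u)
			return dividend >= divisor ? dividend - divisor : dividend;
		// Otherwise use the identity from the description above.
		return dividend - (dividend / divisor) * divisor;
	}
*/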
RDEFINE(temp,r1)
RDEFINE(rmndr,ret1) /* r29 */
SUBSPA_MILLI
ATTR_MILLI
.export $$remU,millicode
.proc
.callinfo millicode
.entry
GSYM($$remU)
ldo -1(arg1),temp /* is there at most one bit set ? */
and,= arg1,temp,r0 /* if not, don't use power of 2 */
b LREF(regular_seq)
addit,= 0,arg1,r0 /* trap on div by zero */
and arg0,temp,rmndr /* get the result for power of 2 */
MILLIRETN
LSYM(regular_seq)
comib,>=,n 0,arg1,LREF(special_case)
subi 0,arg1,rmndr /* clear carry, negate the divisor */
ds r0,rmndr,r0 /* set V-bit to 1 */
add arg0,arg0,temp /* shift msb bit into carry */
ds r0,arg1,rmndr /* 1st divide step, if no carry */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 2nd divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 3rd divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 4th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 5th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 6th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 7th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 8th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 9th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 10th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 11th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 12th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 13th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 14th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 15th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 16th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 17th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 18th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 19th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 20th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 21st divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 22nd divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 23rd divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 24th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 25th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 26th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 27th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 28th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 29th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 30th divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 31st divide step */
addc temp,temp,temp /* shift temp with/into carry */
ds rmndr,arg1,rmndr /* 32nd divide step, */
comiclr,<= 0,rmndr,r0
add rmndr,arg1,rmndr /* correction */
MILLIRETN
nop
/* Putting >= on the last DS and deleting COMICLR does not work! */
LSYM(special_case)
sub,>>= arg0,arg1,rmndr
copy arg0,rmndr
MILLIRETN
nop
.exit
.procend
.end
#endif