diff options
author | Thomas Bushnell <tb@debian.org> | 2006-10-23 23:31:59 -0700 |
---|---|---|
committer | Bryan Newbold <bnewbold@robocracy.org> | 2017-02-20 00:05:35 -0800 |
commit | 906cc4f0899080f1b832af98b7ccbcc257b8a64e (patch) | |
tree | 14e3ec231debdca4da9c1a3ccc0536033c1c8a4b /ramap.c | |
parent | 3d573fa54db0fdcae8b2a20356faa46c8e335206 (diff) | |
parent | 50eb784bfcf15ee3c6b0b53d747db92673395040 (diff) | |
download | scm-debian/5e3-1.tar.gz scm-debian/5e3-1.zip |
Import Debian changes 5e3-1debian/5e3-1
scm (5e3-1) unstable; urgency=low
* New upstream release.
* debian/control (Architecture): Add ia64 to list of supported archs. Now
everything but s390 is listed. (Closes: #335980)
(Build-Depends): Require at least version 3a4 of slib.
* Change to continue.h from version 5e1-2 repeated here.
* Change to xgen.scm from version 5e2-4 repeated here.
* Change to scm.1 from version 5e2-4 repeated here.
Diffstat (limited to 'ramap.c')
-rw-r--r-- | ramap.c | 300 |
1 file changed, 215 insertions, 85 deletions
@@ -132,11 +132,12 @@ int ra_matchp(ra0, ras) break; case tc7_smob: if (!ARRAYP(ra1)) goto scalar; - if (ndim != ARRAY_NDIM(ra1)) + if (ndim != ARRAY_NDIM(ra1)) { if (0==ARRAY_NDIM(ra1)) goto scalar; else return 0; + } s1 = ARRAY_DIMS(ra1); if (bas0 != ARRAY_BASE(ra1)) exact = 3; for (i = 0; i < ndim; i++) @@ -309,7 +310,7 @@ static int racp(src, dst) for (; n-- > 0; i_s += inc_s, i_d += inc_d) CHARS(dst)[i_d] = CHARS(src)[i_s]; break; - case tc7_bvect: if (tc7_bvect != TYP7(src)) goto gencase; + case tc7_Vbool: if (tc7_Vbool != TYP7(src)) goto gencase; if (1==inc_d && 1==inc_s && i_s%LONG_BIT==i_d%LONG_BIT && n>=LONG_BIT) { long *sv = (long *)VELTS(src); long *dv = (long *)VELTS(dst); @@ -324,8 +325,8 @@ static int racp(src, dst) IVDEP(src != dst, for (; n >= LONG_BIT; n -= LONG_BIT, sv++, dv++) *dv = *sv;) - if (n) /* trailing partial word */ - *dv = (*dv & (~0L<<n)) | (*sv & ~(~0L<<n)); + if (n) /* trailing partial word */ + *dv = (*dv & (~0L<<n)) | (*sv & ~(~0L<<n)); } else { for (; n-- > 0; i_s += inc_s, i_d += inc_d) @@ -335,15 +336,15 @@ static int racp(src, dst) VELTS(dst)[i_d/LONG_BIT] &= ~(1L << (i_d%LONG_BIT)); } break; - case tc7_uvect: - case tc7_ivect: { + case tc7_VfixN32: + case tc7_VfixZ32: { long *d = (long *)VELTS(dst), *s = (long *)VELTS(src); if (TYP7(src)==TYP7(dst)) { IVDEP(src != dst, for (; n-- > 0; i_s += inc_s, i_d += inc_d) d[i_d] = s[i_s];) - } - else if (tc7_ivect==TYP7(dst)) + } + else if (tc7_VfixZ32==TYP7(dst)) for (; n-- > 0; i_s += inc_s, i_d += inc_d) d[i_d] = num2long(cvref(src, i_s, UNDEFINED), (char *)ARG2, s_array_copy); @@ -354,86 +355,135 @@ static int racp(src, dst) break; } # ifdef FLOATS - case tc7_fvect: { + case tc7_VfloR32: { float *d = (float *)VELTS(dst); float *s = (float *)VELTS(src); switch TYP7(src) { default: goto gencase; - case tc7_ivect: case tc7_uvect: + case tc7_VfixZ32: case tc7_VfixN32: IVDEP(src != dst, for (; n-- > 0; i_s += inc_s, i_d += inc_d) d[i_d] = ((long *)s)[i_s]; ) - break; - case tc7_fvect: + 
break; + case tc7_VfloR32: IVDEP(src != dst, for (; n-- > 0; i_s += inc_s, i_d += inc_d) d[i_d] = s[i_s]; ) - break; - case tc7_dvect: + break; + case tc7_VfloR64: IVDEP(src != dst, for (; n-- > 0; i_s += inc_s, i_d += inc_d) d[i_d] = ((double *)s)[i_s]; ) - break; + break; } break; } - case tc7_dvect: { + case tc7_VfloR64: { double *d = (double *)VELTS(dst); double *s = (double *)VELTS(src); switch TYP7(src) { default: goto gencase; - case tc7_ivect: case tc7_uvect: + case tc7_VfixZ32: case tc7_VfixN32: IVDEP(src != dst, for (; n-- > 0; i_s += inc_s, i_d += inc_d) d[i_d] = ((long *)s)[i_s]; ) - break; - case tc7_fvect: + break; + case tc7_VfloR32: IVDEP(src != dst, for (; n-- > 0; i_s += inc_s, i_d += inc_d) d[i_d] = ((float *)s)[i_s];) - break; - case tc7_dvect: + break; + case tc7_VfloR64: IVDEP(src != dst, for (; n-- > 0; i_s += inc_s, i_d += inc_d) d[i_d] = s[i_s];) - break; + break; } break; } - case tc7_cvect: { + case tc7_VfloC32: { + float (*d)[2] = (float (*)[2])VELTS(dst); + float (*s)[2] = (float (*)[2])VELTS(src); + switch TYP7(src) { + default: goto gencase; + case tc7_VfixZ32: case tc7_VfixN32: + IVDEP(src != dst, + for (; n-- > 0; i_s += inc_s, i_d += inc_d) { + d[i_d][0] = ((long *)s)[i_s]; + d[i_d][1] = 0.0; + }) + break; + case tc7_VfloR32: + IVDEP(src != dst, + for (; n-- > 0; i_s += inc_s, i_d += inc_d) { + d[i_d][0] = ((float *)s)[i_s]; + d[i_d][1] = 0.0; + }) + break; + case tc7_VfloR64: + IVDEP(src != dst, + for (; n-- > 0; i_s += inc_s, i_d += inc_d) { + d[i_d][0] = ((double *)s)[i_s]; + d[i_d][1] = 0.0; + }) + break; + case tc7_VfloC32: + IVDEP(src != dst, + for (; n-- > 0; i_s += inc_s, i_d += inc_d) { + d[i_d][0] = s[i_s][0]; + d[i_d][1] = s[i_s][1]; + }) + break; + case tc7_VfloC64: + IVDEP(src != dst, + for (; n-- > 0; i_s += inc_s, i_d += inc_d) { + d[i_d][0] = ((double (*)[2])s)[i_s][0]; + d[i_d][1] = ((double (*)[2])s)[i_s][1]; + }) + break; + } + } + case tc7_VfloC64: { double (*d)[2] = (double (*)[2])VELTS(dst); double (*s)[2] = 
(double (*)[2])VELTS(src); switch TYP7(src) { default: goto gencase; - case tc7_ivect: case tc7_uvect: + case tc7_VfixZ32: case tc7_VfixN32: IVDEP(src != dst, for (; n-- > 0; i_s += inc_s, i_d += inc_d) { d[i_d][0] = ((long *)s)[i_s]; d[i_d][1] = 0.0; }) - break; - case tc7_fvect: + break; + case tc7_VfloR32: IVDEP(src != dst, for (; n-- > 0; i_s += inc_s, i_d += inc_d) { d[i_d][0] = ((float *)s)[i_s]; d[i_d][1] = 0.0; }) - break; - case tc7_dvect: + break; + case tc7_VfloR64: IVDEP(src != dst, for (; n-- > 0; i_s += inc_s, i_d += inc_d) { d[i_d][0] = ((double *)s)[i_s]; d[i_d][1] = 0.0; }) - break; - case tc7_cvect: + break; + case tc7_VfloC32: + IVDEP(src != dst, + for (; n-- > 0; i_s += inc_s, i_d += inc_d) { + d[i_d][0] = ((float (*)[2])s)[i_s][0]; + d[i_d][1] = ((float (*)[2])s)[i_s][1]; + }) + break; + case tc7_VfloC64: IVDEP(src != dst, for (; n-- > 0; i_s += inc_s, i_d += inc_d) { d[i_d][0] = s[i_s][0]; d[i_d][1] = s[i_s][1]; }) + break; } - break; } # endif /* FLOATS */ } @@ -463,7 +513,7 @@ SCM ra2contig(ra, copy) len *= ARRAY_DIMS(ra)[k].ubnd - ARRAY_DIMS(ra)[k].lbnd + 1; k = ARRAY_NDIM(ra); if (ARRAY_CONTP(ra) && ((0==k) || (1==ARRAY_DIMS(ra)[k-1].inc))) { - if (tc7_bvect != TYP7(ARRAY_V(ra))) + if (tc7_Vbool != TYP7(ARRAY_V(ra))) return ra; if ((len==LENGTH(ARRAY_V(ra)) && 0==ARRAY_BASE(ra) % LONG_BIT && @@ -541,30 +591,31 @@ SCM sc2array(s, ra, prot) switch TYP7(ARRAY_V(res)) { case tc7_vector: break; - case tc7_bvect: + case tc7_Vbool: if (BOOL_T==s || BOOL_F==s) break; goto mismatch; case tc7_string: if (ICHRP(s)) break; goto mismatch; - case tc7_uvect: + case tc7_VfixN32: if (INUMP(s) && INUM(s)>=0) break; #ifdef BIGDIG if (NIMP(s) && tc16_bigpos==TYP16(s) && NUMDIGS(s)<=DIGSPERLONG) break; #endif goto mismatch; - case tc7_ivect: + case tc7_VfixZ32: if (INUMP(s)) break; #ifdef BIGDIG if (NIMP(s) && BIGP(s) && NUMDIGS(s)<=DIGSPERLONG) break; #endif goto mismatch; #ifdef FLOATS - case tc7_fvect: - case tc7_dvect: + case tc7_VfloR32: + case 
tc7_VfloR64: if (NUMBERP(s) && !(NIMP(s) && CPLXP(s))) break; goto mismatch; - case tc7_cvect: + case tc7_VfloC32: + case tc7_VfloC64: if (NUMBERP(s)) break; goto mismatch; #endif @@ -597,26 +648,33 @@ int ra_eqp(ra0, ras) BVE_CLR(ra0, i0); break; } - case tc7_uvect: - case tc7_ivect: + case tc7_VfixN32: + case tc7_VfixZ32: for (; n-- > 0; i0 += inc0, i1 += inc1, i2 += inc2) if (BVE_REF(ra0, i0)) if (VELTS(ra1)[i1] != VELTS(ra2)[i2]) BVE_CLR(ra0, i0); break; # ifdef FLOATS - case tc7_fvect: + case tc7_VfloR32: for (; n-- > 0; i0 += inc0, i1 += inc1, i2 += inc2) if (BVE_REF(ra0, i0)) if (((float *)VELTS(ra1))[i1] != ((float *)VELTS(ra2))[i2]) BVE_CLR(ra0, i0); break; - case tc7_dvect: + case tc7_VfloR64: for (; n-- > 0; i0 += inc0, i1 += inc1, i2 += inc2) if (BVE_REF(ra0, i0)) if (((double *)VELTS(ra1))[i1] != ((double *)VELTS(ra2))[i2]) BVE_CLR(ra0, i0); break; - case tc7_cvect: + case tc7_VfloC32: + for (; n-- > 0; i0 += inc0, i1 += inc1, i2 += inc2) + if (BVE_REF(ra0, i0)) + if (((float *)VELTS(ra1))[2*i1] != ((float *)VELTS(ra2))[2*i2] || + ((float *)VELTS(ra1))[2*i1+1] != ((float *)VELTS(ra2))[2*i2+1]) + BVE_CLR(ra0, i0); + break; + case tc7_VfloC64: for (; n-- > 0; i0 += inc0, i1 += inc1, i2 += inc2) if (BVE_REF(ra0, i0)) if (((double *)VELTS(ra1))[2*i1] != ((double *)VELTS(ra2))[2*i2] || @@ -651,7 +709,7 @@ static int ra_compare(ra0, ra1, ra2, opt) BVE_CLR(ra0, i0); break; } - case tc7_uvect: + case tc7_VfixN32: for (; n-- > 0; i0 += inc0, i1 += inc1, i2 += inc2) { if (BVE_REF(ra0, i0)) if (opt ? @@ -660,7 +718,7 @@ static int ra_compare(ra0, ra1, ra2, opt) BVE_CLR(ra0, i0); } break; - case tc7_ivect: + case tc7_VfixZ32: for (; n-- > 0; i0 += inc0, i1 += inc1, i2 += inc2) { if (BVE_REF(ra0, i0)) if (opt ? @@ -670,7 +728,7 @@ static int ra_compare(ra0, ra1, ra2, opt) } break; # ifdef FLOATS - case tc7_fvect: + case tc7_VfloR32: for (; n-- > 0; i0 += inc0, i1 += inc1, i2 += inc2) if (BVE_REF(ra0, i0)) if (opt ? 
@@ -678,7 +736,7 @@ static int ra_compare(ra0, ra1, ra2, opt) ((float *)VELTS(ra1))[i1] >= ((float *)VELTS(ra2))[i2]) BVE_CLR(ra0, i0); break; - case tc7_dvect: + case tc7_VfloR64: for (; n-- > 0; i0 += inc0, i1 += inc1, i2 += inc2) if (BVE_REF(ra0, i0)) if (opt ? @@ -732,7 +790,7 @@ int ra_sum(ra0, ras) MAKINUM(i0)); break; } - case tc7_uvect: { + case tc7_VfixN32: { unsigned long r; unsigned long *v0 = (unsigned long *)VELTS(ra0); unsigned long *v1 = (unsigned long *)VELTS(ra1); @@ -744,7 +802,7 @@ int ra_sum(ra0, ras) } ); break; } - case tc7_ivect: { + case tc7_VfixZ32: { long r, *v0 = (long *)VELTS(ra0), *v1 = (long *)VELTS(ra1); IVDEP(ra0 != ra1, for (; n-- > 0; i0 += inc0, i1 += inc1) { @@ -755,7 +813,7 @@ int ra_sum(ra0, ras) break; } # ifdef FLOATS - case tc7_fvect: { + case tc7_VfloR32: { float *v0 = (float *)VELTS(ra0); float *v1 = (float *)VELTS(ra1); IVDEP(ra0 != ra1, @@ -763,7 +821,7 @@ int ra_sum(ra0, ras) v0[i0] += v1[i1]); break; } - case tc7_dvect: { + case tc7_VfloR64: { double *v0 = (double *)VELTS(ra0); double *v1 = (double *)VELTS(ra1); IVDEP(ra0 != ra1, @@ -771,7 +829,17 @@ int ra_sum(ra0, ras) v0[i0] += v1[i1]); break; } - case tc7_cvect: { + case tc7_VfloC32: { + float (*v0)[2] = (float (*)[2])VELTS(ra0); + float (*v1)[2] = (float (*)[2])VELTS(ra1); + IVDEP(ra0 != ra1, + for (; n-- > 0; i0 += inc0, i1 += inc1) { + v0[i0][0] += v1[i1][0]; + v0[i0][1] += v1[i1][1]; + }); + break; + } + case tc7_VfloC64: { double (*v0)[2] = (double (*)[2])VELTS(ra0); double (*v1)[2] = (double (*)[2])VELTS(ra1); IVDEP(ra0 != ra1, @@ -802,26 +870,34 @@ int ra_difference(ra0, ras) aset(ra0, difference(RVREF(ra0, i0, e0), UNDEFINED), MAKINUM(i0)); break; } - case tc7_ivect: { + case tc7_VfixZ32: { long *v0 = VELTS(ra0); for (; n-- > 0; i0 += inc0) v0[i0] = -v0[i0]; break; } # ifdef FLOATS - case tc7_fvect: { + case tc7_VfloR32: { float *v0 = (float *)VELTS(ra0); for (; n-- > 0; i0 += inc0) v0[i0] = -v0[i0]; break; } - case tc7_dvect: { + case tc7_VfloR64: { double 
*v0 = (double *)VELTS(ra0); for (; n-- > 0; i0 += inc0) v0[i0] = -v0[i0]; break; } - case tc7_cvect: { + case tc7_VfloC32: { + float (*v0)[2] = (float (*)[2])VELTS(ra0); + for (; n-- > 0; i0 += inc0) { + v0[i0][0] = -v0[i0][0]; + v0[i0][1] = -v0[i0][1]; + } + break; + } + case tc7_VfloC64: { double (*v0)[2] = (double (*)[2])VELTS(ra0); for (; n-- > 0; i0 += inc0) { v0[i0][0] = -v0[i0][0]; @@ -845,7 +921,7 @@ int ra_difference(ra0, ras) aset(ra0, difference(RVREF(ra0, i0, e0), RVREF(ra1, i1, e1)), MAKINUM(i0)); break; } - case tc7_uvect: { + case tc7_VfixN32: { unsigned long r; unsigned long *v0 = (unsigned long *)VELTS(ra0); unsigned long *v1 = (unsigned long*)VELTS(ra1); @@ -857,7 +933,7 @@ int ra_difference(ra0, ras) } ); break; } - case tc7_ivect: { + case tc7_VfixZ32: { long r, *v0 = VELTS(ra0), *v1 = VELTS(ra1); IVDEP(ra0 != ra1, for (; n-- > 0; i0 += inc0, i1 += inc1) { @@ -868,7 +944,7 @@ int ra_difference(ra0, ras) break; } # ifdef FLOATS - case tc7_fvect: { + case tc7_VfloR32: { float *v0 = (float *)VELTS(ra0); float *v1 = (float *)VELTS(ra1); IVDEP(ra0 != ra1, @@ -876,7 +952,7 @@ int ra_difference(ra0, ras) v0[i0] -= v1[i1]); break; } - case tc7_dvect: { + case tc7_VfloR64: { double *v0 = (double *)VELTS(ra0); double *v1 = (double *)VELTS(ra1); IVDEP(ra0 != ra1, @@ -884,7 +960,17 @@ int ra_difference(ra0, ras) v0[i0] -= v1[i1]); break; } - case tc7_cvect: { + case tc7_VfloC32: { + float (*v0)[2] = (float (*)[2])VELTS(ra0); + float (*v1)[2] = (float (*)[2])VELTS(ra1); + IVDEP(ra0 != ra1, + for (; n-- > 0; i0 += inc0, i1 += inc1) { + v0[i0][0] -= v1[i1][0]; + v0[i0][1] -= v1[i1][1]; + }) + break; + } + case tc7_VfloC64: { double (*v0)[2] = (double (*)[2])VELTS(ra0); double (*v1)[2] = (double (*)[2])VELTS(ra1); IVDEP(ra0 != ra1, @@ -921,7 +1007,7 @@ int ra_product(ra0, ras) MAKINUM(i0)); break; } - case tc7_uvect: { + case tc7_VfixN32: { unsigned long r; unsigned long *v0 = (unsigned long *)VELTS(ra0); unsigned long *v1 = (unsigned long *)VELTS(ra1); @@ 
-933,7 +1019,7 @@ int ra_product(ra0, ras) } ); break; } - case tc7_ivect: { + case tc7_VfixZ32: { long r, *v0 = VELTS(ra0), *v1 =VELTS(ra1); IVDEP(ra0 != ra1, for (; n-- > 0; i0 += inc0, i1 += inc1) { @@ -944,7 +1030,7 @@ int ra_product(ra0, ras) break; } # ifdef FLOATS - case tc7_fvect: { + case tc7_VfloR32: { float *v0 = (float *)VELTS(ra0); float *v1 = (float *)VELTS(ra1); IVDEP(ra0 != ra1, @@ -952,7 +1038,7 @@ int ra_product(ra0, ras) v0[i0] *= v1[i1]); break; } - case tc7_dvect: { + case tc7_VfloR64: { double *v0 = (double *)VELTS(ra0); double *v1 = (double *)VELTS(ra1); IVDEP(ra0 != ra1, @@ -960,7 +1046,19 @@ int ra_product(ra0, ras) v0[i0] *= v1[i1]); break; } - case tc7_cvect: { + case tc7_VfloC32: { + float (*v0)[2] = (float (*)[2])VELTS(ra0); + register double r; + float (*v1)[2] = (float (*)[2])VELTS(ra1); + IVDEP(ra0 != ra1, + for (; n-- > 0; i0 += inc0, i1 += inc1) { + r = v0[i0][0]*v1[i1][0] - v0[i0][1]*v1[i1][1]; + v0[i0][1] = v0[i0][0]*v1[i1][1] + v0[i0][1]*v1[i1][0]; + v0[i0][0] = r; + }); + break; + } + case tc7_VfloC64: { double (*v0)[2] = (double (*)[2])VELTS(ra0); register double r; double (*v1)[2] = (double (*)[2])VELTS(ra1); @@ -993,19 +1091,29 @@ int ra_divide(ra0, ras) break; } # ifdef FLOATS - case tc7_fvect: { + case tc7_VfloR32: { float *v0 = (float *)VELTS(ra0); for (; n-- > 0; i0 += inc0) v0[i0] = 1.0/v0[i0]; break; } - case tc7_dvect: { + case tc7_VfloR64: { double *v0 = (double *)VELTS(ra0); for (; n-- > 0; i0 += inc0) v0[i0] = 1.0/v0[i0]; break; } - case tc7_cvect: { + case tc7_VfloC32: { + register double d; + float (*v0)[2] = (float (*)[2])VELTS(ra0); + for (; n-- > 0; i0 += inc0) { + d = v0[i0][0]*v0[i0][0] + v0[i0][1]*v0[i0][1]; + v0[i0][0] /= d; + v0[i0][1] /= -d; + } + break; + } + case tc7_VfloC64: { register double d; double (*v0)[2] = (double (*)[2])VELTS(ra0); for (; n-- > 0; i0 += inc0) { @@ -1031,7 +1139,7 @@ int ra_divide(ra0, ras) break; } # ifdef FLOATS - case tc7_fvect: { + case tc7_VfloR32: { float *v0 = (float 
*)VELTS(ra0); float *v1 = (float *)VELTS(ra1); IVDEP(ra0 != ra1, @@ -1039,7 +1147,7 @@ int ra_divide(ra0, ras) v0[i0] /= v1[i1]); break; } - case tc7_dvect: { + case tc7_VfloR64: { double *v0 = (double *)VELTS(ra0); double *v1 = (double *)VELTS(ra1); IVDEP(ra0 != ra1, @@ -1047,7 +1155,20 @@ int ra_divide(ra0, ras) v0[i0] /= v1[i1]); break; } - case tc7_cvect: { + case tc7_VfloC32: { + register double d, r; + float (*v0)[2] = (float (*)[2])VELTS(ra0); + float (*v1)[2] = (float (*)[2])VELTS(ra1); + IVDEP(ra0 != ra1, + for (; n-- > 0; i0 += inc0, i1 += inc1) { + d = v1[i1][0]*v1[i1][0] + v1[i1][1]*v1[i1][1]; + r = (v0[i0][0]*v1[i1][0] + v0[i0][1]*v1[i1][1])/d; + v0[i0][1] = (v0[i0][1]*v1[i1][0] - v0[i0][0]*v1[i1][1])/d; + v0[i0][0] = r; + }) + break; + } + case tc7_VfloC64: { register double d, r; double (*v0)[2] = (double (*)[2])VELTS(ra0); double (*v1)[2] = (double (*)[2])VELTS(ra1); @@ -1118,32 +1239,32 @@ static int ramap_cxr(ra0, proc, ras) } break; # ifdef FLOATS - case tc7_fvect: { + case tc7_VfloR32: { float *dst = (float *)VELTS(ra0); switch TYP7(ra1) { default: goto gencase; - case tc7_fvect: + case tc7_VfloR32: for (; n-- > 0; i0 += inc0, i1 += inc1) dst[i0] = DSUBRF(proc)((double)((float *)VELTS(ra1))[i1]); break; - case tc7_uvect: - case tc7_ivect: + case tc7_VfixN32: + case tc7_VfixZ32: for (; n-- > 0; i0 += inc0, i1 += inc1) dst[i0] = DSUBRF(proc)((double)VELTS(ra1)[i1]); break; } break; } - case tc7_dvect: { + case tc7_VfloR64: { double *dst = (double *)VELTS(ra0); switch TYP7(ra1) { default: goto gencase; - case tc7_dvect: + case tc7_VfloR64: for (; n-- > 0; i0 += inc0, i1 += inc1) dst[i0] = DSUBRF(proc)(((double *)VELTS(ra1))[i1]); break; - case tc7_uvect: - case tc7_ivect: + case tc7_VfixN32: + case tc7_VfixZ32: for (; n-- > 0; i0 += inc0, i1 += inc1) dst[i0] = DSUBRF(proc)((double)VELTS(ra1)[i1]); break; @@ -1536,11 +1657,11 @@ static int raeql_1(ra0, as_equal, ra1) if (*v0 != *v1) return 0; return 1; } - case tc7_bvect: + case tc7_Vbool: for (; 
n--; i0 += inc0, i1 += inc1) if (BVE_REF(ra0, i0) != BVE_REF(ra1, i1)) return 0; return 1; - case tc7_uvect: case tc7_ivect: { + case tc7_VfixN32: case tc7_VfixZ32: { long *v0 = (long *)VELTS(ra0) + i0; long *v1 = (long *)VELTS(ra1) + i1; for (; n--; v0 += inc0, v1 += inc1) @@ -1548,21 +1669,30 @@ static int raeql_1(ra0, as_equal, ra1) return 1; } # ifdef FLOATS - case tc7_fvect: { + case tc7_VfloR32: { float *v0 = (float *)VELTS(ra0) + i0; float *v1 = (float *)VELTS(ra1) + i1; for (; n--; v0 += inc0, v1 += inc1) if (*v0 != *v1) return 0; return 1; } - case tc7_dvect: { + case tc7_VfloR64: { double *v0 = (double *)VELTS(ra0) + i0; double *v1 = (double *)VELTS(ra1) + i1; for (; n--; v0 += inc0, v1 += inc1) if (*v0 != *v1) return 0; return 1; } - case tc7_cvect: { + case tc7_VfloC32: { + float (*v0)[2]= (float (*)[2])VELTS(ra0) + i0; + float (*v1)[2] = (float (*)[2])VELTS(ra1) + i1; + for (; n--; v0 += inc0, v1 += inc1) { + if ((*v0)[0] != (*v1)[0]) return 0; + if ((*v0)[1] != (*v1)[1]) return 0; + } + return 1; + } + case tc7_VfloC64: { double (*v0)[2]= (double (*)[2])VELTS(ra0) + i0; double (*v1)[2] = (double (*)[2])VELTS(ra1) + i1; for (; n--; v0 += inc0, v1 += inc1) { |