author | Andy Wingo <wingo@pobox.com> | 2009-06-24 23:49:11 +0200
---|---|---
committer | Andy Wingo <wingo@pobox.com> | 2009-06-26 12:42:10 +0200
commit | e6eb2467164de264c313af92d488144d2cdae94c (patch) |
tree | 0c753cb23b1a9e269ca6b173f98dae14bb460265 /libguile/vm-i-scheme.c |
parent | caa92f5e951528c1ea31b2eea8b388e9888fa19e (diff) |
download | guile-e6eb2467164de264c313af92d488144d2cdae94c.tar.gz |
add bytevector ops to the vm
* libguile/instructions.h (SCM_VM_NUM_INSTRUCTIONS): Enlarge to 255. Not
sure what performance effects this will have.
* libguile/vm-engine.c: Add new error case, vm_error_not_a_bytevector.
* libguile/vm-engine.h: Don't assign specific registers for i386. Having
added the new VM vector ops, GCC 4.4 is erroring for me now.
* libguile/vm-i-scheme.c: Add bytevector-specific ops to the VM.
We don't actually use them yet, though.
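All of the new accessor and setter ops in the diff below share one shape: an inline fast path when the index is an immediate fixnum that is non-negative, in range, and aligned to the element size, falling back to the generic libguile bytevector function otherwise. A minimal standalone C sketch of that pattern (the names `example_bytevector` and `example_bv_u16_native_ref` are hypothetical stand-ins for the VM macros and SCM plumbing):

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for SCM_BYTEVECTOR_LENGTH / SCM_BYTEVECTOR_CONTENTS. */
struct example_bytevector { size_t len; uint8_t *contents; };

/* Fast path in the style of BV_FIXABLE_INT_REF: when the index is in range
   and aligned to the element size, read straight out of the bytevector and
   report success; otherwise the caller falls back to the generic accessor
   (scm_bytevector_u16_native_ref in the real op), which handles range
   errors and non-fixnum indices. */
static int
example_bv_u16_native_ref (const struct example_bytevector *bv, size_t idx,
                           uint16_t *out)
{
  if (idx % sizeof (uint16_t) == 0 && idx + sizeof (uint16_t) <= bv->len)
    {
      memcpy (out, bv->contents + idx, sizeof (uint16_t)); /* native order */
      return 1;                 /* fast path taken */
    }
  return 0;                     /* take the slow, checked path */
}
```

In the VM itself the same checks are written against SCM immediates (SCM_I_INUMP, SCM_INUM, SCM_BYTEVECTOR_LENGTH), and the value read is boxed back into a fixnum with SCM_I_MAKINUM or, for the wider types, with scm_from_uint32 and friends when it does not fit.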
Diffstat (limited to 'libguile/vm-i-scheme.c')
-rw-r--r-- | libguile/vm-i-scheme.c | 217
1 file changed, 217 insertions, 0 deletions
```diff
diff --git a/libguile/vm-i-scheme.c b/libguile/vm-i-scheme.c
index 4fc026c48..e074d36a0 100644
--- a/libguile/vm-i-scheme.c
+++ b/libguile/vm-i-scheme.c
@@ -279,6 +279,223 @@ VM_DEFINE_INSTRUCTION (108, slot_set, "slot-set", 0, 3, 0)
   NEXT;
 }
 
+#define VM_VALIDATE_BYTEVECTOR(x) \
+  if (SCM_UNLIKELY (!SCM_BYTEVECTOR_P (x))) \
+    { finish_args = x; \
+      goto vm_error_not_a_bytevector; \
+    }
+
+#define BV_REF_WITH_ENDIANNESS(stem, fn_stem) \
+{ \
+  SCM endianness; \
+  POP (endianness); \
+  if (scm_is_eq (endianness, scm_i_native_endianness)) \
+    goto VM_LABEL (bv_##stem##_native_ref); \
+  { \
+    ARGS2 (bv, idx); \
+    RETURN (scm_bytevector_##fn_stem##_ref (bv, idx, endianness)); \
+  } \
+}
+
+VM_DEFINE_FUNCTION (109, bv_u16_ref, "bv-u16-ref", 3)
+BV_REF_WITH_ENDIANNESS (u16, u16)
+VM_DEFINE_FUNCTION (110, bv_s16_ref, "bv-s16-ref", 3)
+BV_REF_WITH_ENDIANNESS (s16, s16)
+VM_DEFINE_FUNCTION (111, bv_u32_ref, "bv-u32-ref", 3)
+BV_REF_WITH_ENDIANNESS (u32, u32)
+VM_DEFINE_FUNCTION (112, bv_s32_ref, "bv-s32-ref", 3)
+BV_REF_WITH_ENDIANNESS (s32, s32)
+VM_DEFINE_FUNCTION (113, bv_u64_ref, "bv-u64-ref", 3)
+BV_REF_WITH_ENDIANNESS (u64, u64)
+VM_DEFINE_FUNCTION (114, bv_s64_ref, "bv-s64-ref", 3)
+BV_REF_WITH_ENDIANNESS (s64, s64)
+VM_DEFINE_FUNCTION (115, bv_f32_ref, "bv-f32-ref", 3)
+BV_REF_WITH_ENDIANNESS (f32, ieee_single)
+VM_DEFINE_FUNCTION (116, bv_f64_ref, "bv-f64-ref", 3)
+BV_REF_WITH_ENDIANNESS (f64, ieee_double)
+
+#undef BV_REF_WITH_ENDIANNESS
+
+#define BV_FIXABLE_INT_REF(stem, fn_stem, type, size) \
+{ \
+  long i; \
+  ARGS2 (bv, idx); \
+  VM_VALIDATE_BYTEVECTOR (bv); \
+  if (SCM_LIKELY (SCM_I_INUMP (idx) \
+                  && ((i = SCM_INUM (idx)) >= 0) \
+                  && (i < SCM_BYTEVECTOR_LENGTH (bv)) \
+                  && (i % size == 0))) \
+    RETURN (SCM_I_MAKINUM (*(scm_t_##type*) \
+                           (SCM_BYTEVECTOR_CONTENTS (bv) + i))); \
+  else \
+    RETURN (scm_bytevector_##fn_stem##_ref (bv, idx)); \
+}
+
+#define BV_INT_REF(stem, type, size) \
+{ \
+  long i; \
+  ARGS2 (bv, idx); \
+  VM_VALIDATE_BYTEVECTOR (bv); \
+  if (SCM_LIKELY (SCM_I_INUMP (idx) \
+                  && ((i = SCM_INUM (idx)) >= 0) \
+                  && (i < SCM_BYTEVECTOR_LENGTH (bv)) \
+                  && (i % size == 0))) \
+    { scm_t_##type x = (*(scm_t_##type*)(SCM_BYTEVECTOR_CONTENTS (bv) + i)); \
+      if (SCM_FIXABLE (x)) \
+        RETURN (SCM_I_MAKINUM (x)); \
+      else \
+        RETURN (scm_from_##type (x)); \
+    } \
+  else \
+    RETURN (scm_bytevector_##stem##_native_ref (bv, idx)); \
+}
+
+#define BV_FLOAT_REF(stem, fn_stem, type, size) \
+{ \
+  long i; \
+  ARGS2 (bv, idx); \
+  VM_VALIDATE_BYTEVECTOR (bv); \
+  if (SCM_LIKELY (SCM_I_INUMP (idx) \
+                  && ((i = SCM_INUM (idx)) >= 0) \
+                  && (i < SCM_BYTEVECTOR_LENGTH (bv)) \
+                  && (i % size == 0))) \
+    RETURN (scm_from_double ((*(type*)(SCM_BYTEVECTOR_CONTENTS (bv) + i)))); \
+  else \
+    RETURN (scm_bytevector_##fn_stem##_native_ref (bv, idx)); \
+}
+
+VM_DEFINE_FUNCTION (117, bv_u8_ref, "bv-u8-ref", 2)
+BV_FIXABLE_INT_REF (u8, u8, uint8, 1)
+VM_DEFINE_FUNCTION (118, bv_s8_ref, "bv-s8-ref", 2)
+BV_FIXABLE_INT_REF (s8, s8, int8, 1)
+VM_DEFINE_FUNCTION (119, bv_u16_native_ref, "bv-u16-native-ref", 2)
+BV_FIXABLE_INT_REF (u16, u16_native, uint16, 2)
+VM_DEFINE_FUNCTION (120, bv_s16_native_ref, "bv-s16-native-ref", 2)
+BV_FIXABLE_INT_REF (s16, s16_native, int16, 2)
+VM_DEFINE_FUNCTION (121, bv_u32_native_ref, "bv-u32-native-ref", 2)
+/* FIXME: u32 is always a fixnum on 64-bit builds */
+BV_INT_REF (u32, uint32, 4)
+VM_DEFINE_FUNCTION (122, bv_s32_native_ref, "bv-s32-native-ref", 2)
+BV_INT_REF (s32, int32, 4)
+VM_DEFINE_FUNCTION (123, bv_u64_native_ref, "bv-u64-native-ref", 2)
+BV_INT_REF (u64, uint64, 8)
+VM_DEFINE_FUNCTION (124, bv_s64_native_ref, "bv-s64-native-ref", 2)
+BV_INT_REF (s64, int64, 8)
+VM_DEFINE_FUNCTION (125, bv_f32_native_ref, "bv-f32-native-ref", 2)
+BV_FLOAT_REF (f32, ieee_single, float, 4)
+VM_DEFINE_FUNCTION (126, bv_f64_native_ref, "bv-f64-native-ref", 2)
+BV_FLOAT_REF (f64, ieee_double, double, 8)
+
+#undef BV_FIXABLE_INT_REF
+#undef BV_INT_REF
+#undef BV_FLOAT_REF
+
+
+
+#define BV_SET_WITH_ENDIANNESS(stem, fn_stem) \
+{ \
+  SCM endianness; \
+  POP (endianness); \
+  if (scm_is_eq (endianness, scm_i_native_endianness)) \
+    goto VM_LABEL (bv_##stem##_native_set); \
+  { \
+    ARGS3 (bv, idx, val); \
+    RETURN (scm_bytevector_##fn_stem##_set_x (bv, idx, val, endianness)); \
+  } \
+}
+
+VM_DEFINE_FUNCTION (127, bv_u16_set, "bv-u16-set", 4)
+BV_SET_WITH_ENDIANNESS (u16, u16)
+VM_DEFINE_FUNCTION (128, bv_s16_set, "bv-s16-set", 4)
+BV_SET_WITH_ENDIANNESS (s16, s16)
+VM_DEFINE_FUNCTION (129, bv_u32_set, "bv-u32-set", 4)
+BV_SET_WITH_ENDIANNESS (u32, u32)
+VM_DEFINE_FUNCTION (130, bv_s32_set, "bv-s32-set", 4)
+BV_SET_WITH_ENDIANNESS (s32, s32)
+VM_DEFINE_FUNCTION (131, bv_u64_set, "bv-u64-set", 4)
+BV_SET_WITH_ENDIANNESS (u64, u64)
+VM_DEFINE_FUNCTION (132, bv_s64_set, "bv-s64-set", 4)
+BV_SET_WITH_ENDIANNESS (s64, s64)
+VM_DEFINE_FUNCTION (133, bv_f32_set, "bv-f32-set", 4)
+BV_SET_WITH_ENDIANNESS (f32, ieee_single)
+VM_DEFINE_FUNCTION (134, bv_f64_set, "bv-f64-set", 4)
+BV_SET_WITH_ENDIANNESS (f64, ieee_double)
+
+#undef BV_SET_WITH_ENDIANNESS
+
+#define BV_FIXABLE_INT_SET(stem, fn_stem, type, min, max, size) \
+{ \
+  long i, j; \
+  ARGS3 (bv, idx, val); \
+  VM_VALIDATE_BYTEVECTOR (bv); \
+  if (SCM_LIKELY (SCM_I_INUMP (idx) \
+                  && ((i = SCM_INUM (idx)) >= 0) \
+                  && (i < SCM_BYTEVECTOR_LENGTH (bv)) \
+                  && (i % size == 0) \
+                  && (SCM_I_INUMP (val)) \
+                  && ((j = SCM_INUM (val)) >= min) \
+                  && (j <= max))) \
+    *(scm_t_##type*) (SCM_BYTEVECTOR_CONTENTS (bv) + i) = (scm_t_##type)j; \
+  else \
+    scm_bytevector_##fn_stem##_set_x (bv, idx, val); \
+  NEXT; \
+}
+
+#define BV_INT_SET(stem, type, size) \
+{ \
+  long i; \
+  ARGS3 (bv, idx, val); \
+  VM_VALIDATE_BYTEVECTOR (bv); \
+  if (SCM_LIKELY (SCM_I_INUMP (idx) \
+                  && ((i = SCM_INUM (idx)) >= 0) \
+                  && (i < SCM_BYTEVECTOR_LENGTH (bv)) \
+                  && (i % size == 0))) \
+    *(scm_t_##type*) (SCM_BYTEVECTOR_CONTENTS (bv) + i) = scm_to_##type (val); \
+  else \
+    scm_bytevector_##stem##_native_set_x (bv, idx, val); \
+  NEXT; \
+}
+
+#define BV_FLOAT_SET(stem, fn_stem, type, size) \
+{ \
+  long i; \
+  ARGS3 (bv, idx, val); \
+  VM_VALIDATE_BYTEVECTOR (bv); \
+  if (SCM_LIKELY (SCM_I_INUMP (idx) \
+                  && ((i = SCM_INUM (idx)) >= 0) \
+                  && (i < SCM_BYTEVECTOR_LENGTH (bv)) \
+                  && (i % size == 0))) \
+    *(type*) (SCM_BYTEVECTOR_CONTENTS (bv) + i) = scm_to_double (val); \
+  else \
+    scm_bytevector_##fn_stem##_native_set_x (bv, idx, val); \
+}
+
+VM_DEFINE_FUNCTION (135, bv_u8_set, "bv-u8-set", 3)
+BV_FIXABLE_INT_SET (u8, u8, uint8, 0, SCM_T_UINT8_MAX, 1)
+VM_DEFINE_FUNCTION (136, bv_s8_set, "bv-s8-set", 3)
+BV_FIXABLE_INT_SET (s8, s8, int8, SCM_T_INT8_MIN, SCM_T_INT8_MAX, 1)
+VM_DEFINE_FUNCTION (137, bv_u16_native_set, "bv-u16-native-set", 3)
+BV_FIXABLE_INT_SET (u16, u16_native, uint16, 0, SCM_T_UINT16_MAX, 3)
+VM_DEFINE_FUNCTION (138, bv_s16_native_set, "bv-s16-native-set", 3)
+BV_FIXABLE_INT_SET (s16, s16_native, int16, SCM_T_INT16_MIN, SCM_T_INT16_MAX, 3)
+VM_DEFINE_FUNCTION (139, bv_u32_native_set, "bv-u32-native-set", 3)
+/* FIXME: u32 is always a fixnum on 64-bit builds */
+BV_INT_SET (u32, uint32, 4)
+VM_DEFINE_FUNCTION (140, bv_s32_native_set, "bv-s32-native-set", 3)
+BV_INT_SET (s32, int32, 4)
+VM_DEFINE_FUNCTION (141, bv_u64_native_set, "bv-u64-native-set", 3)
+BV_INT_SET (u64, uint64, 8)
+VM_DEFINE_FUNCTION (142, bv_s64_native_set, "bv-s64-native-set", 3)
+BV_INT_SET (s64, int64, 8)
+VM_DEFINE_FUNCTION (143, bv_f32_native_set, "bv-f32-native-set", 3)
+BV_FLOAT_SET (f32, ieee_single, float, 4)
+VM_DEFINE_FUNCTION (144, bv_f64_native_set, "bv-f64-native-set", 3)
+BV_FLOAT_SET (f64, ieee_double, double, 8)
+
+#undef BV_FIXABLE_INT_SET
+#undef BV_INT_SET
+#undef BV_FLOAT_SET
+
 /*
 (defun renumber-ops ()
   "start from top of buffer and renumber 'VM_DEFINE_FOO (\n' sequences"
```
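For the ops that take an explicit endianness argument (bv-u16-ref and friends), BV_REF_WITH_ENDIANNESS and BV_SET_WITH_ENDIANNESS pop the endianness first: if it is the native endianness object they jump to the corresponding native op, otherwise they call the generic scm_bytevector_* accessor with that endianness. A rough standalone C illustration of the dispatch, assuming hypothetical names (`example_endianness`, `example_native`, `example_bv_u16_ref`) in place of the SCM objects and VM labels:

```c
#include <stdint.h>
#include <string.h>

typedef enum { EXAMPLE_LITTLE, EXAMPLE_BIG } example_endianness;

/* Stand-in for scm_i_native_endianness: the byte order of this build. */
#if defined (__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
static const example_endianness example_native = EXAMPLE_BIG;
#else
static const example_endianness example_native = EXAMPLE_LITTLE;
#endif

/* Dispatch in the style of BV_REF_WITH_ENDIANNESS: requests for the native
   byte order take the direct load that the bv-u16-native-ref op would do;
   anything else goes through an explicit byte-by-byte read, standing in for
   the generic scm_bytevector_u16_ref call. */
static uint16_t
example_bv_u16_ref (const uint8_t *p, example_endianness e)
{
  if (e == example_native)
    {
      uint16_t v;
      memcpy (&v, p, sizeof v);          /* native-order fast path */
      return v;
    }
  return (e == EXAMPLE_BIG)
    ? (uint16_t) ((p[0] << 8) | p[1])    /* big-endian read */
    : (uint16_t) (p[0] | (p[1] << 8));   /* little-endian read */
}
```

The design choice here is that the common native-endianness case is re-dispatched onto the native op's fast path inside the VM, and only the byte-swapped case always pays for a call into C.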