rcx

miscellaneous C library
git clone git://git.rr3.xyz/rcx

commit e46be0f61a89952d9cd984722583bf2101c82318
parent 5a4934636d9eeaada63271c7f58bcdae55ecad60
Author: Robert Russell <robert@rr3.xyz>
Date:   Sat,  1 Feb 2025 17:05:10 -0800

Remove bit zips

On second thought, these are too specific for RCX. I moved them to
VCE (the project I originally added them for).

Diffstat:
M inc/bits.h | 68 +++++++-----------------------------------------------------------
1 file changed, 9 insertions(+), 59 deletions(-)

diff --git a/inc/bits.h b/inc/bits.h
@@ -8,65 +8,15 @@
 
 /* ----- Bit rotates ----- */
 
-static inline u8  r_rotl8 (u8  n, uint c) { return (n << (c& 7u)) | (n >> (-c& 7u)); }
-static inline u16 r_rotl16(u16 n, uint c) { return (n << (c&15u)) | (n >> (-c&15u)); }
-static inline u32 r_rotl32(u32 n, uint c) { return (n << (c&31u)) | (n >> (-c&31u)); }
-static inline u64 r_rotl64(u64 n, uint c) { return (n << (c&63u)) | (n >> (-c&63u)); }
-
-static inline u8  r_rotr8 (u8  n, uint c) { return (n >> (c& 7u)) | (n << (-c& 7u)); }
-static inline u16 r_rotr16(u16 n, uint c) { return (n >> (c&15u)) | (n << (-c&15u)); }
-static inline u32 r_rotr32(u32 n, uint c) { return (n >> (c&31u)) | (n << (-c&31u)); }
-static inline u64 r_rotr64(u64 n, uint c) { return (n >> (c&63u)) | (n << (-c&63u)); }
-
-
-/* ----- Bit zips ----- */
-
-/* To perform a bit zip with n inputs, we evaluate, over the integers mod 2
- * (where XOR is addition and AND is multiplication), a recursively-structured
- * polynomial with n variables, 2^n terms, and coefficients f0,f1,...,f(2^n-1),
- * the bits of the zipping function f. The pattern is illustrated below for the
- * first few values of n. In principle, we do this independently for each of
- * the 64 n-tuples of input bits, but C's bit-wise operators let us do all 64
- * evaluations in parallel. */
-
-/* TODO: Provide 8, 16, and 32 bit as well? The naming gets awkward, and then
- * there would be 16 functions! */
-
-/* This is the constant polynomial f0. We use a two's complement trick to
- * broadcast f0 to an entire u64. */
-static inline u64
-r_bz0(u64 f) {
-	return !(f & 1u) - 1u;
-}
-
-/* f0 ^
- * f1 & u */
-static inline u64
-r_bz1(u64 f, u64 u) {
-	return r_bz0(f) ^ (r_bz0(f >> 1) & u);
-}
-
-/* f0 ^
- * f1 & u ^
- * f2 & v ^
- * f3 & u & v */
-static inline u64
-r_bz2(u64 f, u64 u, u64 v) {
-	return r_bz1(f, u) ^ (r_bz1(f >> 2, u) & v);
-}
-
-/* f0 ^
- * f1 & u ^
- * f2 & v ^
- * f3 & u & v ^
- * f4 & w ^
- * f5 & u & w ^
- * f6 & v & w ^
- * f7 & u & v & w */
-static inline u64
-r_bz3(u64 f, u64 u, u64 v, u64 w) {
-	return r_bz2(f, u, v) ^ (r_bz2(f >> 4, u, v) & w);
-}
+static inline u8  r_rotl8 (u8  n, uint c) { return (n << (c & 7u)) | (n >> (-c & 7u)); }
+static inline u16 r_rotl16(u16 n, uint c) { return (n << (c & 15u)) | (n >> (-c & 15u)); }
+static inline u32 r_rotl32(u32 n, uint c) { return (n << (c & 31u)) | (n >> (-c & 31u)); }
+static inline u64 r_rotl64(u64 n, uint c) { return (n << (c & 63u)) | (n >> (-c & 63u)); }
+
+static inline u8  r_rotr8 (u8  n, uint c) { return (n >> (c & 7u)) | (n << (-c & 7u)); }
+static inline u16 r_rotr16(u16 n, uint c) { return (n >> (c & 15u)) | (n << (-c & 15u)); }
+static inline u32 r_rotr32(u32 n, uint c) { return (n >> (c & 31u)) | (n << (-c & 31u)); }
+static inline u64 r_rotr64(u64 n, uint c) { return (n >> (c & 63u)) | (n << (-c & 63u)); }
 
 
 /* ----- Population count ----- */
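
A usage sketch of my own (not part of this commit, and assuming inc/bits.h as it stood before the removal): the coefficient bits of f follow the term order shown in the r_bz3 comment, so a three-input bitwise majority, whose mod-2 polynomial is (u & v) ^ (u & w) ^ (v & w), sets coefficients f3, f5 and f6, giving f = 0x68:

/* Hypothetical example; r_bz3 is the function removed above. */
static inline u64
bitwise_majority(u64 u, u64 v, u64 w) {
	/* f = 0x68 sets f3 (u & v), f5 (u & w), f6 (v & w); all other coefficients are 0,
	 * so every bit of the result is the majority of the corresponding bits of u, v, w. */
	return r_bz3(0x68u, u, v, w);
}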